aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/android/binder_alloc.c43
-rw-r--r--drivers/base/firmware_loader/main.c30
-rw-r--r--drivers/block/null_blk.h17
-rw-r--r--drivers/block/null_blk_main.c45
-rw-r--r--drivers/block/null_blk_zoned.c34
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c92
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c53
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c17
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c32
-rw-r--r--drivers/char/ipmi/kcs_bmc.c7
-rw-r--r--drivers/dma/mic_x100_dma.c4
-rw-r--r--drivers/fpga/dfl-fme-pr.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c20
-rw-r--r--drivers/gpu/drm/i915/intel_display.c8
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c228
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c21
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c13
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c1
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c11
-rw-r--r--drivers/md/dm-crypt.c10
-rw-r--r--drivers/md/dm-integrity.c4
-rw-r--r--drivers/md/dm-raid.c154
-rw-r--r--drivers/md/dm-thin-metadata.c36
-rw-r--r--drivers/md/dm-thin.c73
-rw-r--r--drivers/md/dm-verity-target.c24
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/misc/mei/bus.c12
-rw-r--r--drivers/misc/mei/client.c2
-rw-r--r--drivers/misc/mei/hbm.c9
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c8
-rw-r--r--drivers/mmc/host/omap_hsmmc.c1
-rw-r--r--drivers/nvme/target/rdma.c27
-rw-r--r--drivers/of/base.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c18
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/quirks.c6
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c24
-rw-r--r--drivers/s390/crypto/ap_bus.c86
-rw-r--r--drivers/spi/spi-fsl-dspi.c6
-rw-r--r--drivers/spi/spi.c13
-rw-r--r--drivers/staging/erofs/Kconfig2
-rw-r--r--drivers/staging/erofs/super.c4
-rw-r--r--drivers/staging/fbtft/TODO4
-rw-r--r--drivers/staging/gasket/TODO13
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c7
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c5
-rw-r--r--drivers/staging/wilc1000/Makefile3
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c6
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c7
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h2
-rw-r--r--drivers/tty/hvc/hvc_console.c38
-rw-r--r--drivers/usb/class/cdc-acm.c73
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/common/common.c25
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/message.c11
-rw-r--r--drivers/usb/core/of.c26
-rw-r--r--drivers/usb/core/quirks.c7
-rw-r--r--drivers/usb/dwc2/platform.c4
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c15
-rw-r--r--drivers/usb/gadget/udc/net2280.c16
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c5
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-plat.c27
-rw-r--r--drivers/usb/host/xhci.c30
-rw-r--r--drivers/usb/misc/uss720.c4
-rw-r--r--drivers/usb/misc/yurex.c8
-rw-r--r--drivers/usb/mtu3/mtu3_core.c6
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h1
-rw-r--r--drivers/usb/serial/io_ti.h2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/storage/scsiglue.c9
-rw-r--r--drivers/usb/storage/uas.c21
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/typec/bus.c7
-rw-r--r--drivers/usb/typec/class.c1
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/cpu_hotplug.c15
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntdev.c26
-rw-r--r--drivers/xen/manage.c6
-rw-r--r--drivers/xen/mem-reservation.c4
-rw-r--r--drivers/xen/xen-balloon.c3
99 files changed, 1003 insertions, 756 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3f3b7b253445..64fd96eada31 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -332,6 +332,35 @@ err_no_vma:
332 return vma ? -ENOMEM : -ESRCH; 332 return vma ? -ENOMEM : -ESRCH;
333} 333}
334 334
335
336static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
337 struct vm_area_struct *vma)
338{
339 if (vma)
340 alloc->vma_vm_mm = vma->vm_mm;
341 /*
342 * If we see alloc->vma is not NULL, buffer data structures set up
343 * completely. Look at smp_rmb side binder_alloc_get_vma.
344 * We also want to guarantee new alloc->vma_vm_mm is always visible
345 * if alloc->vma is set.
346 */
347 smp_wmb();
348 alloc->vma = vma;
349}
350
351static inline struct vm_area_struct *binder_alloc_get_vma(
352 struct binder_alloc *alloc)
353{
354 struct vm_area_struct *vma = NULL;
355
356 if (alloc->vma) {
357 /* Look at description in binder_alloc_set_vma */
358 smp_rmb();
359 vma = alloc->vma;
360 }
361 return vma;
362}
363
335static struct binder_buffer *binder_alloc_new_buf_locked( 364static struct binder_buffer *binder_alloc_new_buf_locked(
336 struct binder_alloc *alloc, 365 struct binder_alloc *alloc,
337 size_t data_size, 366 size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
348 size_t size, data_offsets_size; 377 size_t size, data_offsets_size;
349 int ret; 378 int ret;
350 379
351 if (alloc->vma == NULL) { 380 if (!binder_alloc_get_vma(alloc)) {
352 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 381 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
353 "%d: binder_alloc_buf, no vma\n", 382 "%d: binder_alloc_buf, no vma\n",
354 alloc->pid); 383 alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
723 buffer->free = 1; 752 buffer->free = 1;
724 binder_insert_free_buffer(alloc, buffer); 753 binder_insert_free_buffer(alloc, buffer);
725 alloc->free_async_space = alloc->buffer_size / 2; 754 alloc->free_async_space = alloc->buffer_size / 2;
726 barrier(); 755 binder_alloc_set_vma(alloc, vma);
727 alloc->vma = vma;
728 alloc->vma_vm_mm = vma->vm_mm;
729 mmgrab(alloc->vma_vm_mm); 756 mmgrab(alloc->vma_vm_mm);
730 757
731 return 0; 758 return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
754 int buffers, page_count; 781 int buffers, page_count;
755 struct binder_buffer *buffer; 782 struct binder_buffer *buffer;
756 783
757 BUG_ON(alloc->vma);
758
759 buffers = 0; 784 buffers = 0;
760 mutex_lock(&alloc->mutex); 785 mutex_lock(&alloc->mutex);
786 BUG_ON(alloc->vma);
787
761 while ((n = rb_first(&alloc->allocated_buffers))) { 788 while ((n = rb_first(&alloc->allocated_buffers))) {
762 buffer = rb_entry(n, struct binder_buffer, rb_node); 789 buffer = rb_entry(n, struct binder_buffer, rb_node);
763 790
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
900 */ 927 */
901void binder_alloc_vma_close(struct binder_alloc *alloc) 928void binder_alloc_vma_close(struct binder_alloc *alloc)
902{ 929{
903 WRITE_ONCE(alloc->vma, NULL); 930 binder_alloc_set_vma(alloc, NULL);
904} 931}
905 932
906/** 933/**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
935 962
936 index = page - alloc->pages; 963 index = page - alloc->pages;
937 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 964 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
938 vma = alloc->vma; 965 vma = binder_alloc_get_vma(alloc);
939 if (vma) { 966 if (vma) {
940 if (!mmget_not_zero(alloc->vma_vm_mm)) 967 if (!mmget_not_zero(alloc->vma_vm_mm))
941 goto err_mmget; 968 goto err_mmget;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 0943e7065e0e..b3c0498ee433 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
209static int alloc_lookup_fw_priv(const char *fw_name, 209static int alloc_lookup_fw_priv(const char *fw_name,
210 struct firmware_cache *fwc, 210 struct firmware_cache *fwc,
211 struct fw_priv **fw_priv, void *dbuf, 211 struct fw_priv **fw_priv, void *dbuf,
212 size_t size) 212 size_t size, enum fw_opt opt_flags)
213{ 213{
214 struct fw_priv *tmp; 214 struct fw_priv *tmp;
215 215
216 spin_lock(&fwc->lock); 216 spin_lock(&fwc->lock);
217 tmp = __lookup_fw_priv(fw_name); 217 if (!(opt_flags & FW_OPT_NOCACHE)) {
218 if (tmp) { 218 tmp = __lookup_fw_priv(fw_name);
219 kref_get(&tmp->ref); 219 if (tmp) {
220 spin_unlock(&fwc->lock); 220 kref_get(&tmp->ref);
221 *fw_priv = tmp; 221 spin_unlock(&fwc->lock);
222 pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); 222 *fw_priv = tmp;
223 return 1; 223 pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
224 return 1;
225 }
224 } 226 }
227
225 tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); 228 tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
226 if (tmp) 229 if (tmp && !(opt_flags & FW_OPT_NOCACHE))
227 list_add(&tmp->list, &fwc->head); 230 list_add(&tmp->list, &fwc->head);
228 spin_unlock(&fwc->lock); 231 spin_unlock(&fwc->lock);
229 232
@@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device,
493 */ 496 */
494static int 497static int
495_request_firmware_prepare(struct firmware **firmware_p, const char *name, 498_request_firmware_prepare(struct firmware **firmware_p, const char *name,
496 struct device *device, void *dbuf, size_t size) 499 struct device *device, void *dbuf, size_t size,
500 enum fw_opt opt_flags)
497{ 501{
498 struct firmware *firmware; 502 struct firmware *firmware;
499 struct fw_priv *fw_priv; 503 struct fw_priv *fw_priv;
@@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
511 return 0; /* assigned */ 515 return 0; /* assigned */
512 } 516 }
513 517
514 ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size); 518 ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
519 opt_flags);
515 520
516 /* 521 /*
517 * bind with 'priv' now to avoid warning in failure path 522 * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
571 goto out; 576 goto out;
572 } 577 }
573 578
574 ret = _request_firmware_prepare(&fw, name, device, buf, size); 579 ret = _request_firmware_prepare(&fw, name, device, buf, size,
580 opt_flags);
575 if (ret <= 0) /* error or already assigned */ 581 if (ret <= 0) /* error or already assigned */
576 goto out; 582 goto out;
577 583
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index d81781f22dba..34e0030f0592 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,10 +87,10 @@ struct nullb {
87#ifdef CONFIG_BLK_DEV_ZONED 87#ifdef CONFIG_BLK_DEV_ZONED
88int null_zone_init(struct nullb_device *dev); 88int null_zone_init(struct nullb_device *dev);
89void null_zone_exit(struct nullb_device *dev); 89void null_zone_exit(struct nullb_device *dev);
90blk_status_t null_zone_report(struct nullb *nullb, 90blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
91 struct nullb_cmd *cmd); 91void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
92void null_zone_write(struct nullb_cmd *cmd); 92 unsigned int nr_sectors);
93void null_zone_reset(struct nullb_cmd *cmd); 93void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
94#else 94#else
95static inline int null_zone_init(struct nullb_device *dev) 95static inline int null_zone_init(struct nullb_device *dev)
96{ 96{
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
98} 98}
99static inline void null_zone_exit(struct nullb_device *dev) {} 99static inline void null_zone_exit(struct nullb_device *dev) {}
100static inline blk_status_t null_zone_report(struct nullb *nullb, 100static inline blk_status_t null_zone_report(struct nullb *nullb,
101 struct nullb_cmd *cmd) 101 struct bio *bio)
102{ 102{
103 return BLK_STS_NOTSUPP; 103 return BLK_STS_NOTSUPP;
104} 104}
105static inline void null_zone_write(struct nullb_cmd *cmd) {} 105static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
106static inline void null_zone_reset(struct nullb_cmd *cmd) {} 106 unsigned int nr_sectors)
107{
108}
109static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
107#endif /* CONFIG_BLK_DEV_ZONED */ 110#endif /* CONFIG_BLK_DEV_ZONED */
108#endif /* __NULL_BLK_H */ 111#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 6127e3ff7b4b..093b614d6524 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
1157 } 1157 }
1158} 1158}
1159 1159
1160static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
1161{
1162 struct nullb_device *dev = cmd->nq->dev;
1163
1164 if (dev->queue_mode == NULL_Q_BIO) {
1165 if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
1166 cmd->error = null_zone_report(nullb, cmd->bio);
1167 return true;
1168 }
1169 } else {
1170 if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
1171 cmd->error = null_zone_report(nullb, cmd->rq->bio);
1172 return true;
1173 }
1174 }
1175
1176 return false;
1177}
1178
1160static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) 1179static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
1161{ 1180{
1162 struct nullb_device *dev = cmd->nq->dev; 1181 struct nullb_device *dev = cmd->nq->dev;
1163 struct nullb *nullb = dev->nullb; 1182 struct nullb *nullb = dev->nullb;
1164 int err = 0; 1183 int err = 0;
1165 1184
1166 if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { 1185 if (cmd_report_zone(nullb, cmd))
1167 cmd->error = null_zone_report(nullb, cmd);
1168 goto out; 1186 goto out;
1169 }
1170 1187
1171 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { 1188 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
1172 struct request *rq = cmd->rq; 1189 struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
1234 cmd->error = errno_to_blk_status(err); 1251 cmd->error = errno_to_blk_status(err);
1235 1252
1236 if (!cmd->error && dev->zoned) { 1253 if (!cmd->error && dev->zoned) {
1237 if (req_op(cmd->rq) == REQ_OP_WRITE) 1254 sector_t sector;
1238 null_zone_write(cmd); 1255 unsigned int nr_sectors;
1239 else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET) 1256 int op;
1240 null_zone_reset(cmd); 1257
1258 if (dev->queue_mode == NULL_Q_BIO) {
1259 op = bio_op(cmd->bio);
1260 sector = cmd->bio->bi_iter.bi_sector;
1261 nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
1262 } else {
1263 op = req_op(cmd->rq);
1264 sector = blk_rq_pos(cmd->rq);
1265 nr_sectors = blk_rq_sectors(cmd->rq);
1266 }
1267
1268 if (op == REQ_OP_WRITE)
1269 null_zone_write(cmd, sector, nr_sectors);
1270 else if (op == REQ_OP_ZONE_RESET)
1271 null_zone_reset(cmd, sector);
1241 } 1272 }
1242out: 1273out:
1243 /* Complete IO by inline, softirq or timer */ 1274 /* Complete IO by inline, softirq or timer */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index a979ca00d7be..7c6b86d98700 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
48 kvfree(dev->zones); 48 kvfree(dev->zones);
49} 49}
50 50
51static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, 51static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
52 unsigned int zno, unsigned int nr_zones) 52 unsigned int zno, unsigned int nr_zones)
53{ 53{
54 struct blk_zone_report_hdr *hdr = NULL; 54 struct blk_zone_report_hdr *hdr = NULL;
55 struct bio_vec bvec; 55 struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
57 void *addr; 57 void *addr;
58 unsigned int zones_to_cpy; 58 unsigned int zones_to_cpy;
59 59
60 bio_for_each_segment(bvec, rq->bio, iter) { 60 bio_for_each_segment(bvec, bio, iter) {
61 addr = kmap_atomic(bvec.bv_page); 61 addr = kmap_atomic(bvec.bv_page);
62 62
63 zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); 63 zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
84 } 84 }
85} 85}
86 86
87blk_status_t null_zone_report(struct nullb *nullb, 87blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
88 struct nullb_cmd *cmd)
89{ 88{
90 struct nullb_device *dev = nullb->dev; 89 struct nullb_device *dev = nullb->dev;
91 struct request *rq = cmd->rq; 90 unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
92 unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
93 unsigned int nr_zones = dev->nr_zones - zno; 91 unsigned int nr_zones = dev->nr_zones - zno;
94 unsigned int max_zones = (blk_rq_bytes(rq) / 92 unsigned int max_zones;
95 sizeof(struct blk_zone)) - 1;
96 93
94 max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
97 nr_zones = min_t(unsigned int, nr_zones, max_zones); 95 nr_zones = min_t(unsigned int, nr_zones, max_zones);
98 96 null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
99 null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
100 97
101 return BLK_STS_OK; 98 return BLK_STS_OK;
102} 99}
103 100
104void null_zone_write(struct nullb_cmd *cmd) 101void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
102 unsigned int nr_sectors)
105{ 103{
106 struct nullb_device *dev = cmd->nq->dev; 104 struct nullb_device *dev = cmd->nq->dev;
107 struct request *rq = cmd->rq;
108 sector_t sector = blk_rq_pos(rq);
109 unsigned int rq_sectors = blk_rq_sectors(rq);
110 unsigned int zno = null_zone_no(dev, sector); 105 unsigned int zno = null_zone_no(dev, sector);
111 struct blk_zone *zone = &dev->zones[zno]; 106 struct blk_zone *zone = &dev->zones[zno];
112 107
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
118 case BLK_ZONE_COND_EMPTY: 113 case BLK_ZONE_COND_EMPTY:
119 case BLK_ZONE_COND_IMP_OPEN: 114 case BLK_ZONE_COND_IMP_OPEN:
120 /* Writes must be at the write pointer position */ 115 /* Writes must be at the write pointer position */
121 if (blk_rq_pos(rq) != zone->wp) { 116 if (sector != zone->wp) {
122 cmd->error = BLK_STS_IOERR; 117 cmd->error = BLK_STS_IOERR;
123 break; 118 break;
124 } 119 }
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
126 if (zone->cond == BLK_ZONE_COND_EMPTY) 121 if (zone->cond == BLK_ZONE_COND_EMPTY)
127 zone->cond = BLK_ZONE_COND_IMP_OPEN; 122 zone->cond = BLK_ZONE_COND_IMP_OPEN;
128 123
129 zone->wp += rq_sectors; 124 zone->wp += nr_sectors;
130 if (zone->wp == zone->start + zone->len) 125 if (zone->wp == zone->start + zone->len)
131 zone->cond = BLK_ZONE_COND_FULL; 126 zone->cond = BLK_ZONE_COND_FULL;
132 break; 127 break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
137 } 132 }
138} 133}
139 134
140void null_zone_reset(struct nullb_cmd *cmd) 135void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
141{ 136{
142 struct nullb_device *dev = cmd->nq->dev; 137 struct nullb_device *dev = cmd->nq->dev;
143 struct request *rq = cmd->rq; 138 unsigned int zno = null_zone_no(dev, sector);
144 unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
145 struct blk_zone *zone = &dev->zones[zno]; 139 struct blk_zone *zone = &dev->zones[zno];
146 140
147 zone->cond = BLK_ZONE_COND_EMPTY; 141 zone->cond = BLK_ZONE_COND_EMPTY;
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a3397664f800..97d6856c9c0f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -59,8 +59,6 @@ enum bt_states {
59 BT_STATE_RESET3, 59 BT_STATE_RESET3,
60 BT_STATE_RESTART, 60 BT_STATE_RESTART,
61 BT_STATE_PRINTME, 61 BT_STATE_PRINTME,
62 BT_STATE_CAPABILITIES_BEGIN,
63 BT_STATE_CAPABILITIES_END,
64 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ 62 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
65}; 63};
66 64
@@ -86,7 +84,6 @@ struct si_sm_data {
86 int error_retries; /* end of "common" fields */ 84 int error_retries; /* end of "common" fields */
87 int nonzero_status; /* hung BMCs stay all 0 */ 85 int nonzero_status; /* hung BMCs stay all 0 */
88 enum bt_states complete; /* to divert the state machine */ 86 enum bt_states complete; /* to divert the state machine */
89 int BT_CAP_outreqs;
90 long BT_CAP_req2rsp; 87 long BT_CAP_req2rsp;
91 int BT_CAP_retries; /* Recommended retries */ 88 int BT_CAP_retries; /* Recommended retries */
92}; 89};
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
137 case BT_STATE_RESET3: return("RESET3"); 134 case BT_STATE_RESET3: return("RESET3");
138 case BT_STATE_RESTART: return("RESTART"); 135 case BT_STATE_RESTART: return("RESTART");
139 case BT_STATE_LONG_BUSY: return("LONG_BUSY"); 136 case BT_STATE_LONG_BUSY: return("LONG_BUSY");
140 case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
141 case BT_STATE_CAPABILITIES_END: return("CAP_END");
142 } 137 }
143 return("BAD STATE"); 138 return("BAD STATE");
144} 139}
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
185 bt->complete = BT_STATE_IDLE; /* end here */ 180 bt->complete = BT_STATE_IDLE; /* end here */
186 bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; 181 bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
187 bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; 182 bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
188 /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
189 return 3; /* We claim 3 bytes of space; ought to check SPMI table */ 183 return 3; /* We claim 3 bytes of space; ought to check SPMI table */
190} 184}
191 185
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
451 445
452static enum si_sm_result bt_event(struct si_sm_data *bt, long time) 446static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
453{ 447{
454 unsigned char status, BT_CAP[8]; 448 unsigned char status;
455 static enum bt_states last_printed = BT_STATE_PRINTME; 449 static enum bt_states last_printed = BT_STATE_PRINTME;
456 int i; 450 int i;
457 451
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
504 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ 498 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
505 BT_CONTROL(BT_H_BUSY); 499 BT_CONTROL(BT_H_BUSY);
506 500
507 bt->timeout = bt->BT_CAP_req2rsp;
508
509 /* Read BT capabilities if it hasn't been done yet */
510 if (!bt->BT_CAP_outreqs)
511 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
512 SI_SM_CALL_WITHOUT_DELAY);
513 BT_SI_SM_RETURN(SI_SM_IDLE); 501 BT_SI_SM_RETURN(SI_SM_IDLE);
514 502
515 case BT_STATE_XACTION_START: 503 case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
614 BT_STATE_CHANGE(BT_STATE_XACTION_START, 602 BT_STATE_CHANGE(BT_STATE_XACTION_START,
615 SI_SM_CALL_WITH_DELAY); 603 SI_SM_CALL_WITH_DELAY);
616 604
617 /*
618 * Get BT Capabilities, using timing of upper level state machine.
619 * Set outreqs to prevent infinite loop on timeout.
620 */
621 case BT_STATE_CAPABILITIES_BEGIN:
622 bt->BT_CAP_outreqs = 1;
623 {
624 unsigned char GetBT_CAP[] = { 0x18, 0x36 };
625 bt->state = BT_STATE_IDLE;
626 bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
627 }
628 bt->complete = BT_STATE_CAPABILITIES_END;
629 BT_STATE_CHANGE(BT_STATE_XACTION_START,
630 SI_SM_CALL_WITH_DELAY);
631
632 case BT_STATE_CAPABILITIES_END:
633 i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
634 bt_init_data(bt, bt->io);
635 if ((i == 8) && !BT_CAP[2]) {
636 bt->BT_CAP_outreqs = BT_CAP[3];
637 bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
638 bt->BT_CAP_retries = BT_CAP[7];
639 } else
640 printk(KERN_WARNING "IPMI BT: using default values\n");
641 if (!bt->BT_CAP_outreqs)
642 bt->BT_CAP_outreqs = 1;
643 printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
644 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
645 bt->timeout = bt->BT_CAP_req2rsp;
646 return SI_SM_CALL_WITHOUT_DELAY;
647
648 default: /* should never occur */ 605 default: /* should never occur */
649 return error_recovery(bt, 606 return error_recovery(bt,
650 status, 607 status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
655 612
656static int bt_detect(struct si_sm_data *bt) 613static int bt_detect(struct si_sm_data *bt)
657{ 614{
615 unsigned char GetBT_CAP[] = { 0x18, 0x36 };
616 unsigned char BT_CAP[8];
617 enum si_sm_result smi_result;
618 int rv;
619
658 /* 620 /*
659 * It's impossible for the BT status and interrupt registers to be 621 * It's impossible for the BT status and interrupt registers to be
660 * all 1's, (assuming a properly functioning, self-initialized BMC) 622 * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
665 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) 627 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
666 return 1; 628 return 1;
667 reset_flags(bt); 629 reset_flags(bt);
630
631 /*
632 * Try getting the BT capabilities here.
633 */
634 rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
635 if (rv) {
636 dev_warn(bt->io->dev,
637 "Can't start capabilities transaction: %d\n", rv);
638 goto out_no_bt_cap;
639 }
640
641 smi_result = SI_SM_CALL_WITHOUT_DELAY;
642 for (;;) {
643 if (smi_result == SI_SM_CALL_WITH_DELAY ||
644 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
645 schedule_timeout_uninterruptible(1);
646 smi_result = bt_event(bt, jiffies_to_usecs(1));
647 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
648 smi_result = bt_event(bt, 0);
649 } else
650 break;
651 }
652
653 rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
654 bt_init_data(bt, bt->io);
655 if (rv < 8) {
656 dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
657 goto out_no_bt_cap;
658 }
659
660 if (BT_CAP[2]) {
661 dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
662out_no_bt_cap:
663 dev_warn(bt->io->dev, "using default values\n");
664 } else {
665 bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
666 bt->BT_CAP_retries = BT_CAP[7];
667 }
668
669 dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
670 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
671
668 return 0; 672 return 0;
669} 673}
670 674
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 51832b8a2c62..7fc9612070a1 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3381 3381
3382 rv = handlers->start_processing(send_info, intf); 3382 rv = handlers->start_processing(send_info, intf);
3383 if (rv) 3383 if (rv)
3384 goto out; 3384 goto out_err;
3385 3385
3386 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3386 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3387 if (rv) { 3387 if (rv) {
3388 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3388 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3389 goto out; 3389 goto out_err_started;
3390 } 3390 }
3391 3391
3392 mutex_lock(&intf->bmc_reg_mutex); 3392 mutex_lock(&intf->bmc_reg_mutex);
3393 rv = __scan_channels(intf, &id); 3393 rv = __scan_channels(intf, &id);
3394 mutex_unlock(&intf->bmc_reg_mutex); 3394 mutex_unlock(&intf->bmc_reg_mutex);
3395 if (rv)
3396 goto out_err_bmc_reg;
3395 3397
3396 out: 3398 /*
3397 if (rv) { 3399 * Keep memory order straight for RCU readers. Make
3398 ipmi_bmc_unregister(intf); 3400 * sure everything else is committed to memory before
3399 list_del_rcu(&intf->link); 3401 * setting intf_num to mark the interface valid.
3400 mutex_unlock(&ipmi_interfaces_mutex); 3402 */
3401 synchronize_srcu(&ipmi_interfaces_srcu); 3403 smp_wmb();
3402 cleanup_srcu_struct(&intf->users_srcu); 3404 intf->intf_num = i;
3403 kref_put(&intf->refcount, intf_free); 3405 mutex_unlock(&ipmi_interfaces_mutex);
3404 } else {
3405 /*
3406 * Keep memory order straight for RCU readers. Make
3407 * sure everything else is committed to memory before
3408 * setting intf_num to mark the interface valid.
3409 */
3410 smp_wmb();
3411 intf->intf_num = i;
3412 mutex_unlock(&ipmi_interfaces_mutex);
3413 3406
3414 /* After this point the interface is legal to use. */ 3407 /* After this point the interface is legal to use. */
3415 call_smi_watchers(i, intf->si_dev); 3408 call_smi_watchers(i, intf->si_dev);
3416 } 3409
3410 return 0;
3411
3412 out_err_bmc_reg:
3413 ipmi_bmc_unregister(intf);
3414 out_err_started:
3415 if (intf->handlers->shutdown)
3416 intf->handlers->shutdown(intf->send_info);
3417 out_err:
3418 list_del_rcu(&intf->link);
3419 mutex_unlock(&ipmi_interfaces_mutex);
3420 synchronize_srcu(&ipmi_interfaces_srcu);
3421 cleanup_srcu_struct(&intf->users_srcu);
3422 kref_put(&intf->refcount, intf_free);
3417 3423
3418 return rv; 3424 return rv;
3419} 3425}
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
3504 } 3510 }
3505 srcu_read_unlock(&intf->users_srcu, index); 3511 srcu_read_unlock(&intf->users_srcu, index);
3506 3512
3507 intf->handlers->shutdown(intf->send_info); 3513 if (intf->handlers->shutdown)
3514 intf->handlers->shutdown(intf->send_info);
3508 3515
3509 cleanup_smi_msgs(intf); 3516 cleanup_smi_msgs(intf);
3510 3517
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 90ec010bffbd..5faa917df1b6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
2083 si_to_str[new_smi->io.si_type]); 2083 si_to_str[new_smi->io.si_type]);
2084 2084
2085 WARN_ON(new_smi->io.dev->init_name != NULL); 2085 WARN_ON(new_smi->io.dev->init_name != NULL);
2086 kfree(init_name);
2087
2088 return 0;
2089
2090out_err:
2091 if (new_smi->intf) {
2092 ipmi_unregister_smi(new_smi->intf);
2093 new_smi->intf = NULL;
2094 }
2095 2086
2087 out_err:
2096 kfree(init_name); 2088 kfree(init_name);
2097
2098 return rv; 2089 return rv;
2099} 2090}
2100 2091
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
2227 2218
2228 kfree(smi_info->si_sm); 2219 kfree(smi_info->si_sm);
2229 smi_info->si_sm = NULL; 2220 smi_info->si_sm = NULL;
2221
2222 smi_info->intf = NULL;
2230} 2223}
2231 2224
2232/* 2225/*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
2240 2233
2241 list_del(&smi_info->link); 2234 list_del(&smi_info->link);
2242 2235
2243 if (smi_info->intf) { 2236 if (smi_info->intf)
2244 ipmi_unregister_smi(smi_info->intf); 2237 ipmi_unregister_smi(smi_info->intf);
2245 smi_info->intf = NULL;
2246 }
2247 2238
2248 if (smi_info->pdev) { 2239 if (smi_info->pdev) {
2249 if (smi_info->pdev_registered) 2240 if (smi_info->pdev_registered)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 18e4650c233b..29e67a80fb20 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -181,6 +181,8 @@ struct ssif_addr_info {
181 struct device *dev; 181 struct device *dev;
182 struct i2c_client *client; 182 struct i2c_client *client;
183 183
184 struct i2c_client *added_client;
185
184 struct mutex clients_mutex; 186 struct mutex clients_mutex;
185 struct list_head clients; 187 struct list_head clients;
186 188
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
1214 complete(&ssif_info->wake_thread); 1216 complete(&ssif_info->wake_thread);
1215 kthread_stop(ssif_info->thread); 1217 kthread_stop(ssif_info->thread);
1216 } 1218 }
1217
1218 /*
1219 * No message can be outstanding now, we have removed the
1220 * upper layer and it permitted us to do so.
1221 */
1222 kfree(ssif_info);
1223} 1219}
1224 1220
1225static int ssif_remove(struct i2c_client *client) 1221static int ssif_remove(struct i2c_client *client)
1226{ 1222{
1227 struct ssif_info *ssif_info = i2c_get_clientdata(client); 1223 struct ssif_info *ssif_info = i2c_get_clientdata(client);
1228 struct ipmi_smi *intf;
1229 struct ssif_addr_info *addr_info; 1224 struct ssif_addr_info *addr_info;
1230 1225
1231 if (!ssif_info) 1226 if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
1235 * After this point, we won't deliver anything asychronously 1230 * After this point, we won't deliver anything asychronously
1236 * to the message handler. We can unregister ourself. 1231 * to the message handler. We can unregister ourself.
1237 */ 1232 */
1238 intf = ssif_info->intf; 1233 ipmi_unregister_smi(ssif_info->intf);
1239 ssif_info->intf = NULL;
1240 ipmi_unregister_smi(intf);
1241 1234
1242 list_for_each_entry(addr_info, &ssif_infos, link) { 1235 list_for_each_entry(addr_info, &ssif_infos, link) {
1243 if (addr_info->client == client) { 1236 if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
1246 } 1239 }
1247 } 1240 }
1248 1241
1242 kfree(ssif_info);
1243
1249 return 0; 1244 return 0;
1250} 1245}
1251 1246
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1648 1643
1649 out: 1644 out:
1650 if (rv) { 1645 if (rv) {
1651 /* 1646 if (addr_info)
1652 * Note that if addr_info->client is assigned, we 1647 addr_info->client = NULL;
1653 * leave it. The i2c client hangs around even if we 1648
1654 * return a failure here, and the failure here is not
1655 * propagated back to the i2c code. This seems to be
1656 * design intent, strange as it may be. But if we
1657 * don't leave it, ssif_platform_remove will not remove
1658 * the client like it should.
1659 */
1660 dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); 1649 dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
1661 kfree(ssif_info); 1650 kfree(ssif_info);
1662 } 1651 }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
1676 if (adev->type != &i2c_adapter_type) 1665 if (adev->type != &i2c_adapter_type)
1677 return 0; 1666 return 0;
1678 1667
1679 i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); 1668 addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
1669 &addr_info->binfo);
1680 1670
1681 if (!addr_info->adapter_name) 1671 if (!addr_info->adapter_name)
1682 return 1; /* Only try the first I2C adapter by default. */ 1672 return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
1849 return 0; 1839 return 0;
1850 1840
1851 mutex_lock(&ssif_infos_mutex); 1841 mutex_lock(&ssif_infos_mutex);
1852 i2c_unregister_device(addr_info->client); 1842 i2c_unregister_device(addr_info->added_client);
1853 1843
1854 list_del(&addr_info->link); 1844 list_del(&addr_info->link);
1855 kfree(addr_info); 1845 kfree(addr_info);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index bb882ab161fe..e6124bd548df 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -16,6 +16,8 @@
16 16
17#include "kcs_bmc.h" 17#include "kcs_bmc.h"
18 18
19#define DEVICE_NAME "ipmi-kcs"
20
19#define KCS_MSG_BUFSIZ 1000 21#define KCS_MSG_BUFSIZ 1000
20 22
21#define KCS_ZERO_DATA 0 23#define KCS_ZERO_DATA 0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
429 if (!kcs_bmc) 431 if (!kcs_bmc)
430 return NULL; 432 return NULL;
431 433
432 dev_set_name(dev, "ipmi-kcs%u", channel);
433
434 spin_lock_init(&kcs_bmc->lock); 434 spin_lock_init(&kcs_bmc->lock);
435 kcs_bmc->channel = channel; 435 kcs_bmc->channel = channel;
436 436
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
444 return NULL; 444 return NULL;
445 445
446 kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; 446 kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
447 kcs_bmc->miscdev.name = dev_name(dev); 447 kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
448 DEVICE_NAME, channel);
448 kcs_bmc->miscdev.fops = &kcs_bmc_fops; 449 kcs_bmc->miscdev.fops = &kcs_bmc_fops;
449 450
450 return kcs_bmc; 451 return kcs_bmc;
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index b76cb17d879c..adfd316db1a8 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
639 int ret; 639 int ret;
640 struct device *dev = &mbdev->dev; 640 struct device *dev = &mbdev->dev;
641 641
642 mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL); 642 mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
643 if (!mic_dma_dev) { 643 if (!mic_dma_dev) {
644 ret = -ENOMEM; 644 ret = -ENOMEM;
645 goto alloc_error; 645 goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
664reg_error: 664reg_error:
665 mic_dma_uninit(mic_dma_dev); 665 mic_dma_uninit(mic_dma_dev);
666init_error: 666init_error:
667 kfree(mic_dma_dev);
668 mic_dma_dev = NULL; 667 mic_dma_dev = NULL;
669alloc_error: 668alloc_error:
670 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); 669 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
674static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) 673static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
675{ 674{
676 mic_dma_uninit(mic_dma_dev); 675 mic_dma_uninit(mic_dma_dev);
677 kfree(mic_dma_dev);
678} 676}
679 677
680/* DEBUGFS CODE */ 678/* DEBUGFS CODE */
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index fc9fd2d0482f..0b840531ef33 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
420 /* Create region for each port */ 420 /* Create region for each port */
421 fme_region = dfl_fme_create_region(pdata, mgr, 421 fme_region = dfl_fme_create_region(pdata, mgr,
422 fme_br->br, i); 422 fme_br->br, i);
423 if (!fme_region) { 423 if (IS_ERR(fme_region)) {
424 ret = PTR_ERR(fme_region); 424 ret = PTR_ERR(fme_region);
425 goto destroy_region; 425 goto destroy_region;
426 } 426 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b6e9df11115d..b31d121a876b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
39{ 39{
40 struct drm_gem_object *gobj; 40 struct drm_gem_object *gobj;
41 unsigned long size; 41 unsigned long size;
42 int r;
42 43
43 gobj = drm_gem_object_lookup(p->filp, data->handle); 44 gobj = drm_gem_object_lookup(p->filp, data->handle);
44 if (gobj == NULL) 45 if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
50 p->uf_entry.tv.shared = true; 51 p->uf_entry.tv.shared = true;
51 p->uf_entry.user_pages = NULL; 52 p->uf_entry.user_pages = NULL;
52 53
53 size = amdgpu_bo_size(p->uf_entry.robj);
54 if (size != PAGE_SIZE || (data->offset + 8) > size)
55 return -EINVAL;
56
57 *offset = data->offset;
58
59 drm_gem_object_put_unlocked(gobj); 54 drm_gem_object_put_unlocked(gobj);
60 55
56 size = amdgpu_bo_size(p->uf_entry.robj);
57 if (size != PAGE_SIZE || (data->offset + 8) > size) {
58 r = -EINVAL;
59 goto error_unref;
60 }
61
61 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { 62 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
62 amdgpu_bo_unref(&p->uf_entry.robj); 63 r = -EINVAL;
63 return -EINVAL; 64 goto error_unref;
64 } 65 }
65 66
67 *offset = data->offset;
68
66 return 0; 69 return 0;
70
71error_unref:
72 amdgpu_bo_unref(&p->uf_entry.robj);
73 return r;
67} 74}
68 75
69static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, 76static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1262,10 +1269,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1262error_abort: 1269error_abort:
1263 dma_fence_put(&job->base.s_fence->finished); 1270 dma_fence_put(&job->base.s_fence->finished);
1264 job->base.s_fence = NULL; 1271 job->base.s_fence = NULL;
1272 amdgpu_mn_unlock(p->mn);
1265 1273
1266error_unlock: 1274error_unlock:
1267 amdgpu_job_free(job); 1275 amdgpu_job_free(job);
1268 amdgpu_mn_unlock(p->mn);
1269 return r; 1276 return r;
1270} 1277}
1271 1278
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ab5ccbc14ac..39bf2ce548c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2063 static enum amd_ip_block_type ip_order[] = { 2063 static enum amd_ip_block_type ip_order[] = {
2064 AMD_IP_BLOCK_TYPE_GMC, 2064 AMD_IP_BLOCK_TYPE_GMC,
2065 AMD_IP_BLOCK_TYPE_COMMON, 2065 AMD_IP_BLOCK_TYPE_COMMON,
2066 AMD_IP_BLOCK_TYPE_PSP,
2066 AMD_IP_BLOCK_TYPE_IH, 2067 AMD_IP_BLOCK_TYPE_IH,
2067 }; 2068 };
2068 2069
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2093 2094
2094 static enum amd_ip_block_type ip_order[] = { 2095 static enum amd_ip_block_type ip_order[] = {
2095 AMD_IP_BLOCK_TYPE_SMC, 2096 AMD_IP_BLOCK_TYPE_SMC,
2096 AMD_IP_BLOCK_TYPE_PSP,
2097 AMD_IP_BLOCK_TYPE_DCE, 2097 AMD_IP_BLOCK_TYPE_DCE,
2098 AMD_IP_BLOCK_TYPE_GFX, 2098 AMD_IP_BLOCK_TYPE_GFX,
2099 AMD_IP_BLOCK_TYPE_SDMA, 2099 AMD_IP_BLOCK_TYPE_SDMA,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e7ca4623cfb9..7c3b634d8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
70 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), 70 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
71 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 71 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
72 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 72 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
73 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
73 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), 74 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
74 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 75 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
75 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 76 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) 85 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
86 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
85}; 87};
86 88
87static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 89static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
109 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 111 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
110 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), 112 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
111 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 113 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
112 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) 114 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
115 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
113}; 116};
114 117
115static const struct soc15_reg_golden golden_settings_sdma_4_2[] = 118static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..c7afee37b2b8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/mmu_context.h> 34#include <linux/mmu_context.h>
35#include <linux/sched/mm.h>
35#include <linux/types.h> 36#include <linux/types.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/rbtree.h> 38#include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1792 info = (struct kvmgt_guest_info *)handle; 1793 info = (struct kvmgt_guest_info *)handle;
1793 kvm = info->kvm; 1794 kvm = info->kvm;
1794 1795
1795 if (kthread) 1796 if (kthread) {
1797 if (!mmget_not_zero(kvm->mm))
1798 return -EFAULT;
1796 use_mm(kvm->mm); 1799 use_mm(kvm->mm);
1800 }
1797 1801
1798 idx = srcu_read_lock(&kvm->srcu); 1802 idx = srcu_read_lock(&kvm->srcu);
1799 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1803 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1800 kvm_read_guest(kvm, gpa, buf, len); 1804 kvm_read_guest(kvm, gpa, buf, len);
1801 srcu_read_unlock(&kvm->srcu, idx); 1805 srcu_read_unlock(&kvm->srcu, idx);
1802 1806
1803 if (kthread) 1807 if (kthread) {
1804 unuse_mm(kvm->mm); 1808 unuse_mm(kvm->mm);
1809 mmput(kvm->mm);
1810 }
1805 1811
1806 return ret; 1812 return ret;
1807} 1813}
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..b0d3a43ccd03 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
42#define DEVICE_TYPE_EFP3 0x20 42#define DEVICE_TYPE_EFP3 0x20
43#define DEVICE_TYPE_EFP4 0x10 43#define DEVICE_TYPE_EFP4 0x10
44 44
45#define DEV_SIZE 38
46
47struct opregion_header { 45struct opregion_header {
48 u8 signature[16]; 46 u8 signature[16];
49 u32 size; 47 u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
63 u16 size; /* data size */ 61 u16 size; /* data size */
64} __packed; 62} __packed;
65 63
64/* For supporting windows guest with opregion, here hardcode the emulated
65 * bdb header version as '186', and the corresponding child_device_config
66 * length should be '33' but not '38'.
67 */
66struct efp_child_device_config { 68struct efp_child_device_config {
67 u16 handle; 69 u16 handle;
68 u16 device_type; 70 u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
109 u8 mipi_bridge_type; /* 171 */ 111 u8 mipi_bridge_type; /* 171 */
110 u16 device_class_ext; 112 u16 device_class_ext;
111 u8 dvo_function; 113 u8 dvo_function;
112 u8 dp_usb_type_c:1; /* 195 */
113 u8 skip6:7;
114 u8 dp_usb_type_c_2x_gpio_index; /* 195 */
115 u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
116 u8 iboost_dp:4; /* 196 */
117 u8 iboost_hdmi:4; /* 196 */
118} __packed; 114} __packed;
119 115
120struct vbt { 116struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
155 v->header.bdb_offset = offsetof(struct vbt, bdb_header); 151 v->header.bdb_offset = offsetof(struct vbt, bdb_header);
156 152
157 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); 153 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
158 v->bdb_header.version = 186; /* child_dev_size = 38 */ 154 v->bdb_header.version = 186; /* child_dev_size = 33 */
159 v->bdb_header.header_size = sizeof(v->bdb_header); 155 v->bdb_header.header_size = sizeof(v->bdb_header);
160 156
161 v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) 157 v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
169 165
170 /* child device */ 166 /* child device */
171 num_child = 4; /* each port has one child */ 167 num_child = 4; /* each port has one child */
168 v->general_definitions.child_dev_size =
169 sizeof(struct efp_child_device_config);
172 v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; 170 v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
173 /* size will include child devices */ 171 /* size will include child devices */
174 v->general_definitions_header.size = 172 v->general_definitions_header.size =
175 sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE; 173 sizeof(struct bdb_general_definitions) +
176 v->general_definitions.child_dev_size = DEV_SIZE; 174 num_child * v->general_definitions.child_dev_size;
177 175
178 /* portA */ 176 /* portA */
179 v->child0.handle = DEVICE_TYPE_EFP1; 177 v->child0.handle = DEVICE_TYPE_EFP1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4a3c8ee9a973..d2951096bca0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5079 mutex_lock(&dev_priv->pcu_lock); 5079 mutex_lock(&dev_priv->pcu_lock);
5080 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 5080 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5081 mutex_unlock(&dev_priv->pcu_lock); 5081 mutex_unlock(&dev_priv->pcu_lock);
5082 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 5082 /*
5083 * Wait for PCODE to finish disabling IPS. The BSpec specified
5084 * 42ms timeout value leads to occasional timeouts so use 100ms
5085 * instead.
5086 */
5083 if (intel_wait_for_register(dev_priv, 5087 if (intel_wait_for_register(dev_priv,
5084 IPS_CTL, IPS_ENABLE, 0, 5088 IPS_CTL, IPS_ENABLE, 0,
5085 42)) 5089 100))
5086 DRM_ERROR("Timed out waiting for IPS disable\n"); 5090 DRM_ERROR("Timed out waiting for IPS disable\n");
5087 } else { 5091 } else {
5088 I915_WRITE(IPS_CTL, 0); 5092 I915_WRITE(IPS_CTL, 0);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c2f10d899329..443dfaefd7a6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -181,8 +181,9 @@ struct intel_overlay {
181 u32 brightness, contrast, saturation; 181 u32 brightness, contrast, saturation;
182 u32 old_xscale, old_yscale; 182 u32 old_xscale, old_yscale;
183 /* register access */ 183 /* register access */
184 u32 flip_addr;
185 struct drm_i915_gem_object *reg_bo; 184 struct drm_i915_gem_object *reg_bo;
185 struct overlay_registers __iomem *regs;
186 u32 flip_addr;
186 /* flip handling */ 187 /* flip handling */
187 struct i915_gem_active last_flip; 188 struct i915_gem_active last_flip;
188}; 189};
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
210 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); 211 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
211} 212}
212 213
213static struct overlay_registers __iomem *
214intel_overlay_map_regs(struct intel_overlay *overlay)
215{
216 struct drm_i915_private *dev_priv = overlay->i915;
217 struct overlay_registers __iomem *regs;
218
219 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
220 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
221 else
222 regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
223 overlay->flip_addr,
224 PAGE_SIZE);
225
226 return regs;
227}
228
229static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
230 struct overlay_registers __iomem *regs)
231{
232 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
233 io_mapping_unmap(regs);
234}
235
236static void intel_overlay_submit_request(struct intel_overlay *overlay, 214static void intel_overlay_submit_request(struct intel_overlay *overlay,
237 struct i915_request *rq, 215 struct i915_request *rq,
238 i915_gem_retire_fn retire) 216 i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
784 struct drm_i915_gem_object *new_bo, 762 struct drm_i915_gem_object *new_bo,
785 struct put_image_params *params) 763 struct put_image_params *params)
786{ 764{
787 int ret, tmp_width; 765 struct overlay_registers __iomem *regs = overlay->regs;
788 struct overlay_registers __iomem *regs;
789 bool scale_changed = false;
790 struct drm_i915_private *dev_priv = overlay->i915; 766 struct drm_i915_private *dev_priv = overlay->i915;
791 u32 swidth, swidthsw, sheight, ostride; 767 u32 swidth, swidthsw, sheight, ostride;
792 enum pipe pipe = overlay->crtc->pipe; 768 enum pipe pipe = overlay->crtc->pipe;
769 bool scale_changed = false;
793 struct i915_vma *vma; 770 struct i915_vma *vma;
771 int ret, tmp_width;
794 772
795 lockdep_assert_held(&dev_priv->drm.struct_mutex); 773 lockdep_assert_held(&dev_priv->drm.struct_mutex);
796 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 774 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
815 793
816 if (!overlay->active) { 794 if (!overlay->active) {
817 u32 oconfig; 795 u32 oconfig;
818 regs = intel_overlay_map_regs(overlay); 796
819 if (!regs) {
820 ret = -ENOMEM;
821 goto out_unpin;
822 }
823 oconfig = OCONF_CC_OUT_8BIT; 797 oconfig = OCONF_CC_OUT_8BIT;
824 if (IS_GEN4(dev_priv)) 798 if (IS_GEN4(dev_priv))
825 oconfig |= OCONF_CSC_MODE_BT709; 799 oconfig |= OCONF_CSC_MODE_BT709;
826 oconfig |= pipe == 0 ? 800 oconfig |= pipe == 0 ?
827 OCONF_PIPE_A : OCONF_PIPE_B; 801 OCONF_PIPE_A : OCONF_PIPE_B;
828 iowrite32(oconfig, &regs->OCONFIG); 802 iowrite32(oconfig, &regs->OCONFIG);
829 intel_overlay_unmap_regs(overlay, regs);
830 803
831 ret = intel_overlay_on(overlay); 804 ret = intel_overlay_on(overlay);
832 if (ret != 0) 805 if (ret != 0)
833 goto out_unpin; 806 goto out_unpin;
834 } 807 }
835 808
836 regs = intel_overlay_map_regs(overlay);
837 if (!regs) {
838 ret = -ENOMEM;
839 goto out_unpin;
840 }
841
842 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS); 809 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
843 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ); 810 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
844 811
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
882 849
883 iowrite32(overlay_cmd_reg(params), &regs->OCMD); 850 iowrite32(overlay_cmd_reg(params), &regs->OCMD);
884 851
885 intel_overlay_unmap_regs(overlay, regs);
886
887 ret = intel_overlay_continue(overlay, vma, scale_changed); 852 ret = intel_overlay_continue(overlay, vma, scale_changed);
888 if (ret) 853 if (ret)
889 goto out_unpin; 854 goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
901int intel_overlay_switch_off(struct intel_overlay *overlay) 866int intel_overlay_switch_off(struct intel_overlay *overlay)
902{ 867{
903 struct drm_i915_private *dev_priv = overlay->i915; 868 struct drm_i915_private *dev_priv = overlay->i915;
904 struct overlay_registers __iomem *regs;
905 int ret; 869 int ret;
906 870
907 lockdep_assert_held(&dev_priv->drm.struct_mutex); 871 lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
918 if (ret != 0) 882 if (ret != 0)
919 return ret; 883 return ret;
920 884
921 regs = intel_overlay_map_regs(overlay); 885 iowrite32(0, &overlay->regs->OCMD);
922 iowrite32(0, &regs->OCMD);
923 intel_overlay_unmap_regs(overlay, regs);
924 886
925 return intel_overlay_off(overlay); 887 return intel_overlay_off(overlay);
926} 888}
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1305 struct drm_intel_overlay_attrs *attrs = data; 1267 struct drm_intel_overlay_attrs *attrs = data;
1306 struct drm_i915_private *dev_priv = to_i915(dev); 1268 struct drm_i915_private *dev_priv = to_i915(dev);
1307 struct intel_overlay *overlay; 1269 struct intel_overlay *overlay;
1308 struct overlay_registers __iomem *regs;
1309 int ret; 1270 int ret;
1310 1271
1311 overlay = dev_priv->overlay; 1272 overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1345 overlay->contrast = attrs->contrast; 1306 overlay->contrast = attrs->contrast;
1346 overlay->saturation = attrs->saturation; 1307 overlay->saturation = attrs->saturation;
1347 1308
1348 regs = intel_overlay_map_regs(overlay); 1309 update_reg_attrs(overlay, overlay->regs);
1349 if (!regs) {
1350 ret = -ENOMEM;
1351 goto out_unlock;
1352 }
1353
1354 update_reg_attrs(overlay, regs);
1355
1356 intel_overlay_unmap_regs(overlay, regs);
1357 1310
1358 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1311 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1359 if (IS_GEN2(dev_priv)) 1312 if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
1386 return ret; 1339 return ret;
1387} 1340}
1388 1341
1342static int get_registers(struct intel_overlay *overlay, bool use_phys)
1343{
1344 struct drm_i915_gem_object *obj;
1345 struct i915_vma *vma;
1346 int err;
1347
1348 obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
1349 if (obj == NULL)
1350 obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
1351 if (IS_ERR(obj))
1352 return PTR_ERR(obj);
1353
1354 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
1355 if (IS_ERR(vma)) {
1356 err = PTR_ERR(vma);
1357 goto err_put_bo;
1358 }
1359
1360 if (use_phys)
1361 overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
1362 else
1363 overlay->flip_addr = i915_ggtt_offset(vma);
1364 overlay->regs = i915_vma_pin_iomap(vma);
1365 i915_vma_unpin(vma);
1366
1367 if (IS_ERR(overlay->regs)) {
1368 err = PTR_ERR(overlay->regs);
1369 goto err_put_bo;
1370 }
1371
1372 overlay->reg_bo = obj;
1373 return 0;
1374
1375err_put_bo:
1376 i915_gem_object_put(obj);
1377 return err;
1378}
1379
1389void intel_setup_overlay(struct drm_i915_private *dev_priv) 1380void intel_setup_overlay(struct drm_i915_private *dev_priv)
1390{ 1381{
1391 struct intel_overlay *overlay; 1382 struct intel_overlay *overlay;
1392 struct drm_i915_gem_object *reg_bo;
1393 struct overlay_registers __iomem *regs;
1394 struct i915_vma *vma = NULL;
1395 int ret; 1383 int ret;
1396 1384
1397 if (!HAS_OVERLAY(dev_priv)) 1385 if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1401 if (!overlay) 1389 if (!overlay)
1402 return; 1390 return;
1403 1391
1404 mutex_lock(&dev_priv->drm.struct_mutex);
1405 if (WARN_ON(dev_priv->overlay))
1406 goto out_free;
1407
1408 overlay->i915 = dev_priv; 1392 overlay->i915 = dev_priv;
1409 1393
1410 reg_bo = NULL;
1411 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1412 reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
1413 if (reg_bo == NULL)
1414 reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
1415 if (IS_ERR(reg_bo))
1416 goto out_free;
1417 overlay->reg_bo = reg_bo;
1418
1419 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1420 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1421 if (ret) {
1422 DRM_ERROR("failed to attach phys overlay regs\n");
1423 goto out_free_bo;
1424 }
1425 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1426 } else {
1427 vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
1428 0, PAGE_SIZE, PIN_MAPPABLE);
1429 if (IS_ERR(vma)) {
1430 DRM_ERROR("failed to pin overlay register bo\n");
1431 ret = PTR_ERR(vma);
1432 goto out_free_bo;
1433 }
1434 overlay->flip_addr = i915_ggtt_offset(vma);
1435
1436 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1437 if (ret) {
1438 DRM_ERROR("failed to move overlay register bo into the GTT\n");
1439 goto out_unpin_bo;
1440 }
1441 }
1442
1443 /* init all values */
1444 overlay->color_key = 0x0101fe; 1394 overlay->color_key = 0x0101fe;
1445 overlay->color_key_enabled = true; 1395 overlay->color_key_enabled = true;
1446 overlay->brightness = -19; 1396 overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1449 1399
1450 init_request_active(&overlay->last_flip, NULL); 1400 init_request_active(&overlay->last_flip, NULL);
1451 1401
1452 regs = intel_overlay_map_regs(overlay); 1402 mutex_lock(&dev_priv->drm.struct_mutex);
1453 if (!regs) 1403
1454 goto out_unpin_bo; 1404 ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
1405 if (ret)
1406 goto out_free;
1407
1408 ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
1409 if (ret)
1410 goto out_reg_bo;
1455 1411
1456 memset_io(regs, 0, sizeof(struct overlay_registers)); 1412 mutex_unlock(&dev_priv->drm.struct_mutex);
1457 update_polyphase_filter(regs);
1458 update_reg_attrs(overlay, regs);
1459 1413
1460 intel_overlay_unmap_regs(overlay, regs); 1414 memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
1415 update_polyphase_filter(overlay->regs);
1416 update_reg_attrs(overlay, overlay->regs);
1461 1417
1462 dev_priv->overlay = overlay; 1418 dev_priv->overlay = overlay;
1463 mutex_unlock(&dev_priv->drm.struct_mutex); 1419 DRM_INFO("Initialized overlay support.\n");
1464 DRM_INFO("initialized overlay support\n");
1465 return; 1420 return;
1466 1421
1467out_unpin_bo: 1422out_reg_bo:
1468 if (vma) 1423 i915_gem_object_put(overlay->reg_bo);
1469 i915_vma_unpin(vma);
1470out_free_bo:
1471 i915_gem_object_put(reg_bo);
1472out_free: 1424out_free:
1473 mutex_unlock(&dev_priv->drm.struct_mutex); 1425 mutex_unlock(&dev_priv->drm.struct_mutex);
1474 kfree(overlay); 1426 kfree(overlay);
1475 return;
1476} 1427}
1477 1428
1478void intel_cleanup_overlay(struct drm_i915_private *dev_priv) 1429void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1479{ 1430{
1480 if (!dev_priv->overlay) 1431 struct intel_overlay *overlay;
1432
1433 overlay = fetch_and_zero(&dev_priv->overlay);
1434 if (!overlay)
1481 return; 1435 return;
1482 1436
1483 /* The bo's should be free'd by the generic code already. 1437 /*
1438 * The bo's should be free'd by the generic code already.
1484 * Furthermore modesetting teardown happens beforehand so the 1439 * Furthermore modesetting teardown happens beforehand so the
1485 * hardware should be off already */ 1440 * hardware should be off already.
1486 WARN_ON(dev_priv->overlay->active); 1441 */
1442 WARN_ON(overlay->active);
1443
1444 i915_gem_object_put(overlay->reg_bo);
1487 1445
1488 i915_gem_object_put(dev_priv->overlay->reg_bo); 1446 kfree(overlay);
1489 kfree(dev_priv->overlay);
1490} 1447}
1491 1448
1492#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 1449#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
1498 u32 isr; 1455 u32 isr;
1499}; 1456};
1500 1457
1501static struct overlay_registers __iomem *
1502intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1503{
1504 struct drm_i915_private *dev_priv = overlay->i915;
1505 struct overlay_registers __iomem *regs;
1506
1507 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1508 /* Cast to make sparse happy, but it's wc memory anyway, so
1509 * equivalent to the wc io mapping on X86. */
1510 regs = (struct overlay_registers __iomem *)
1511 overlay->reg_bo->phys_handle->vaddr;
1512 else
1513 regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
1514 overlay->flip_addr);
1515
1516 return regs;
1517}
1518
1519static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1520 struct overlay_registers __iomem *regs)
1521{
1522 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1523 io_mapping_unmap_atomic(regs);
1524}
1525
1526struct intel_overlay_error_state * 1458struct intel_overlay_error_state *
1527intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) 1459intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1528{ 1460{
1529 struct intel_overlay *overlay = dev_priv->overlay; 1461 struct intel_overlay *overlay = dev_priv->overlay;
1530 struct intel_overlay_error_state *error; 1462 struct intel_overlay_error_state *error;
1531 struct overlay_registers __iomem *regs;
1532 1463
1533 if (!overlay || !overlay->active) 1464 if (!overlay || !overlay->active)
1534 return NULL; 1465 return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1541 error->isr = I915_READ(ISR); 1472 error->isr = I915_READ(ISR);
1542 error->base = overlay->flip_addr; 1473 error->base = overlay->flip_addr;
1543 1474
1544 regs = intel_overlay_map_regs_atomic(overlay); 1475 memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
1545 if (!regs)
1546 goto err;
1547
1548 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
1549 intel_overlay_unmap_regs_atomic(overlay, regs);
1550 1476
1551 return error; 1477 return error;
1552
1553err:
1554 kfree(error);
1555 return NULL;
1556} 1478}
1557 1479
1558void 1480void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index d65959ef0564..17235e940ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
86 struct nvkm_bios *bios = subdev->device->bios; 86 struct nvkm_bios *bios = subdev->device->bios;
87 struct nvbios_pmuR pmu; 87 struct nvbios_pmuR pmu;
88 88
89 if (!nvbios_pmuRm(bios, type, &pmu)) { 89 if (!nvbios_pmuRm(bios, type, &pmu))
90 nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
91 return -EINVAL; 90 return -EINVAL;
92 }
93 91
94 if (!post) 92 if (!post)
95 return 0; 93 return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
124 return -EINVAL; 122 return -EINVAL;
125 } 123 }
126 124
125 /* Upload DEVINIT application from VBIOS onto PMU. */
127 ret = pmu_load(init, 0x04, post, &exec, &args); 126 ret = pmu_load(init, 0x04, post, &exec, &args);
128 if (ret) 127 if (ret) {
128 nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
129 return ret; 129 return ret;
130 }
130 131
131 /* upload first chunk of init data */ 132 /* Upload tables required by opcodes in boot scripts. */
132 if (post) { 133 if (post) {
133 // devinit tables
134 u32 pmu = pmu_args(init, args + 0x08, 0x08); 134 u32 pmu = pmu_args(init, args + 0x08, 0x08);
135 u32 img = nvbios_rd16(bios, bit_I.offset + 0x14); 135 u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
136 u32 len = nvbios_rd16(bios, bit_I.offset + 0x16); 136 u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
137 pmu_data(init, pmu, img, len); 137 pmu_data(init, pmu, img, len);
138 } 138 }
139 139
140 /* upload second chunk of init data */ 140 /* Upload boot scripts. */
141 if (post) { 141 if (post) {
142 // devinit boot scripts
143 u32 pmu = pmu_args(init, args + 0x08, 0x10); 142 u32 pmu = pmu_args(init, args + 0x08, 0x10);
144 u32 img = nvbios_rd16(bios, bit_I.offset + 0x18); 143 u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
145 u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a); 144 u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
146 pmu_data(init, pmu, img, len); 145 pmu_data(init, pmu, img, len);
147 } 146 }
148 147
149 /* execute init tables */ 148 /* Execute DEVINIT. */
150 if (post) { 149 if (post) {
151 nvkm_wr32(device, 0x10a040, 0x00005000); 150 nvkm_wr32(device, 0x10a040, 0x00005000);
152 pmu_exec(init, exec); 151 pmu_exec(init, exec);
@@ -157,7 +156,9 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
157 return -ETIMEDOUT; 156 return -ETIMEDOUT;
158 } 157 }
159 158
160 /* load and execute some other ucode image (bios therm?) */ 159 /* Optional: Execute PRE_OS application on PMU, which should at
160 * least take care of fans until a full PMU has been loaded.
161 */
161 pmu_load(init, 0x01, post, NULL, NULL); 162 pmu_load(init, 0x01, post, NULL, NULL);
162 return 0; 163 return 0;
163} 164}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b1b548a21f91..c71cc857b649 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1291 if (!attribute->show) 1291 if (!attribute->show)
1292 return -EIO; 1292 return -EIO;
1293 1293
1294 if (chan->state != CHANNEL_OPENED_STATE)
1295 return -EINVAL;
1296
1294 return attribute->show(chan, buf); 1297 return attribute->show(chan, buf);
1295} 1298}
1296 1299
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 7589f2ad1dae..631360b14ca7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
187 187
188int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) 188int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
189{ 189{
190 u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask; 190 u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
191 struct st_lsm6dsx_hw *hw = sensor->hw; 191 struct st_lsm6dsx_hw *hw = sensor->hw;
192 struct st_lsm6dsx_sensor *cur_sensor; 192 struct st_lsm6dsx_sensor *cur_sensor;
193 int i, err, data; 193 int i, err, data;
194 __le16 wdata; 194 __le16 wdata;
195 195
196 if (!hw->sip)
197 return 0;
198
196 for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) { 199 for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
197 cur_sensor = iio_priv(hw->iio_devs[i]); 200 cur_sensor = iio_priv(hw->iio_devs[i]);
198 201
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
203 : cur_sensor->watermark; 206 : cur_sensor->watermark;
204 207
205 fifo_watermark = min_t(u16, fifo_watermark, cur_watermark); 208 fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
206 sip += cur_sensor->sip;
207 } 209 }
208 210
209 if (!sip) 211 fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
210 return 0; 212 fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
211
212 fifo_watermark = max_t(u16, fifo_watermark, sip);
213 fifo_watermark = (fifo_watermark / sip) * sip;
214 fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl; 213 fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
215 214
216 err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1, 215 err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 54e383231d1e..c31b9633f32d 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
258static const struct spi_device_id maxim_thermocouple_id[] = { 258static const struct spi_device_id maxim_thermocouple_id[] = {
259 {"max6675", MAX6675}, 259 {"max6675", MAX6675},
260 {"max31855", MAX31855}, 260 {"max31855", MAX31855},
261 {"max31856", MAX31855},
262 {}, 261 {},
263}; 262};
264MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id); 263MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index eec83757d55f..6c967dde58e7 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
893 } 893 }
894 894
895 /* 895 /*
896 * A secondary bus reset (SBR) issues a hot reset to our device. 896 * This is an end around to do an SBR during probe time. A new API needs
897 * The following routine does a 1s wait after the reset is dropped 897 * to be implemented to have cleaner interface but this fixes the
898 * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 - 898 * current brokenness
899 * Conventional Reset, paragraph 3, line 35 also says that a 1s
900 * delay after a reset is required. Per spec requirements,
901 * the link is either working or not after that point.
902 */ 899 */
903 return pci_reset_bus(dev); 900 return pci_bridge_secondary_bus_reset(dev->bus->self);
904} 901}
905 902
906/* 903/*
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f266c81f396f..0481223b1deb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
332 int err; 332 int err;
333 333
334 desc->tfm = essiv->hash_tfm; 334 desc->tfm = essiv->hash_tfm;
335 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 335 desc->flags = 0;
336 336
337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); 337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
338 shash_desc_zero(desc); 338 shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
606 int i, r; 606 int i, r;
607 607
608 desc->tfm = lmk->hash_tfm; 608 desc->tfm = lmk->hash_tfm;
609 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 609 desc->flags = 0;
610 610
611 r = crypto_shash_init(desc); 611 r = crypto_shash_init(desc);
612 if (r) 612 if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
768 768
769 /* calculate crc32 for every 32bit part and xor it */ 769 /* calculate crc32 for every 32bit part and xor it */
770 desc->tfm = tcw->crc32_tfm; 770 desc->tfm = tcw->crc32_tfm;
771 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 771 desc->flags = 0;
772 for (i = 0; i < 4; i++) { 772 for (i = 0; i < 4; i++) {
773 r = crypto_shash_init(desc); 773 r = crypto_shash_init(desc);
774 if (r) 774 if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1251 * requests if driver request queue is full. 1251 * requests if driver request queue is full.
1252 */ 1252 */
1253 skcipher_request_set_callback(ctx->r.req, 1253 skcipher_request_set_callback(ctx->r.req,
1254 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 1254 CRYPTO_TFM_REQ_MAY_BACKLOG,
1255 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); 1255 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1256} 1256}
1257 1257
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
1268 * requests if driver request queue is full. 1268 * requests if driver request queue is full.
1269 */ 1269 */
1270 aead_request_set_callback(ctx->r.req_aead, 1270 aead_request_set_callback(ctx->r.req_aead,
1271 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 1271 CRYPTO_TFM_REQ_MAY_BACKLOG,
1272 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); 1272 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1273} 1273}
1274 1274
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 378878599466..89ccb64342de 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
532 unsigned j, size; 532 unsigned j, size;
533 533
534 desc->tfm = ic->journal_mac; 534 desc->tfm = ic->journal_mac;
535 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 535 desc->flags = 0;
536 536
537 r = crypto_shash_init(desc); 537 r = crypto_shash_init(desc);
538 if (unlikely(r)) { 538 if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
676static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 676static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
677{ 677{
678 int r; 678 int r;
679 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 679 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
680 complete_journal_encrypt, comp); 680 complete_journal_encrypt, comp);
681 if (likely(encrypt)) 681 if (likely(encrypt))
682 r = crypto_skcipher_encrypt(req); 682 r = crypto_skcipher_encrypt(req);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index cae689de75fd..5ba067fa0c72 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010-2011 Neil Brown 2 * Copyright (C) 2010-2011 Neil Brown
3 * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
@@ -29,9 +29,6 @@
29 */ 29 */
30#define MIN_RAID456_JOURNAL_SPACE (4*2048) 30#define MIN_RAID456_JOURNAL_SPACE (4*2048)
31 31
32/* Global list of all raid sets */
33static LIST_HEAD(raid_sets);
34
35static bool devices_handle_discard_safely = false; 32static bool devices_handle_discard_safely = false;
36 33
37/* 34/*
@@ -227,7 +224,6 @@ struct rs_layout {
227 224
228struct raid_set { 225struct raid_set {
229 struct dm_target *ti; 226 struct dm_target *ti;
230 struct list_head list;
231 227
232 uint32_t stripe_cache_entries; 228 uint32_t stripe_cache_entries;
233 unsigned long ctr_flags; 229 unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
273 mddev->new_chunk_sectors = l->new_chunk_sectors; 269 mddev->new_chunk_sectors = l->new_chunk_sectors;
274} 270}
275 271
276/* Find any raid_set in active slot for @rs on global list */
277static struct raid_set *rs_find_active(struct raid_set *rs)
278{
279 struct raid_set *r;
280 struct mapped_device *md = dm_table_get_md(rs->ti->table);
281
282 list_for_each_entry(r, &raid_sets, list)
283 if (r != rs && dm_table_get_md(r->ti->table) == md)
284 return r;
285
286 return NULL;
287}
288
289/* raid10 algorithms (i.e. formats) */ 272/* raid10 algorithms (i.e. formats) */
290#define ALGORITHM_RAID10_DEFAULT 0 273#define ALGORITHM_RAID10_DEFAULT 0
291#define ALGORITHM_RAID10_NEAR 1 274#define ALGORITHM_RAID10_NEAR 1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
764 747
765 mddev_init(&rs->md); 748 mddev_init(&rs->md);
766 749
767 INIT_LIST_HEAD(&rs->list);
768 rs->raid_disks = raid_devs; 750 rs->raid_disks = raid_devs;
769 rs->delta_disks = 0; 751 rs->delta_disks = 0;
770 752
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
782 for (i = 0; i < raid_devs; i++) 764 for (i = 0; i < raid_devs; i++)
783 md_rdev_init(&rs->dev[i].rdev); 765 md_rdev_init(&rs->dev[i].rdev);
784 766
785 /* Add @rs to global list. */
786 list_add(&rs->list, &raid_sets);
787
788 /* 767 /*
789 * Remaining items to be initialized by further RAID params: 768 * Remaining items to be initialized by further RAID params:
790 * rs->md.persistent 769 * rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
797 return rs; 776 return rs;
798} 777}
799 778
800/* Free all @rs allocations and remove it from global list. */ 779/* Free all @rs allocations */
801static void raid_set_free(struct raid_set *rs) 780static void raid_set_free(struct raid_set *rs)
802{ 781{
803 int i; 782 int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
815 dm_put_device(rs->ti, rs->dev[i].data_dev); 794 dm_put_device(rs->ti, rs->dev[i].data_dev);
816 } 795 }
817 796
818 list_del(&rs->list);
819
820 kfree(rs); 797 kfree(rs);
821} 798}
822 799
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
2649 return 0; 2626 return 0;
2650 } 2627 }
2651 2628
2652 /* HM FIXME: get InSync raid_dev? */ 2629 /* HM FIXME: get In_Sync raid_dev? */
2653 rdev = &rs->dev[0].rdev; 2630 rdev = &rs->dev[0].rdev;
2654 2631
2655 if (rs->delta_disks < 0) { 2632 if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3149 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3126 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3150 rs_set_new(rs); 3127 rs_set_new(rs);
3151 } else if (rs_is_recovering(rs)) { 3128 } else if (rs_is_recovering(rs)) {
3129 /* Rebuild particular devices */
3130 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3131 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3132 rs_setup_recovery(rs, MaxSector);
3133 }
3152 /* A recovering raid set may be resized */ 3134 /* A recovering raid set may be resized */
3153 ; /* skip setup rs */ 3135 ; /* skip setup rs */
3154 } else if (rs_is_reshaping(rs)) { 3136 } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3242 /* Start raid set read-only and assumed clean to change in raid_resume() */ 3224 /* Start raid set read-only and assumed clean to change in raid_resume() */
3243 rs->md.ro = 1; 3225 rs->md.ro = 1;
3244 rs->md.in_sync = 1; 3226 rs->md.in_sync = 1;
3227
3228 /* Keep array frozen */
3245 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); 3229 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3246 3230
3247 /* Has to be held on running the array */ 3231 /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3265 rs->callbacks.congested_fn = raid_is_congested; 3249 rs->callbacks.congested_fn = raid_is_congested;
3266 dm_table_add_target_callbacks(ti->table, &rs->callbacks); 3250 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
3267 3251
3268 /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */ 3252 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
3269 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { 3253 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3270 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); 3254 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3271 if (r) { 3255 if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
3350 return DM_MAPIO_SUBMITTED; 3334 return DM_MAPIO_SUBMITTED;
3351} 3335}
3352 3336
3353/* Return string describing the current sync action of @mddev */ 3337/* Return sync state string for @state */
3354static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery) 3338enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
3339static const char *sync_str(enum sync_state state)
3340{
3341 /* Has to be in above sync_state order! */
3342 static const char *sync_strs[] = {
3343 "frozen",
3344 "reshape",
3345 "resync",
3346 "check",
3347 "repair",
3348 "recover",
3349 "idle"
3350 };
3351
3352 return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
3353};
3354
3355/* Return enum sync_state for @mddev derived from @recovery flags */
3356static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3355{ 3357{
3356 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 3358 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3357 return "frozen"; 3359 return st_frozen;
3358 3360
3359 /* The MD sync thread can be done with io but still be running */ 3361 /* The MD sync thread can be done with io or be interrupted but still be running */
3360 if (!test_bit(MD_RECOVERY_DONE, &recovery) && 3362 if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3361 (test_bit(MD_RECOVERY_RUNNING, &recovery) || 3363 (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3362 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { 3364 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3363 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 3365 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3364 return "reshape"; 3366 return st_reshape;
3365 3367
3366 if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 3368 if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3367 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 3369 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3368 return "resync"; 3370 return st_resync;
3369 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 3371 if (test_bit(MD_RECOVERY_CHECK, &recovery))
3370 return "check"; 3372 return st_check;
3371 return "repair"; 3373 return st_repair;
3372 } 3374 }
3373 3375
3374 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3376 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3375 return "recover"; 3377 return st_recover;
3378
3379 if (mddev->reshape_position != MaxSector)
3380 return st_reshape;
3376 } 3381 }
3377 3382
3378 return "idle"; 3383 return st_idle;
3379} 3384}
3380 3385
3381/* 3386/*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3409 sector_t resync_max_sectors) 3414 sector_t resync_max_sectors)
3410{ 3415{
3411 sector_t r; 3416 sector_t r;
3417 enum sync_state state;
3412 struct mddev *mddev = &rs->md; 3418 struct mddev *mddev = &rs->md;
3413 3419
3414 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3420 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3419 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3425 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3420 3426
3421 } else { 3427 } else {
3422 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) && 3428 state = decipher_sync_action(mddev, recovery);
3423 !test_bit(MD_RECOVERY_INTR, &recovery) && 3429
3424 (test_bit(MD_RECOVERY_NEEDED, &recovery) || 3430 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
3425 test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
3426 test_bit(MD_RECOVERY_RUNNING, &recovery)))
3427 r = mddev->curr_resync_completed;
3428 else
3429 r = mddev->recovery_cp; 3431 r = mddev->recovery_cp;
3432 else
3433 r = mddev->curr_resync_completed;
3430 3434
3431 if (r >= resync_max_sectors && 3435 if (state == st_idle && r >= resync_max_sectors) {
3432 (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
3433 (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
3434 !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
3435 !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
3436 /* 3436 /*
3437 * Sync complete. 3437 * Sync complete.
3438 */ 3438 */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3442 3442
3443 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) { 3443 } else if (state == st_recover)
3444 /* 3444 /*
3445 * In case we are recovering, the array is not in sync 3445 * In case we are recovering, the array is not in sync
3446 * and health chars should show the recovering legs. 3446 * and health chars should show the recovering legs.
3447 */ 3447 */
3448 ; 3448 ;
3449 3449 else if (state == st_resync)
3450 } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
3451 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3452 /* 3450 /*
3453 * If "resync" is occurring, the raid set 3451 * If "resync" is occurring, the raid set
3454 * is or may be out of sync hence the health 3452 * is or may be out of sync hence the health
3455 * characters shall be 'a'. 3453 * characters shall be 'a'.
3456 */ 3454 */
3457 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3455 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3458 3456 else if (state == st_reshape)
3459 } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
3460 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3461 /* 3457 /*
3462 * If "reshape" is occurring, the raid set 3458 * If "reshape" is occurring, the raid set
3463 * is or may be out of sync hence the health 3459 * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3465 */ 3461 */
3466 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3462 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3467 3463
3468 } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) { 3464 else if (state == st_check || state == st_repair)
3469 /* 3465 /*
3470 * If "check" or "repair" is occurring, the raid set has 3466 * If "check" or "repair" is occurring, the raid set has
3471 * undergone an initial sync and the health characters 3467 * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3473 */ 3469 */
3474 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3470 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3475 3471
3476 } else { 3472 else {
3477 struct md_rdev *rdev; 3473 struct md_rdev *rdev;
3478 3474
3479 /* 3475 /*
3480 * We are idle and recovery is needed, prevent 'A' chars race 3476 * We are idle and recovery is needed, prevent 'A' chars race
3481 * caused by components still set to in-sync by constrcuctor. 3477 * caused by components still set to in-sync by constructor.
3482 */ 3478 */
3483 if (test_bit(MD_RECOVERY_NEEDED, &recovery)) 3479 if (test_bit(MD_RECOVERY_NEEDED, &recovery))
3484 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3480 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
3542 progress = rs_get_progress(rs, recovery, resync_max_sectors); 3538 progress = rs_get_progress(rs, recovery, resync_max_sectors);
3543 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? 3539 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3544 atomic64_read(&mddev->resync_mismatches) : 0; 3540 atomic64_read(&mddev->resync_mismatches) : 0;
3545 sync_action = decipher_sync_action(&rs->md, recovery); 3541 sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
3546 3542
3547 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */ 3543 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3548 for (i = 0; i < rs->raid_disks; i++) 3544 for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
3892 struct mddev *mddev = &rs->md; 3888 struct mddev *mddev = &rs->md;
3893 struct md_personality *pers = mddev->pers; 3889 struct md_personality *pers = mddev->pers;
3894 3890
3891 /* Don't allow the sync thread to work until the table gets reloaded. */
3892 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
3893
3895 r = rs_setup_reshape(rs); 3894 r = rs_setup_reshape(rs);
3896 if (r) 3895 if (r)
3897 return r; 3896 return r;
3898 3897
3899 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
3900 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3901 mddev_resume(mddev);
3902
3903 /* 3898 /*
3904 * Check any reshape constraints enforced by the personalility 3899 * Check any reshape constraints enforced by the personalility
3905 * 3900 *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
3923 } 3918 }
3924 } 3919 }
3925 3920
3926 /* Suspend because a resume will happen in raid_resume() */
3927 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3928 mddev_suspend(mddev);
3929
3930 /* 3921 /*
3931 * Now reshape got set up, update superblocks to 3922 * Now reshape got set up, update superblocks to
3932 * reflect the fact so that a table reload will 3923 * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
3947 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) 3938 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3948 return 0; 3939 return 0;
3949 3940
3950 if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3951 struct raid_set *rs_active = rs_find_active(rs);
3952
3953 if (rs_active) {
3954 /*
3955 * In case no rebuilds have been requested
3956 * and an active table slot exists, copy
3957 * current resynchonization completed and
3958 * reshape position pointers across from
3959 * suspended raid set in the active slot.
3960 *
3961 * This resumes the new mapping at current
3962 * offsets to continue recover/reshape without
3963 * necessarily redoing a raid set partially or
3964 * causing data corruption in case of a reshape.
3965 */
3966 if (rs_active->md.curr_resync_completed != MaxSector)
3967 mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
3968 if (rs_active->md.reshape_position != MaxSector)
3969 mddev->reshape_position = rs_active->md.reshape_position;
3970 }
3971 }
3972
3973 /* 3941 /*
3974 * The superblocks need to be updated on disk if the 3942 * The superblocks need to be updated on disk if the
3975 * array is new or new devices got added (thus zeroed 3943 * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
4046 4014
4047static struct target_type raid_target = { 4015static struct target_type raid_target = {
4048 .name = "raid", 4016 .name = "raid",
4049 .version = {1, 13, 2}, 4017 .version = {1, 14, 0},
4050 .module = THIS_MODULE, 4018 .module = THIS_MODULE,
4051 .ctr = raid_ctr, 4019 .ctr = raid_ctr,
4052 .dtr = raid_dtr, 4020 .dtr = raid_dtr,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 72142021b5c9..74f6770c70b1 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -189,6 +189,12 @@ struct dm_pool_metadata {
189 sector_t data_block_size; 189 sector_t data_block_size;
190 190
191 /* 191 /*
192 * We reserve a section of the metadata for commit overhead.
193 * All reported space does *not* include this.
194 */
195 dm_block_t metadata_reserve;
196
197 /*
192 * Set if a transaction has to be aborted but the attempt to roll back 198 * Set if a transaction has to be aborted but the attempt to roll back
193 * to the previous (good) transaction failed. The only pool metadata 199 * to the previous (good) transaction failed. The only pool metadata
194 * operation possible in this state is the closing of the device. 200 * operation possible in this state is the closing of the device.
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
816 return dm_tm_commit(pmd->tm, sblock); 822 return dm_tm_commit(pmd->tm, sblock);
817} 823}
818 824
825static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
826{
827 int r;
828 dm_block_t total;
829 dm_block_t max_blocks = 4096; /* 16M */
830
831 r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
832 if (r) {
833 DMERR("could not get size of metadata device");
834 pmd->metadata_reserve = max_blocks;
835 } else {
836 sector_div(total, 10);
837 pmd->metadata_reserve = min(max_blocks, total);
838 }
839}
840
819struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, 841struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
820 sector_t data_block_size, 842 sector_t data_block_size,
821 bool format_device) 843 bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
849 return ERR_PTR(r); 871 return ERR_PTR(r);
850 } 872 }
851 873
874 __set_metadata_reserve(pmd);
875
852 return pmd; 876 return pmd;
853} 877}
854 878
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1820 down_read(&pmd->root_lock); 1844 down_read(&pmd->root_lock);
1821 if (!pmd->fail_io) 1845 if (!pmd->fail_io)
1822 r = dm_sm_get_nr_free(pmd->metadata_sm, result); 1846 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1847
1848 if (!r) {
1849 if (*result < pmd->metadata_reserve)
1850 *result = 0;
1851 else
1852 *result -= pmd->metadata_reserve;
1853 }
1823 up_read(&pmd->root_lock); 1854 up_read(&pmd->root_lock);
1824 1855
1825 return r; 1856 return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
1932 int r = -EINVAL; 1963 int r = -EINVAL;
1933 1964
1934 down_write(&pmd->root_lock); 1965 down_write(&pmd->root_lock);
1935 if (!pmd->fail_io) 1966 if (!pmd->fail_io) {
1936 r = __resize_space_map(pmd->metadata_sm, new_count); 1967 r = __resize_space_map(pmd->metadata_sm, new_count);
1968 if (!r)
1969 __set_metadata_reserve(pmd);
1970 }
1937 up_write(&pmd->root_lock); 1971 up_write(&pmd->root_lock);
1938 1972
1939 return r; 1973 return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7bd60a150f8f..aaf1ad481ee8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
200enum pool_mode { 200enum pool_mode {
201 PM_WRITE, /* metadata may be changed */ 201 PM_WRITE, /* metadata may be changed */
202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ 202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
203
204 /*
205 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
206 */
207 PM_OUT_OF_METADATA_SPACE,
203 PM_READ_ONLY, /* metadata may not be changed */ 208 PM_READ_ONLY, /* metadata may not be changed */
209
204 PM_FAIL, /* all I/O fails */ 210 PM_FAIL, /* all I/O fails */
205}; 211};
206 212
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1371 1377
1372static void requeue_bios(struct pool *pool); 1378static void requeue_bios(struct pool *pool);
1373 1379
1374static void check_for_space(struct pool *pool) 1380static bool is_read_only_pool_mode(enum pool_mode mode)
1381{
1382 return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
1383}
1384
1385static bool is_read_only(struct pool *pool)
1386{
1387 return is_read_only_pool_mode(get_pool_mode(pool));
1388}
1389
1390static void check_for_metadata_space(struct pool *pool)
1391{
1392 int r;
1393 const char *ooms_reason = NULL;
1394 dm_block_t nr_free;
1395
1396 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
1397 if (r)
1398 ooms_reason = "Could not get free metadata blocks";
1399 else if (!nr_free)
1400 ooms_reason = "No free metadata blocks";
1401
1402 if (ooms_reason && !is_read_only(pool)) {
1403 DMERR("%s", ooms_reason);
1404 set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
1405 }
1406}
1407
1408static void check_for_data_space(struct pool *pool)
1375{ 1409{
1376 int r; 1410 int r;
1377 dm_block_t nr_free; 1411 dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
1397{ 1431{
1398 int r; 1432 int r;
1399 1433
1400 if (get_pool_mode(pool) >= PM_READ_ONLY) 1434 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
1401 return -EINVAL; 1435 return -EINVAL;
1402 1436
1403 r = dm_pool_commit_metadata(pool->pmd); 1437 r = dm_pool_commit_metadata(pool->pmd);
1404 if (r) 1438 if (r)
1405 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); 1439 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1406 else 1440 else {
1407 check_for_space(pool); 1441 check_for_metadata_space(pool);
1442 check_for_data_space(pool);
1443 }
1408 1444
1409 return r; 1445 return r;
1410} 1446}
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1470 return r; 1506 return r;
1471 } 1507 }
1472 1508
1509 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
1510 if (r) {
1511 metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
1512 return r;
1513 }
1514
1515 if (!free_blocks) {
1516 /* Let's commit before we use up the metadata reserve. */
1517 r = commit(pool);
1518 if (r)
1519 return r;
1520 }
1521
1473 return 0; 1522 return 0;
1474} 1523}
1475 1524
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
1501 case PM_OUT_OF_DATA_SPACE: 1550 case PM_OUT_OF_DATA_SPACE:
1502 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; 1551 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
1503 1552
1553 case PM_OUT_OF_METADATA_SPACE:
1504 case PM_READ_ONLY: 1554 case PM_READ_ONLY:
1505 case PM_FAIL: 1555 case PM_FAIL:
1506 return BLK_STS_IOERR; 1556 return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2464 error_retry_list(pool); 2514 error_retry_list(pool);
2465 break; 2515 break;
2466 2516
2517 case PM_OUT_OF_METADATA_SPACE:
2467 case PM_READ_ONLY: 2518 case PM_READ_ONLY:
2468 if (old_mode != new_mode) 2519 if (!is_read_only_pool_mode(old_mode))
2469 notify_of_pool_mode_change(pool, "read-only"); 2520 notify_of_pool_mode_change(pool, "read-only");
2470 dm_pool_metadata_read_only(pool->pmd); 2521 dm_pool_metadata_read_only(pool->pmd);
2471 pool->process_bio = process_bio_read_only; 2522 pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3403 DMINFO("%s: growing the metadata device from %llu to %llu blocks", 3454 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3404 dm_device_name(pool->pool_md), 3455 dm_device_name(pool->pool_md),
3405 sb_metadata_dev_size, metadata_dev_size); 3456 sb_metadata_dev_size, metadata_dev_size);
3457
3458 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
3459 set_pool_mode(pool, PM_WRITE);
3460
3406 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); 3461 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3407 if (r) { 3462 if (r) {
3408 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); 3463 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
3707 struct pool_c *pt = ti->private; 3762 struct pool_c *pt = ti->private;
3708 struct pool *pool = pt->pool; 3763 struct pool *pool = pt->pool;
3709 3764
3710 if (get_pool_mode(pool) >= PM_READ_ONLY) { 3765 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
3711 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", 3766 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3712 dm_device_name(pool->pool_md)); 3767 dm_device_name(pool->pool_md));
3713 return -EOPNOTSUPP; 3768 return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3781 dm_block_t nr_blocks_data; 3836 dm_block_t nr_blocks_data;
3782 dm_block_t nr_blocks_metadata; 3837 dm_block_t nr_blocks_metadata;
3783 dm_block_t held_root; 3838 dm_block_t held_root;
3839 enum pool_mode mode;
3784 char buf[BDEVNAME_SIZE]; 3840 char buf[BDEVNAME_SIZE];
3785 char buf2[BDEVNAME_SIZE]; 3841 char buf2[BDEVNAME_SIZE];
3786 struct pool_c *pt = ti->private; 3842 struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3851 else 3907 else
3852 DMEMIT("- "); 3908 DMEMIT("- ");
3853 3909
3854 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) 3910 mode = get_pool_mode(pool);
3911 if (mode == PM_OUT_OF_DATA_SPACE)
3855 DMEMIT("out_of_data_space "); 3912 DMEMIT("out_of_data_space ");
3856 else if (pool->pf.mode == PM_READ_ONLY) 3913 else if (is_read_only_pool_mode(mode))
3857 DMEMIT("ro "); 3914 DMEMIT("ro ");
3858 else 3915 else
3859 DMEMIT("rw "); 3916 DMEMIT("rw ");
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 12decdbd722d..fc65f0dedf7f 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
99{ 99{
100 struct scatterlist sg; 100 struct scatterlist sg;
101 101
102 sg_init_one(&sg, data, len); 102 if (likely(!is_vmalloc_addr(data))) {
103 ahash_request_set_crypt(req, &sg, NULL, len); 103 sg_init_one(&sg, data, len);
104 104 ahash_request_set_crypt(req, &sg, NULL, len);
105 return crypto_wait_req(crypto_ahash_update(req), wait); 105 return crypto_wait_req(crypto_ahash_update(req), wait);
106 } else {
107 do {
108 int r;
109 size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
110 flush_kernel_vmap_range((void *)data, this_step);
111 sg_init_table(&sg, 1);
112 sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
113 ahash_request_set_crypt(req, &sg, NULL, this_step);
114 r = crypto_wait_req(crypto_ahash_update(req), wait);
115 if (unlikely(r))
116 return r;
117 data += this_step;
118 len -= this_step;
119 } while (len);
120 return 0;
121 }
106} 122}
107 123
108/* 124/*
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index eeb7eef62174..38f90e179927 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -27,6 +27,7 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/sysfs.h> 29#include <linux/sysfs.h>
30#include <linux/nospec.h>
30 31
31static DEFINE_MUTEX(compass_mutex); 32static DEFINE_MUTEX(compass_mutex);
32 33
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
50 return ret; 51 return ret;
51 if (val >= strlen(map)) 52 if (val >= strlen(map))
52 return -EINVAL; 53 return -EINVAL;
54 val = array_index_nospec(val, strlen(map));
53 mutex_lock(&compass_mutex); 55 mutex_lock(&compass_mutex);
54 ret = compass_command(c, map[val]); 56 ret = compass_command(c, map[val]);
55 mutex_unlock(&compass_mutex); 57 mutex_unlock(&compass_mutex);
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 8f82bb9d11e2..b8aaa684c397 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
2131 retrc = plpar_hcall_norets(H_REG_CRQ, 2131 retrc = plpar_hcall_norets(H_REG_CRQ,
2132 vdev->unit_address, 2132 vdev->unit_address,
2133 queue->msg_token, PAGE_SIZE); 2133 queue->msg_token, PAGE_SIZE);
2134 retrc = rc; 2134 rc = retrc;
2135 2135
2136 if (rc == H_RESOURCE) 2136 if (rc == H_RESOURCE)
2137 rc = ibmvmc_reset_crq_queue(adapter); 2137 rc = ibmvmc_reset_crq_queue(adapter);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 7bba62a72921..fc3872fe7b25 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
521 521
522 cl = cldev->cl; 522 cl = cldev->cl;
523 523
524 mutex_lock(&bus->device_lock);
524 if (cl->state == MEI_FILE_UNINITIALIZED) { 525 if (cl->state == MEI_FILE_UNINITIALIZED) {
525 mutex_lock(&bus->device_lock);
526 ret = mei_cl_link(cl); 526 ret = mei_cl_link(cl);
527 mutex_unlock(&bus->device_lock);
528 if (ret) 527 if (ret)
529 return ret; 528 goto out;
530 /* update pointers */ 529 /* update pointers */
531 cl->cldev = cldev; 530 cl->cldev = cldev;
532 } 531 }
533 532
534 mutex_lock(&bus->device_lock);
535 if (mei_cl_is_connected(cl)) { 533 if (mei_cl_is_connected(cl)) {
536 ret = 0; 534 ret = 0;
537 goto out; 535 goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
616 if (err < 0) 614 if (err < 0)
617 dev_err(bus->dev, "Could not disconnect from the ME client\n"); 615 dev_err(bus->dev, "Could not disconnect from the ME client\n");
618 616
619out:
620 mei_cl_bus_module_put(cldev); 617 mei_cl_bus_module_put(cldev);
621 618out:
622 /* Flush queues and remove any pending read */ 619 /* Flush queues and remove any pending read */
623 mei_cl_flush_queues(cl, NULL); 620 mei_cl_flush_queues(cl, NULL);
624 mei_cl_unlink(cl); 621 mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
876 873
877 mei_me_cl_put(cldev->me_cl); 874 mei_me_cl_put(cldev->me_cl);
878 mei_dev_bus_put(cldev->bus); 875 mei_dev_bus_put(cldev->bus);
876 mei_cl_unlink(cldev->cl);
879 kfree(cldev->cl); 877 kfree(cldev->cl);
880 kfree(cldev); 878 kfree(cldev);
881} 879}
882 880
883static const struct device_type mei_cl_device_type = { 881static const struct device_type mei_cl_device_type = {
884 .release = mei_cl_bus_dev_release, 882 .release = mei_cl_bus_dev_release,
885}; 883};
886 884
887/** 885/**
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 4ab6251d418e..ebdcf0b450e2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1767,7 +1767,7 @@ out:
1767 } 1767 }
1768 } 1768 }
1769 1769
1770 rets = buf->size; 1770 rets = len;
1771err: 1771err:
1772 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1772 cl_dbg(dev, cl, "rpm: autosuspend\n");
1773 pm_runtime_mark_last_busy(dev->dev); 1773 pm_runtime_mark_last_busy(dev->dev);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 09e233d4c0de..e56f3e72d57a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1161 1161
1162 props_res = (struct hbm_props_response *)mei_msg; 1162 props_res = (struct hbm_props_response *)mei_msg;
1163 1163
1164 if (props_res->status) { 1164 if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
1165 dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
1166 props_res->me_addr);
1167 } else if (props_res->status) {
1165 dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", 1168 dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
1166 props_res->status, 1169 props_res->status,
1167 mei_hbm_status_str(props_res->status)); 1170 mei_hbm_status_str(props_res->status));
1168 return -EPROTO; 1171 return -EPROTO;
1172 } else {
1173 mei_hbm_me_cl_add(dev, props_res);
1169 } 1174 }
1170 1175
1171 mei_hbm_me_cl_add(dev, props_res);
1172
1173 /* request property for the next client */ 1176 /* request property for the next client */
1174 if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) 1177 if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
1175 return -EIO; 1178 return -EIO;
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 09cb89645d06..2cfec33178c1 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
517static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) 517static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
518{ 518{
519 struct device_node *slot_node; 519 struct device_node *slot_node;
520 struct platform_device *pdev;
520 521
521 /* 522 /*
522 * TODO: the MMC core framework currently does not support 523 * TODO: the MMC core framework currently does not support
523 * controllers with multiple slots properly. So we only register 524 * controllers with multiple slots properly. So we only register
524 * the first slot for now 525 * the first slot for now
525 */ 526 */
526 slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); 527 slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
527 if (!slot_node) { 528 if (!slot_node) {
528 dev_warn(parent, "no 'mmc-slot' sub-node found\n"); 529 dev_warn(parent, "no 'mmc-slot' sub-node found\n");
529 return ERR_PTR(-ENOENT); 530 return ERR_PTR(-ENOENT);
530 } 531 }
531 532
532 return of_platform_device_create(slot_node, NULL, parent); 533 pdev = of_platform_device_create(slot_node, NULL, parent);
534 of_node_put(slot_node);
535
536 return pdev;
533} 537}
534 538
535static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) 539static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 071693ebfe18..68760d4a5d3d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2177 dma_release_channel(host->tx_chan); 2177 dma_release_channel(host->tx_chan);
2178 dma_release_channel(host->rx_chan); 2178 dma_release_channel(host->rx_chan);
2179 2179
2180 dev_pm_clear_wake_irq(host->dev);
2180 pm_runtime_dont_use_autosuspend(host->dev); 2181 pm_runtime_dont_use_autosuspend(host->dev);
2181 pm_runtime_put_sync(host->dev); 2182 pm_runtime_put_sync(host->dev);
2182 pm_runtime_disable(host->dev); 2183 pm_runtime_disable(host->dev);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3533e918ea37..bfc4da660bb4 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
66 66
67 struct nvmet_req req; 67 struct nvmet_req req;
68 68
69 bool allocated;
69 u8 n_rdma; 70 u8 n_rdma;
70 u32 flags; 71 u32 flags;
71 u32 invalidate_rkey; 72 u32 invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
174 unsigned long flags; 175 unsigned long flags;
175 176
176 spin_lock_irqsave(&queue->rsps_lock, flags); 177 spin_lock_irqsave(&queue->rsps_lock, flags);
177 rsp = list_first_entry(&queue->free_rsps, 178 rsp = list_first_entry_or_null(&queue->free_rsps,
178 struct nvmet_rdma_rsp, free_list); 179 struct nvmet_rdma_rsp, free_list);
179 list_del(&rsp->free_list); 180 if (likely(rsp))
181 list_del(&rsp->free_list);
180 spin_unlock_irqrestore(&queue->rsps_lock, flags); 182 spin_unlock_irqrestore(&queue->rsps_lock, flags);
181 183
184 if (unlikely(!rsp)) {
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
186 if (unlikely(!rsp))
187 return NULL;
188 rsp->allocated = true;
189 }
190
182 return rsp; 191 return rsp;
183} 192}
184 193
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
187{ 196{
188 unsigned long flags; 197 unsigned long flags;
189 198
199 if (rsp->allocated) {
200 kfree(rsp);
201 return;
202 }
203
190 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); 204 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
191 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); 205 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
192 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); 206 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
776 790
777 cmd->queue = queue; 791 cmd->queue = queue;
778 rsp = nvmet_rdma_get_rsp(queue); 792 rsp = nvmet_rdma_get_rsp(queue);
793 if (unlikely(!rsp)) {
794 /*
795 * we get here only under memory pressure,
796 * silently drop and have the host retry
797 * as we can't even fail it.
798 */
799 nvmet_rdma_post_recv(queue->dev, cmd);
800 return;
801 }
779 rsp->queue = queue; 802 rsp->queue = queue;
780 rsp->cmd = cmd; 803 rsp->cmd = cmd;
781 rsp->flags = 0; 804 rsp->flags = 0;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 9095b8290150..74eaedd5b860 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -140,6 +140,9 @@ void of_populate_phandle_cache(void)
140 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) 140 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
141 phandles++; 141 phandles++;
142 142
143 if (!phandles)
144 goto out;
145
143 cache_entries = roundup_pow_of_two(phandles); 146 cache_entries = roundup_pow_of_two(phandles);
144 phandle_cache_mask = cache_entries - 1; 147 phandle_cache_mask = cache_entries - 1;
145 148
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7136e3430925..a938abdb41ce 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
496 u16 slot_status; 496 u16 slot_status;
497 int retval; 497 int retval;
498 498
499 /* Clear sticky power-fault bit from previous power failures */ 499 /* Clear power-fault bit from previous power failures */
500 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); 500 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
501 if (slot_status & PCI_EXP_SLTSTA_PFD) 501 if (slot_status & PCI_EXP_SLTSTA_PFD)
502 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, 502 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
646 pciehp_handle_button_press(slot); 646 pciehp_handle_button_press(slot);
647 } 647 }
648 648
649 /* Check Power Fault Detected */
650 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
651 ctrl->power_fault_detected = 1;
652 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
653 pciehp_set_attention_status(slot, 1);
654 pciehp_green_led_off(slot);
655 }
656
649 /* 657 /*
650 * Disable requests have higher priority than Presence Detect Changed 658 * Disable requests have higher priority than Presence Detect Changed
651 * or Data Link Layer State Changed events. 659 * or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
657 pciehp_handle_presence_or_link_change(slot, events); 665 pciehp_handle_presence_or_link_change(slot, events);
658 up_read(&ctrl->reset_lock); 666 up_read(&ctrl->reset_lock);
659 667
660 /* Check Power Fault Detected */
661 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
662 ctrl->power_fault_detected = 1;
663 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
664 pciehp_set_attention_status(slot, 1);
665 pciehp_green_led_off(slot);
666 }
667
668 pci_config_pm_runtime_put(pdev); 668 pci_config_pm_runtime_put(pdev);
669 wake_up(&ctrl->requester); 669 wake_up(&ctrl->requester);
670 return IRQ_HANDLED; 670 return IRQ_HANDLED;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ff9619b5fa..1835f3a7aa8d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4547 4547
4548 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); 4548 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4549} 4549}
4550EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4550 4551
4551static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 4552static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4552{ 4553{
@@ -5200,7 +5201,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
5200 */ 5201 */
5201int pci_reset_bus(struct pci_dev *pdev) 5202int pci_reset_bus(struct pci_dev *pdev)
5202{ 5203{
5203 return pci_probe_reset_slot(pdev->slot) ? 5204 return (!pci_probe_reset_slot(pdev->slot)) ?
5204 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); 5205 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5205} 5206}
5206EXPORT_SYMBOL_GPL(pci_reset_bus); 5207EXPORT_SYMBOL_GPL(pci_reset_bus);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ec784009a36b..201f9e5ff55c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2074{ 2074{
2075#ifdef CONFIG_PCI_PASID 2075#ifdef CONFIG_PCI_PASID
2076 struct pci_dev *bridge; 2076 struct pci_dev *bridge;
2077 int pcie_type;
2077 u32 cap; 2078 u32 cap;
2078 2079
2079 if (!pci_is_pcie(dev)) 2080 if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2083 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) 2084 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
2084 return; 2085 return;
2085 2086
2086 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 2087 pcie_type = pci_pcie_type(dev);
2088 if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
2089 pcie_type == PCI_EXP_TYPE_RC_END)
2087 dev->eetlp_prefix_path = 1; 2090 dev->eetlp_prefix_path = 1;
2088 else { 2091 else {
2089 bridge = pci_upstream_bridge(dev); 2092 bridge = pci_upstream_bridge(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ef7143a274e0..6bc27b7fd452 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4355 * 4355 *
4356 * 0x9d10-0x9d1b PCI Express Root port #{1-12} 4356 * 0x9d10-0x9d1b PCI Express Root port #{1-12}
4357 * 4357 *
4358 * The 300 series chipset suffers from the same bug so include those root
4359 * ports here as well.
4360 *
4361 * 0xa32c-0xa343 PCI Express Root port #{0-24}
4362 *
4363 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html 4358 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
4364 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html 4359 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
4365 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html 4360 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4377 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ 4372 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
4378 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ 4373 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
4379 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ 4374 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
4380 case 0xa32c ... 0xa343: /* 300 series */
4381 return true; 4375 return true;
4382 } 4376 }
4383 4377
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 9940cc70f38b..54a8b30dda38 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -14,6 +14,8 @@
14#include <linux/poll.h> 14#include <linux/poll.h>
15#include <linux/wait.h> 15#include <linux/wait.h>
16 16
17#include <linux/nospec.h>
18
17MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); 19MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
18MODULE_VERSION("0.1"); 20MODULE_VERSION("0.1");
19MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
909 default: 911 default:
910 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) 912 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
911 return -EINVAL; 913 return -EINVAL;
914 p.port = array_index_nospec(p.port,
915 ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
912 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); 916 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
913 break; 917 break;
914 } 918 }
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index ece41fb2848f..c4f4d904e4a6 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
1040 } 1040 }
1041 1041
1042 /* if the configuration is provided through pdata, apply it */ 1042 /* if the configuration is provided through pdata, apply it */
1043 if (pdata) { 1043 if (pdata && pdata->gpio_configs) {
1044 ret = pinctrl_register_mappings(pdata->gpio_configs, 1044 ret = pinctrl_register_mappings(pdata->gpio_configs,
1045 pdata->n_gpio_configs); 1045 pdata->n_gpio_configs);
1046 if (ret) { 1046 if (ret) {
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6a1b6058b991..628817c40e3b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
793 793
794 err = pinctrl_generic_add_group(jzpc->pctl, group->name, 794 err = pinctrl_generic_add_group(jzpc->pctl, group->name,
795 group->pins, group->num_pins, group->data); 795 group->pins, group->num_pins, group->data);
796 if (err) { 796 if (err < 0) {
797 dev_err(dev, "Failed to register group %s\n", 797 dev_err(dev, "Failed to register group %s\n",
798 group->name); 798 group->name);
799 return err; 799 return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
806 err = pinmux_generic_add_function(jzpc->pctl, func->name, 806 err = pinmux_generic_add_function(jzpc->pctl, func->name,
807 func->group_names, func->num_group_names, 807 func->group_names, func->num_group_names,
808 func->data); 808 func->data);
809 if (err) { 809 if (err < 0) {
810 dev_err(dev, "Failed to register function %s\n", 810 dev_err(dev, "Failed to register function %s\n",
811 func->name); 811 func->name);
812 return err; 812 return err;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2155a30c282b..5d72ffad32c2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
634 raw_spin_lock_irqsave(&pctrl->lock, flags); 634 raw_spin_lock_irqsave(&pctrl->lock, flags);
635 635
636 val = readl(pctrl->regs + g->intr_cfg_reg); 636 val = readl(pctrl->regs + g->intr_cfg_reg);
637 /*
638 * There are two bits that control interrupt forwarding to the CPU. The
639 * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
640 * latched into the interrupt status register when the hardware detects
641 * an irq that it's configured for (either edge for edge type or level
642 * for level type irq). The 'non-raw' status enable bit causes the
643 * hardware to assert the summary interrupt to the CPU if the latched
644 * status bit is set. There's a bug though, the edge detection logic
645 * seems to have a problem where toggling the RAW_STATUS_EN bit may
646 * cause the status bit to latch spuriously when there isn't any edge
647 * so we can't touch that bit for edge type irqs and we have to keep
648 * the bit set anyway so that edges are latched while the line is masked.
649 *
650 * To make matters more complicated, leaving the RAW_STATUS_EN bit
651 * enabled all the time causes level interrupts to re-latch into the
652 * status register because the level is still present on the line after
653 * we ack it. We clear the raw status enable bit during mask here and
654 * set the bit on unmask so the interrupt can't latch into the hardware
655 * while it's masked.
656 */
657 if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
658 val &= ~BIT(g->intr_raw_status_bit);
659
637 val &= ~BIT(g->intr_enable_bit); 660 val &= ~BIT(g->intr_enable_bit);
638 writel(val, pctrl->regs + g->intr_cfg_reg); 661 writel(val, pctrl->regs + g->intr_cfg_reg);
639 662
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
655 raw_spin_lock_irqsave(&pctrl->lock, flags); 678 raw_spin_lock_irqsave(&pctrl->lock, flags);
656 679
657 val = readl(pctrl->regs + g->intr_cfg_reg); 680 val = readl(pctrl->regs + g->intr_cfg_reg);
681 val |= BIT(g->intr_raw_status_bit);
658 val |= BIT(g->intr_enable_bit); 682 val |= BIT(g->intr_enable_bit);
659 writel(val, pctrl->regs + g->intr_cfg_reg); 683 writel(val, pctrl->regs + g->intr_cfg_reg);
660 684
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec891bc7d10a..f039266b275d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
872 if (bits & 0x07) 872 if (bits & 0x07)
873 return -EINVAL; 873 return -EINVAL;
874 874
875 memset(bitmap, 0, bits / 8);
876
877 if (str[0] == '0' && str[1] == 'x') 875 if (str[0] == '0' && str[1] == 'x')
878 str++; 876 str++;
879 if (*str == 'x') 877 if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
895} 893}
896 894
897/* 895/*
898 * str2clrsetmasks() - parse bitmask argument and set the clear and 896 * modify_bitmap() - parse bitmask argument and modify an existing
899 * the set bitmap mask. A concatenation (done with ',') of these terms 897 * bit mask accordingly. A concatenation (done with ',') of these
900 * is recognized: 898 * terms is recognized:
901 * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>] 899 * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
902 * <bitnr> may be any valid number (hex, decimal or octal) in the range 900 * <bitnr> may be any valid number (hex, decimal or octal) in the range
903 * 0...bits-1; the leading + or - is required. Here are some examples: 901 * 0...bits-1; the leading + or - is required. Here are some examples:
904 * +0-15,+32,-128,-0xFF 902 * +0-15,+32,-128,-0xFF
905 * -0-255,+1-16,+0x128 903 * -0-255,+1-16,+0x128
906 * +1,+2,+3,+4,-5,-7-10 904 * +1,+2,+3,+4,-5,-7-10
907 * Returns a clear and a set bitmask. Every positive value in the string 905 * Returns the new bitmap after all changes have been applied. Every
908 * results in a bit set in the set mask and every negative value in the 906 * positive value in the string will set a bit and every negative value
909 * string results in a bit SET in the clear mask. As a bit may be touched 907 * in the string will clear a bit. As a bit may be touched more than once,
910 * more than once, the last 'operation' wins: +0-255,-128 = all but bit 908 * the last 'operation' wins:
911 * 128 set in the set mask, only bit 128 set in the clear mask. 909 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
910 * cleared again. All other bits are unmodified.
912 */ 911 */
913static int str2clrsetmasks(const char *str, 912static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
914 unsigned long *clrmap,
915 unsigned long *setmap,
916 int bits)
917{ 913{
918 int a, i, z; 914 int a, i, z;
919 char *np, sign; 915 char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
922 if (bits & 0x07) 918 if (bits & 0x07)
923 return -EINVAL; 919 return -EINVAL;
924 920
925 memset(clrmap, 0, bits / 8);
926 memset(setmap, 0, bits / 8);
927
928 while (*str) { 921 while (*str) {
929 sign = *str++; 922 sign = *str++;
930 if (sign != '+' && sign != '-') 923 if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
940 str = np; 933 str = np;
941 } 934 }
942 for (i = a; i <= z; i++) 935 for (i = a; i <= z; i++)
943 if (sign == '+') { 936 if (sign == '+')
944 set_bit_inv(i, setmap); 937 set_bit_inv(i, bitmap);
945 clear_bit_inv(i, clrmap); 938 else
946 } else { 939 clear_bit_inv(i, bitmap);
947 clear_bit_inv(i, setmap);
948 set_bit_inv(i, clrmap);
949 }
950 while (*str == ',' || *str == '\n') 940 while (*str == ',' || *str == '\n')
951 str++; 941 str++;
952 } 942 }
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
970 unsigned long *bitmap, int bits, 960 unsigned long *bitmap, int bits,
971 struct mutex *lock) 961 struct mutex *lock)
972{ 962{
973 int i; 963 unsigned long *newmap, size;
964 int rc;
974 965
975 /* bits needs to be a multiple of 8 */ 966 /* bits needs to be a multiple of 8 */
976 if (bits & 0x07) 967 if (bits & 0x07)
977 return -EINVAL; 968 return -EINVAL;
978 969
970 size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
971 newmap = kmalloc(size, GFP_KERNEL);
972 if (!newmap)
973 return -ENOMEM;
974 if (mutex_lock_interruptible(lock)) {
975 kfree(newmap);
976 return -ERESTARTSYS;
977 }
978
979 if (*str == '+' || *str == '-') { 979 if (*str == '+' || *str == '-') {
980 DECLARE_BITMAP(clrm, bits); 980 memcpy(newmap, bitmap, size);
981 DECLARE_BITMAP(setm, bits); 981 rc = modify_bitmap(str, newmap, bits);
982
983 i = str2clrsetmasks(str, clrm, setm, bits);
984 if (i)
985 return i;
986 if (mutex_lock_interruptible(lock))
987 return -ERESTARTSYS;
988 for (i = 0; i < bits; i++) {
989 if (test_bit_inv(i, clrm))
990 clear_bit_inv(i, bitmap);
991 if (test_bit_inv(i, setm))
992 set_bit_inv(i, bitmap);
993 }
994 } else { 982 } else {
995 DECLARE_BITMAP(setm, bits); 983 memset(newmap, 0, size);
996 984 rc = hex2bitmap(str, newmap, bits);
997 i = hex2bitmap(str, setm, bits);
998 if (i)
999 return i;
1000 if (mutex_lock_interruptible(lock))
1001 return -ERESTARTSYS;
1002 for (i = 0; i < bits; i++)
1003 if (test_bit_inv(i, setm))
1004 set_bit_inv(i, bitmap);
1005 else
1006 clear_bit_inv(i, bitmap);
1007 } 985 }
986 if (rc == 0)
987 memcpy(bitmap, newmap, size);
1008 mutex_unlock(lock); 988 mutex_unlock(lock);
1009 989 kfree(newmap);
1010 return 0; 990 return rc;
1011} 991}
1012 992
1013/* 993/*
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7cb3ab0a35a0..3082e72e4f6c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -30,7 +30,11 @@
30 30
31#define DRIVER_NAME "fsl-dspi" 31#define DRIVER_NAME "fsl-dspi"
32 32
33#ifdef CONFIG_M5441x
34#define DSPI_FIFO_SIZE 16
35#else
33#define DSPI_FIFO_SIZE 4 36#define DSPI_FIFO_SIZE 4
37#endif
34#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) 38#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
35 39
36#define SPI_MCR 0x00 40#define SPI_MCR 0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
623static void dspi_eoq_write(struct fsl_dspi *dspi) 627static void dspi_eoq_write(struct fsl_dspi *dspi)
624{ 628{
625 int fifo_size = DSPI_FIFO_SIZE; 629 int fifo_size = DSPI_FIFO_SIZE;
630 u16 xfer_cmd = dspi->tx_cmd;
626 631
627 /* Fill TX FIFO with as many transfers as possible */ 632 /* Fill TX FIFO with as many transfers as possible */
628 while (dspi->len && fifo_size--) { 633 while (dspi->len && fifo_size--) {
634 dspi->tx_cmd = xfer_cmd;
629 /* Request EOQF for last transfer in FIFO */ 635 /* Request EOQF for last transfer in FIFO */
630 if (dspi->len == dspi->bytes_per_word || fifo_size == 0) 636 if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
631 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; 637 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ec395a6baf9c..9da0bc5a036c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
2143 */ 2143 */
2144 if (ctlr->num_chipselect == 0) 2144 if (ctlr->num_chipselect == 0)
2145 return -EINVAL; 2145 return -EINVAL;
2146 /* allocate dynamic bus number using Linux idr */ 2146 if (ctlr->bus_num >= 0) {
2147 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { 2147 /* devices with a fixed bus num must check-in with the num */
2148 mutex_lock(&board_lock);
2149 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2150 ctlr->bus_num + 1, GFP_KERNEL);
2151 mutex_unlock(&board_lock);
2152 if (WARN(id < 0, "couldn't get idr"))
2153 return id == -ENOSPC ? -EBUSY : id;
2154 ctlr->bus_num = id;
2155 } else if (ctlr->dev.of_node) {
2156 /* allocate dynamic bus number using Linux idr */
2148 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2157 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2149 if (id >= 0) { 2158 if (id >= 0) {
2150 ctlr->bus_num = id; 2159 ctlr->bus_num = id;
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 96f614934df1..663b755bf2fb 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -2,7 +2,7 @@
2 2
3config EROFS_FS 3config EROFS_FS
4 tristate "EROFS filesystem support" 4 tristate "EROFS filesystem support"
5 depends on BROKEN 5 depends on BLOCK
6 help 6 help
7 EROFS(Enhanced Read-Only File System) is a lightweight 7 EROFS(Enhanced Read-Only File System) is a lightweight
8 read-only file system with modern designs (eg. page-sized 8 read-only file system with modern designs (eg. page-sized
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 1aec509c805f..2df9768edac9 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
340 goto err_sbread; 340 goto err_sbread;
341 341
342 sb->s_magic = EROFS_SUPER_MAGIC; 342 sb->s_magic = EROFS_SUPER_MAGIC;
343 sb->s_flags |= MS_RDONLY | MS_NOATIME; 343 sb->s_flags |= SB_RDONLY | SB_NOATIME;
344 sb->s_maxbytes = MAX_LFS_FILESIZE; 344 sb->s_maxbytes = MAX_LFS_FILESIZE;
345 sb->s_time_gran = 1; 345 sb->s_time_gran = 1;
346 346
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
627{ 627{
628 BUG_ON(!sb_rdonly(sb)); 628 BUG_ON(!sb_rdonly(sb));
629 629
630 *flags |= MS_RDONLY; 630 *flags |= SB_RDONLY;
631 return 0; 631 return 0;
632} 632}
633 633
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
index 7e64c7e438f0..a9f4802bb6be 100644
--- a/drivers/staging/fbtft/TODO
+++ b/drivers/staging/fbtft/TODO
@@ -2,3 +2,7 @@
2 GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO 2 GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
3 lines from device tree, ACPI or board files, board files should 3 lines from device tree, ACPI or board files, board files should
4 use <linux/gpio/machine.h> 4 use <linux/gpio/machine.h>
5
6* convert all these over to drm_simple_display_pipe and submit for inclusion
7 into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
8 drivers anymore.
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO
index 6ff8e01b04cc..5b1865f8af2d 100644
--- a/drivers/staging/gasket/TODO
+++ b/drivers/staging/gasket/TODO
@@ -1,9 +1,22 @@
1This is a list of things that need to be done to get this driver out of the 1This is a list of things that need to be done to get this driver out of the
2staging directory. 2staging directory.
3
4- Implement the gasket framework's functionality through UIO instead of
5 introducing a new user-space drivers framework that is quite similar.
6
7 UIO provides the necessary bits to implement user-space drivers. Meanwhile
8 the gasket APIs adds some extra conveniences like PCI BAR mapping, and
9 MSI interrupts. Add these features to the UIO subsystem, then re-implement
10 the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h)
11
3- Document sysfs files with Documentation/ABI/ entries. 12- Document sysfs files with Documentation/ABI/ entries.
13
4- Use misc interface instead of major number for driver version description. 14- Use misc interface instead of major number for driver version description.
15
5- Add descriptions of module_param's 16- Add descriptions of module_param's
17
6- apex_get_status() should actually check status. 18- apex_get_status() should actually check status.
19
7- "drivers" should never be dealing with "raw" sysfs calls or mess around with 20- "drivers" should never be dealing with "raw" sysfs calls or mess around with
8 kobjects at all. The driver core should handle all of this for you 21 kobjects at all. The driver core should handle all of this for you
9 automaically. There should not be a need for raw attribute macros. 22 automaically. There should not be a need for raw attribute macros.
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index da92c493f157..69cc508af1bc 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
59 ret = PTR_ERR(dev); 59 ret = PTR_ERR(dev);
60 goto err_drv_alloc; 60 goto err_drv_alloc;
61 } 61 }
62
63 ret = pci_enable_device(pdev);
64 if (ret)
65 goto err_pci_enable;
66
62 dev->pdev = pdev; 67 dev->pdev = pdev;
63 pci_set_drvdata(pdev, dev); 68 pci_set_drvdata(pdev, dev);
64 69
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
75 err_drv_dev_register: 80 err_drv_dev_register:
76 vbox_driver_unload(dev); 81 vbox_driver_unload(dev);
77 err_vbox_driver_load: 82 err_vbox_driver_load:
83 pci_disable_device(pdev);
84 err_pci_enable:
78 drm_dev_put(dev); 85 drm_dev_put(dev);
79 err_drv_alloc: 86 err_drv_alloc:
80 return ret; 87 return ret;
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index a83eac8668d0..79836c8fb909 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
323 if (rc) 323 if (rc)
324 return rc; 324 return rc;
325 325
326 mutex_lock(&vbox->hw_mutex);
327 vbox_set_view(crtc);
328 vbox_do_modeset(crtc, &crtc->mode);
329 mutex_unlock(&vbox->hw_mutex);
330
326 spin_lock_irqsave(&drm->event_lock, flags); 331 spin_lock_irqsave(&drm->event_lock, flags);
327 332
328 if (event) 333 if (event)
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index f7b07c0b5ce2..ee7e26b886a5 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_WILC1000) += wilc1000.o
2 3
3ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ 4ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
4 -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" 5 -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
11 wilc_wlan.o 12 wilc_wlan.o
12 13
13obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o 14obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
14wilc1000-sdio-objs += $(wilc1000-objs)
15wilc1000-sdio-objs += wilc_sdio.o 15wilc1000-sdio-objs += wilc_sdio.o
16 16
17obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o 17obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
18wilc1000-spi-objs += $(wilc1000-objs)
19wilc1000-spi-objs += wilc_spi.o 18wilc1000-spi-objs += wilc_spi.o
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 01cf4bd2e192..3b8d237decbf 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
1038 } 1038 }
1039 1039
1040 kfree(wilc); 1040 kfree(wilc);
1041 wilc_debugfs_remove();
1042} 1041}
1042EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
1043 1043
1044static const struct net_device_ops wilc_netdev_ops = { 1044static const struct net_device_ops wilc_netdev_ops = {
1045 .ndo_init = mac_init_fn, 1045 .ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1062 if (!wl) 1062 if (!wl)
1063 return -ENOMEM; 1063 return -ENOMEM;
1064 1064
1065 wilc_debugfs_init();
1066 *wilc = wl; 1065 *wilc = wl;
1067 wl->io_type = io_type; 1066 wl->io_type = io_type;
1068 wl->hif_func = ops; 1067 wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1124 1123
1125 return 0; 1124 return 0;
1126} 1125}
1126EXPORT_SYMBOL_GPL(wilc_netdev_init);
1127
1128MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index edc72876458d..8001df66b8c2 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
19 19
20#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) 20#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
21static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); 21static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
22EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
22 23
23static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, 24static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
24 size_t count, loff_t *ppos) 25 size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
87 }, 88 },
88}; 89};
89 90
90int wilc_debugfs_init(void) 91static int __init wilc_debugfs_init(void)
91{ 92{
92 int i; 93 int i;
93 struct wilc_debugfs_info_t *info; 94 struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
103 } 104 }
104 return 0; 105 return 0;
105} 106}
107module_init(wilc_debugfs_init);
106 108
107void wilc_debugfs_remove(void) 109static void __exit wilc_debugfs_remove(void)
108{ 110{
109 debugfs_remove_recursive(wilc_dir); 111 debugfs_remove_recursive(wilc_dir);
110} 112}
113module_exit(wilc_debugfs_remove);
111 114
112#endif 115#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 6787b6e9f124..8b184aa30d25 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
417 wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); 417 wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
418 wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); 418 wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
419} 419}
420EXPORT_SYMBOL_GPL(chip_allow_sleep);
420 421
421void chip_wakeup(struct wilc *wilc) 422void chip_wakeup(struct wilc *wilc)
422{ 423{
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
471 } 472 }
472 chip_ps_state = CHIP_WAKEDUP; 473 chip_ps_state = CHIP_WAKEDUP;
473} 474}
475EXPORT_SYMBOL_GPL(chip_wakeup);
474 476
475void wilc_chip_sleep_manually(struct wilc *wilc) 477void wilc_chip_sleep_manually(struct wilc *wilc)
476{ 478{
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
484 chip_ps_state = CHIP_SLEEPING_MANUAL; 486 chip_ps_state = CHIP_SLEEPING_MANUAL;
485 release_bus(wilc, RELEASE_ONLY); 487 release_bus(wilc, RELEASE_ONLY);
486} 488}
489EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
487 490
488void host_wakeup_notify(struct wilc *wilc) 491void host_wakeup_notify(struct wilc *wilc)
489{ 492{
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
491 wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); 494 wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
492 release_bus(wilc, RELEASE_ONLY); 495 release_bus(wilc, RELEASE_ONLY);
493} 496}
497EXPORT_SYMBOL_GPL(host_wakeup_notify);
494 498
495void host_sleep_notify(struct wilc *wilc) 499void host_sleep_notify(struct wilc *wilc)
496{ 500{
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
498 wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); 502 wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
499 release_bus(wilc, RELEASE_ONLY); 503 release_bus(wilc, RELEASE_ONLY);
500} 504}
505EXPORT_SYMBOL_GPL(host_sleep_notify);
501 506
502int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) 507int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
503{ 508{
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
871 876
872 release_bus(wilc, RELEASE_ALLOW_SLEEP); 877 release_bus(wilc, RELEASE_ALLOW_SLEEP);
873} 878}
879EXPORT_SYMBOL_GPL(wilc_handle_isr);
874 880
875int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, 881int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
876 u32 buffer_size) 882 u32 buffer_size)
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 00d13b153f80..b81a73b9bd67 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -831,6 +831,4 @@ struct wilc;
831int wilc_wlan_init(struct net_device *dev); 831int wilc_wlan_init(struct net_device *dev);
832u32 wilc_get_chipid(struct wilc *wilc, bool update); 832u32 wilc_get_chipid(struct wilc *wilc, bool update);
833 833
834int wilc_debugfs_init(void);
835void wilc_debugfs_remove(void);
836#endif 834#endif
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 5414c4a87bea..27284a2dcd2b 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
522 return -EIO; 522 return -EIO;
523 523
524 while (count > 0) { 524 while (count > 0) {
525 int ret = 0;
526
525 spin_lock_irqsave(&hp->lock, flags); 527 spin_lock_irqsave(&hp->lock, flags);
526 528
527 rsize = hp->outbuf_size - hp->n_outbuf; 529 rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
537 } 539 }
538 540
539 if (hp->n_outbuf > 0) 541 if (hp->n_outbuf > 0)
540 hvc_push(hp); 542 ret = hvc_push(hp);
541 543
542 spin_unlock_irqrestore(&hp->lock, flags); 544 spin_unlock_irqrestore(&hp->lock, flags);
543 545
546 if (!ret)
547 break;
548
544 if (count) { 549 if (count) {
545 if (hp->n_outbuf > 0) 550 if (hp->n_outbuf > 0)
546 hvc_flush(hp); 551 hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
623#define MAX_TIMEOUT (2000) 628#define MAX_TIMEOUT (2000)
624static u32 timeout = MIN_TIMEOUT; 629static u32 timeout = MIN_TIMEOUT;
625 630
631/*
632 * Maximum number of bytes to get from the console driver if hvc_poll is
633 * called from driver (and can't sleep). Any more than this and we break
 634 * and start polling with khvcd. This value was derived from an OpenBMC
635 * console with the OPAL driver that results in about 0.25ms interrupts off
636 * latency.
637 */
638#define HVC_ATOMIC_READ_MAX 128
639
626#define HVC_POLL_READ 0x00000001 640#define HVC_POLL_READ 0x00000001
627#define HVC_POLL_WRITE 0x00000002 641#define HVC_POLL_WRITE 0x00000002
628 642
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
669 if (!hp->irq_requested) 683 if (!hp->irq_requested)
670 poll_mask |= HVC_POLL_READ; 684 poll_mask |= HVC_POLL_READ;
671 685
686 read_again:
672 /* Read data if any */ 687 /* Read data if any */
673
674 count = tty_buffer_request_room(&hp->port, N_INBUF); 688 count = tty_buffer_request_room(&hp->port, N_INBUF);
675 689
676 /* If flip is full, just reschedule a later read */ 690 /* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
717#endif /* CONFIG_MAGIC_SYSRQ */ 731#endif /* CONFIG_MAGIC_SYSRQ */
718 tty_insert_flip_char(&hp->port, buf[i], 0); 732 tty_insert_flip_char(&hp->port, buf[i], 0);
719 } 733 }
720 if (n == count) 734 read_total += n;
721 poll_mask |= HVC_POLL_READ; 735
722 read_total = n; 736 if (may_sleep) {
737 /* Keep going until the flip is full */
738 spin_unlock_irqrestore(&hp->lock, flags);
739 cond_resched();
740 spin_lock_irqsave(&hp->lock, flags);
741 goto read_again;
742 } else if (read_total < HVC_ATOMIC_READ_MAX) {
743 /* Break and defer if it's a large read in atomic */
744 goto read_again;
745 }
746
747 /*
748 * Latency break, schedule another poll immediately.
749 */
750 poll_mask |= HVC_POLL_READ;
723 751
724 out: 752 out:
725 /* Wakeup write queue if necessary */ 753 /* Wakeup write queue if necessary */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 27346d69f393..f9b40a9dc4d3 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty,
780 } 780 }
781 781
782 if (acm->susp_count) { 782 if (acm->susp_count) {
783 if (acm->putbuffer) {
784 /* now to preserve order */
785 usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
786 acm->putbuffer = NULL;
787 }
788 usb_anchor_urb(wb->urb, &acm->delayed); 783 usb_anchor_urb(wb->urb, &acm->delayed);
789 spin_unlock_irqrestore(&acm->write_lock, flags); 784 spin_unlock_irqrestore(&acm->write_lock, flags);
790 return count; 785 return count;
791 } else {
792 if (acm->putbuffer) {
793 /* at this point there is no good way to handle errors */
794 acm_start_wb(acm, acm->putbuffer);
795 acm->putbuffer = NULL;
796 }
797 } 786 }
798 787
799 stat = acm_start_wb(acm, wb); 788 stat = acm_start_wb(acm, wb);
@@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty,
804 return count; 793 return count;
805} 794}
806 795
807static void acm_tty_flush_chars(struct tty_struct *tty)
808{
809 struct acm *acm = tty->driver_data;
810 struct acm_wb *cur;
811 int err;
812 unsigned long flags;
813
814 spin_lock_irqsave(&acm->write_lock, flags);
815
816 cur = acm->putbuffer;
817 if (!cur) /* nothing to do */
818 goto out;
819
820 acm->putbuffer = NULL;
821 err = usb_autopm_get_interface_async(acm->control);
822 if (err < 0) {
823 cur->use = 0;
824 acm->putbuffer = cur;
825 goto out;
826 }
827
828 if (acm->susp_count)
829 usb_anchor_urb(cur->urb, &acm->delayed);
830 else
831 acm_start_wb(acm, cur);
832out:
833 spin_unlock_irqrestore(&acm->write_lock, flags);
834 return;
835}
836
837static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
838{
839 struct acm *acm = tty->driver_data;
840 struct acm_wb *cur;
841 int wbn;
842 unsigned long flags;
843
844overflow:
845 cur = acm->putbuffer;
846 if (!cur) {
847 spin_lock_irqsave(&acm->write_lock, flags);
848 wbn = acm_wb_alloc(acm);
849 if (wbn >= 0) {
850 cur = &acm->wb[wbn];
851 acm->putbuffer = cur;
852 }
853 spin_unlock_irqrestore(&acm->write_lock, flags);
854 if (!cur)
855 return 0;
856 }
857
858 if (cur->len == acm->writesize) {
859 acm_tty_flush_chars(tty);
860 goto overflow;
861 }
862
863 cur->buf[cur->len++] = ch;
864 return 1;
865}
866
867static int acm_tty_write_room(struct tty_struct *tty) 796static int acm_tty_write_room(struct tty_struct *tty)
868{ 797{
869 struct acm *acm = tty->driver_data; 798 struct acm *acm = tty->driver_data;
@@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = {
1987 .cleanup = acm_tty_cleanup, 1916 .cleanup = acm_tty_cleanup,
1988 .hangup = acm_tty_hangup, 1917 .hangup = acm_tty_hangup,
1989 .write = acm_tty_write, 1918 .write = acm_tty_write,
1990 .put_char = acm_tty_put_char,
1991 .flush_chars = acm_tty_flush_chars,
1992 .write_room = acm_tty_write_room, 1919 .write_room = acm_tty_write_room,
1993 .ioctl = acm_tty_ioctl, 1920 .ioctl = acm_tty_ioctl,
1994 .throttle = acm_tty_throttle, 1921 .throttle = acm_tty_throttle,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index eacc116e83da..ca06b20d7af9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
96 unsigned long read_urbs_free; 96 unsigned long read_urbs_free;
97 struct urb *read_urbs[ACM_NR]; 97 struct urb *read_urbs[ACM_NR];
98 struct acm_rb read_buffers[ACM_NR]; 98 struct acm_rb read_buffers[ACM_NR];
99 struct acm_wb *putbuffer; /* for acm_tty_put_char() */
100 int rx_buflimit; 99 int rx_buflimit;
101 spinlock_t read_lock; 100 spinlock_t read_lock;
102 u8 *notification_buffer; /* to reassemble fragmented notifications */ 101 u8 *notification_buffer; /* to reassemble fragmented notifications */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index bec581fb7c63..656d247819c9 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
460 460
461 set_bit(WDM_RESPONDING, &desc->flags); 461 set_bit(WDM_RESPONDING, &desc->flags);
462 spin_unlock_irq(&desc->iuspin); 462 spin_unlock_irq(&desc->iuspin);
463 rv = usb_submit_urb(desc->response, GFP_KERNEL); 463 rv = usb_submit_urb(desc->response, GFP_ATOMIC);
464 spin_lock_irq(&desc->iuspin); 464 spin_lock_irq(&desc->iuspin);
465 if (rv) { 465 if (rv) {
466 dev_err(&desc->intf->dev, 466 dev_err(&desc->intf->dev,
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 50a2362ed3ea..48277bbc15e4 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
246} 246}
247EXPORT_SYMBOL_GPL(of_usb_update_otg_caps); 247EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
248 248
249/**
250 * usb_of_get_companion_dev - Find the companion device
251 * @dev: the device pointer to find a companion
252 *
253 * Find the companion device from platform bus.
254 *
255 * Takes a reference to the returned struct device which needs to be dropped
256 * after use.
257 *
258 * Return: On success, a pointer to the companion device, %NULL on failure.
259 */
260struct device *usb_of_get_companion_dev(struct device *dev)
261{
262 struct device_node *node;
263 struct platform_device *pdev = NULL;
264
265 node = of_parse_phandle(dev->of_node, "companion", 0);
266 if (node)
267 pdev = of_find_device_by_node(node);
268
269 of_node_put(node);
270
271 return pdev ? &pdev->dev : NULL;
272}
273EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
249#endif 274#endif
250 275
251MODULE_LICENSE("GPL"); 276MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 66fe1b78d952..03432467b05f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
515 event == PM_EVENT_RESTORE); 515 event == PM_EVENT_RESTORE);
516 if (retval) { 516 if (retval) {
517 dev_err(dev, "PCI post-resume error %d!\n", retval); 517 dev_err(dev, "PCI post-resume error %d!\n", retval);
518 if (hcd->shared_hcd)
519 usb_hc_died(hcd->shared_hcd);
520 usb_hc_died(hcd); 518 usb_hc_died(hcd);
521 } 519 }
522 } 520 }
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 228672f2c4a1..bfa5eda0cc26 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
1341 * is submitted that needs that bandwidth. Some other operating systems 1341 * is submitted that needs that bandwidth. Some other operating systems
1342 * allocate bandwidth early, when a configuration is chosen. 1342 * allocate bandwidth early, when a configuration is chosen.
1343 * 1343 *
1344 * xHCI reserves bandwidth and configures the alternate setting in
1345 * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
1346 * may be disabled. Drivers cannot rely on any particular alternate
1347 * setting being in effect after a failure.
1348 *
1344 * This call is synchronous, and may not be used in an interrupt context. 1349 * This call is synchronous, and may not be used in an interrupt context.
1345 * Also, drivers must not change altsettings while urbs are scheduled for 1350 * Also, drivers must not change altsettings while urbs are scheduled for
1346 * endpoints in that interface; all such urbs must first be completed 1351 * endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1376 alternate); 1381 alternate);
1377 return -EINVAL; 1382 return -EINVAL;
1378 } 1383 }
1384 /*
1385 * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
1386 * including freeing dropped endpoint ring buffers.
1387 * Make sure the interface endpoints are flushed before that
1388 */
1389 usb_disable_interface(dev, iface, false);
1379 1390
1380 /* Make sure we have enough bandwidth for this alternate interface. 1391 /* Make sure we have enough bandwidth for this alternate interface.
1381 * Remove the current alt setting and add the new alt setting. 1392 * Remove the current alt setting and add the new alt setting.
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index fd77442c2d12..651708d8c908 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
105 return NULL; 105 return NULL;
106} 106}
107EXPORT_SYMBOL_GPL(usb_of_get_interface_node); 107EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
108
109/**
110 * usb_of_get_companion_dev - Find the companion device
111 * @dev: the device pointer to find a companion
112 *
113 * Find the companion device from platform bus.
114 *
115 * Takes a reference to the returned struct device which needs to be dropped
116 * after use.
117 *
118 * Return: On success, a pointer to the companion device, %NULL on failure.
119 */
120struct device *usb_of_get_companion_dev(struct device *dev)
121{
122 struct device_node *node;
123 struct platform_device *pdev = NULL;
124
125 node = of_parse_phandle(dev->of_node, "companion", 0);
126 if (node)
127 pdev = of_find_device_by_node(node);
128
129 of_node_put(node);
130
131 return pdev ? &pdev->dev : NULL;
132}
133EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 097057d2eacf..e77dfe5ed5ec 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -178,6 +178,10 @@ static const struct usb_device_id usb_quirk_list[] = {
178 /* CBM - Flash disk */ 178 /* CBM - Flash disk */
179 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 179 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
180 180
181 /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
182 { USB_DEVICE(0x0218, 0x0201), .driver_info =
183 USB_QUIRK_CONFIG_INTF_STRINGS },
184
181 /* WORLDE easy key (easykey.25) MIDI controller */ 185 /* WORLDE easy key (easykey.25) MIDI controller */
182 { USB_DEVICE(0x0218, 0x0401), .driver_info = 186 { USB_DEVICE(0x0218, 0x0401), .driver_info =
183 USB_QUIRK_CONFIG_INTF_STRINGS }, 187 USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +410,9 @@ static const struct usb_device_id usb_quirk_list[] = {
406 { USB_DEVICE(0x2040, 0x7200), .driver_info = 410 { USB_DEVICE(0x2040, 0x7200), .driver_info =
407 USB_QUIRK_CONFIG_INTF_STRINGS }, 411 USB_QUIRK_CONFIG_INTF_STRINGS },
408 412
413 /* DJI CineSSD */
414 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
415
409 /* INTEL VALUE SSD */ 416 /* INTEL VALUE SSD */
410 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, 417 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
411 418
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 9a53a58e676e..577642895b57 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
412 dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", 412 dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
413 (unsigned long)res->start, hsotg->regs); 413 (unsigned long)res->start, hsotg->regs);
414 414
415 hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
416
417 retval = dwc2_lowlevel_hw_init(hsotg); 415 retval = dwc2_lowlevel_hw_init(hsotg);
418 if (retval) 416 if (retval)
419 return retval; 417 return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
438 if (retval) 436 if (retval)
439 return retval; 437 return retval;
440 438
439 hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
440
441 retval = dwc2_get_dr_mode(hsotg); 441 retval = dwc2_get_dr_mode(hsotg);
442 if (retval) 442 if (retval)
443 goto error; 443 goto error;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 40bf9e0bbc59..4c2771c5e727 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
180 return 0; 180 return 0;
181} 181}
182 182
183#ifdef CONFIG_PM 183static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
184static int dwc3_of_simple_runtime_suspend(struct device *dev)
185{ 184{
186 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 185 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
187 int i; 186 int i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
192 return 0; 191 return 0;
193} 192}
194 193
195static int dwc3_of_simple_runtime_resume(struct device *dev) 194static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
196{ 195{
197 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 196 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
198 int ret; 197 int ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
210 return 0; 209 return 0;
211} 210}
212 211
213static int dwc3_of_simple_suspend(struct device *dev) 212static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
214{ 213{
215 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 214 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
216 215
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
220 return 0; 219 return 0;
221} 220}
222 221
223static int dwc3_of_simple_resume(struct device *dev) 222static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
224{ 223{
225 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 224 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
226 225
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
229 228
230 return 0; 229 return 0;
231} 230}
232#endif
233 231
234static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { 232static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
235 SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume) 233 SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 5edd79470368..1286076a8890 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
85 u32 value; 85 u32 value;
86 86
87 reg = pcim_iomap(pci, GP_RWBAR, 0); 87 reg = pcim_iomap(pci, GP_RWBAR, 0);
88 if (IS_ERR(reg)) 88 if (!reg)
89 return PTR_ERR(reg); 89 return -ENOMEM;
90 90
91 value = readl(reg + GP_RWREG1); 91 value = readl(reg + GP_RWREG1);
92 if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE)) 92 if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 032ea7d709ba..2b53194081ba 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
473 473
474/** 474/**
475 * dwc3_gadget_start_config - configure ep resources 475 * dwc3_gadget_start_config - configure ep resources
476 * @dwc: pointer to our controller context structure
477 * @dep: endpoint that is being enabled 476 * @dep: endpoint that is being enabled
478 * 477 *
479 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's 478 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 53a48f561458..587c5037ff07 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
1063static int fotg210_udc_remove(struct platform_device *pdev) 1063static int fotg210_udc_remove(struct platform_device *pdev)
1064{ 1064{
1065 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); 1065 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
1066 int i;
1066 1067
1067 usb_del_gadget_udc(&fotg210->gadget); 1068 usb_del_gadget_udc(&fotg210->gadget);
1068 iounmap(fotg210->reg); 1069 iounmap(fotg210->reg);
1069 free_irq(platform_get_irq(pdev, 0), fotg210); 1070 free_irq(platform_get_irq(pdev, 0), fotg210);
1070 1071
1071 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); 1072 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1073 for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1074 kfree(fotg210->ep[i]);
1072 kfree(fotg210); 1075 kfree(fotg210);
1073 1076
1074 return 0; 1077 return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1099 /* initialize udc */ 1102 /* initialize udc */
1100 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); 1103 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
1101 if (fotg210 == NULL) 1104 if (fotg210 == NULL)
1102 goto err_alloc; 1105 goto err;
1103 1106
1104 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { 1107 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
1105 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); 1108 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1111 fotg210->reg = ioremap(res->start, resource_size(res)); 1114 fotg210->reg = ioremap(res->start, resource_size(res));
1112 if (fotg210->reg == NULL) { 1115 if (fotg210->reg == NULL) {
1113 pr_err("ioremap error.\n"); 1116 pr_err("ioremap error.\n");
1114 goto err_map; 1117 goto err_alloc;
1115 } 1118 }
1116 1119
1117 spin_lock_init(&fotg210->lock); 1120 spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1159 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, 1162 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
1160 GFP_KERNEL); 1163 GFP_KERNEL);
1161 if (fotg210->ep0_req == NULL) 1164 if (fotg210->ep0_req == NULL)
1162 goto err_req; 1165 goto err_map;
1163 1166
1164 fotg210_init(fotg210); 1167 fotg210_init(fotg210);
1165 1168
@@ -1187,12 +1190,14 @@ err_req:
1187 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); 1190 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1188 1191
1189err_map: 1192err_map:
1190 if (fotg210->reg) 1193 iounmap(fotg210->reg);
1191 iounmap(fotg210->reg);
1192 1194
1193err_alloc: 1195err_alloc:
1196 for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1197 kfree(fotg210->ep[i]);
1194 kfree(fotg210); 1198 kfree(fotg210);
1195 1199
1200err:
1196 return ret; 1201 return ret;
1197} 1202}
1198 1203
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 318246d8b2e2..b02ab2a8d927 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1545 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1545 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1546 } else { 1546 } else {
1547 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1547 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1548 stop_activity(dev, dev->driver); 1548 stop_activity(dev, NULL);
1549 } 1549 }
1550 1550
1551 spin_unlock_irqrestore(&dev->lock, flags); 1551 spin_unlock_irqrestore(&dev->lock, flags);
1552 1552
1553 if (!is_on && dev->driver)
1554 dev->driver->disconnect(&dev->gadget);
1555
1553 return 0; 1556 return 0;
1554} 1557}
1555 1558
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2466 nuke(&dev->ep[i]); 2469 nuke(&dev->ep[i]);
2467 2470
2468 /* report disconnect; the driver is already quiesced */ 2471 /* report disconnect; the driver is already quiesced */
2469 if (driver) 2472 if (driver) {
2473 spin_unlock(&dev->lock);
2470 driver->disconnect(&dev->gadget); 2474 driver->disconnect(&dev->gadget);
2475 spin_lock(&dev->lock);
2476 }
2471 2477
2472 usb_reinit(dev); 2478 usb_reinit(dev);
2473} 2479}
@@ -3341,6 +3347,8 @@ next_endpoints:
3341 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3347 BIT(PCI_RETRY_ABORT_INTERRUPT))
3342 3348
3343static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3349static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3350__releases(dev->lock)
3351__acquires(dev->lock)
3344{ 3352{
3345 struct net2280_ep *ep; 3353 struct net2280_ep *ep;
3346 u32 tmp, num, mask, scratch; 3354 u32 tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3381 if (disconnect || reset) { 3389 if (disconnect || reset) {
3382 stop_activity(dev, dev->driver); 3390 stop_activity(dev, dev->driver);
3383 ep0_start(dev); 3391 ep0_start(dev);
3392 spin_unlock(&dev->lock);
3384 if (reset) 3393 if (reset)
3385 usb_gadget_udc_reset 3394 usb_gadget_udc_reset
3386 (&dev->gadget, dev->driver); 3395 (&dev->gadget, dev->driver);
3387 else 3396 else
3388 (dev->driver->disconnect) 3397 (dev->driver->disconnect)
3389 (&dev->gadget); 3398 (&dev->gadget);
3399 spin_lock(&dev->lock);
3390 return; 3400 return;
3391 } 3401 }
3392 } 3402 }
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3405 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3415 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
3406 if (stat & tmp) { 3416 if (stat & tmp) {
3407 writel(tmp, &dev->regs->irqstat1); 3417 writel(tmp, &dev->regs->irqstat1);
3418 spin_unlock(&dev->lock);
3408 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3419 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
3409 if (dev->driver->suspend) 3420 if (dev->driver->suspend)
3410 dev->driver->suspend(&dev->gadget); 3421 dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3415 dev->driver->resume(&dev->gadget); 3426 dev->driver->resume(&dev->gadget);
3416 /* at high speed, note erratum 0133 */ 3427 /* at high speed, note erratum 0133 */
3417 } 3428 }
3429 spin_lock(&dev->lock);
3418 stat &= ~tmp; 3430 stat &= ~tmp;
3419 } 3431 }
3420 3432
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 1f879b3f2c96..e1656f361e08 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
812 switch (speed) { 812 switch (speed) {
813 case USB_STA_SPEED_SS: 813 case USB_STA_SPEED_SS:
814 usb3->gadget.speed = USB_SPEED_SUPER; 814 usb3->gadget.speed = USB_SPEED_SUPER;
815 usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
815 break; 816 break;
816 case USB_STA_SPEED_HS: 817 case USB_STA_SPEED_HS:
817 usb3->gadget.speed = USB_SPEED_HIGH; 818 usb3->gadget.speed = USB_SPEED_HIGH;
819 usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
818 break; 820 break;
819 case USB_STA_SPEED_FS: 821 case USB_STA_SPEED_FS:
820 usb3->gadget.speed = USB_SPEED_FULL; 822 usb3->gadget.speed = USB_SPEED_FULL;
823 usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
821 break; 824 break;
822 default: 825 default:
823 usb3->gadget.speed = USB_SPEED_UNKNOWN; 826 usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
2513 /* for control pipe */ 2516 /* for control pipe */
2514 usb3->gadget.ep0 = &usb3_ep->ep; 2517 usb3->gadget.ep0 = &usb3_ep->ep;
2515 usb_ep_set_maxpacket_limit(&usb3_ep->ep, 2518 usb_ep_set_maxpacket_limit(&usb3_ep->ep,
2516 USB3_EP0_HSFS_MAX_PACKET_SIZE); 2519 USB3_EP0_SS_MAX_PACKET_SIZE);
2517 usb3_ep->ep.caps.type_control = true; 2520 usb3_ep->ep.caps.type_control = true;
2518 usb3_ep->ep.caps.dir_in = true; 2521 usb3_ep->ep.caps.dir_in = true;
2519 usb3_ep->ep.caps.dir_out = true; 2522 usb3_ep->ep.caps.dir_out = true;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 072bd5d5738e..5b8a3d9530c4 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
2555 } else { 2555 } else {
2556 int frame = 0; 2556 int frame = 0;
2557 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); 2557 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
2558 msleep(100); 2558 mdelay(100);
2559 return frame; 2559 return frame;
2560 } 2560 }
2561} 2561}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ef350c33dc4a..b1f27aa38b10 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1614 in_ep_ctx->deq = out_ep_ctx->deq; 1614 in_ep_ctx->deq = out_ep_ctx->deq;
1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1616 if (xhci->quirks & XHCI_MTK_HOST) {
1617 in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1618 in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1619 }
1616} 1620}
1617 1621
1618/* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1622/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8dc77e34a859..94e939249b2b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
153{ 153{
154 const struct xhci_plat_priv *priv_match; 154 const struct xhci_plat_priv *priv_match;
155 const struct hc_driver *driver; 155 const struct hc_driver *driver;
156 struct device *sysdev; 156 struct device *sysdev, *tmpdev;
157 struct xhci_hcd *xhci; 157 struct xhci_hcd *xhci;
158 struct resource *res; 158 struct resource *res;
159 struct usb_hcd *hcd; 159 struct usb_hcd *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
273 goto disable_clk; 273 goto disable_clk;
274 } 274 }
275 275
276 if (device_property_read_bool(sysdev, "usb2-lpm-disable")) 276 /* imod_interval is the interrupt moderation value in nanoseconds. */
277 xhci->quirks |= XHCI_HW_LPM_DISABLE; 277 xhci->imod_interval = 40000;
278 278
279 if (device_property_read_bool(sysdev, "usb3-lpm-capable")) 279 /* Iterate over all parent nodes for finding quirks */
280 xhci->quirks |= XHCI_LPM_SUPPORT; 280 for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
281 281
282 if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) 282 if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
283 xhci->quirks |= XHCI_BROKEN_PORT_PED; 283 xhci->quirks |= XHCI_HW_LPM_DISABLE;
284 284
285 /* imod_interval is the interrupt moderation value in nanoseconds. */ 285 if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
286 xhci->imod_interval = 40000; 286 xhci->quirks |= XHCI_LPM_SUPPORT;
287 device_property_read_u32(sysdev, "imod-interval-ns", 287
288 &xhci->imod_interval); 288 if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
289 xhci->quirks |= XHCI_BROKEN_PORT_PED;
290
291 device_property_read_u32(tmpdev, "imod-interval-ns",
292 &xhci->imod_interval);
293 }
289 294
290 hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); 295 hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
291 if (IS_ERR(hcd->usb_phy)) { 296 if (IS_ERR(hcd->usb_phy)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 61f48b17e57b..0420eefa647a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -37,6 +37,21 @@ static unsigned long long quirks;
37module_param(quirks, ullong, S_IRUGO); 37module_param(quirks, ullong, S_IRUGO);
38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); 38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
39 39
40static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
41{
42 struct xhci_segment *seg = ring->first_seg;
43
44 if (!td || !td->start_seg)
45 return false;
46 do {
47 if (seg == td->start_seg)
48 return true;
49 seg = seg->next;
50 } while (seg && seg != ring->first_seg);
51
52 return false;
53}
54
40/* TODO: copied from ehci-hcd.c - can this be refactored? */ 55/* TODO: copied from ehci-hcd.c - can this be refactored? */
41/* 56/*
42 * xhci_handshake - spin reading hc until handshake completes or fails 57 * xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1571 goto done; 1586 goto done;
1572 } 1587 }
1573 1588
1589 /*
1590 * check ring is not re-allocated since URB was enqueued. If it is, then
1591 * make sure none of the ring related pointers in this URB private data
1592 * are touched, such as td_list, otherwise we overwrite freed data
1593 */
1594 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1595 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1596 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1597 td = &urb_priv->td[i];
1598 if (!list_empty(&td->cancelled_td_list))
1599 list_del_init(&td->cancelled_td_list);
1600 }
1601 goto err_giveback;
1602 }
1603
1574 if (xhci->xhc_state & XHCI_STATE_HALTED) { 1604 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1575 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1605 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1576 "HC halted, freeing TD manually."); 1606 "HC halted, freeing TD manually.");
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 82f220631bd7..b5d661644263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
369 mask &= 0x0f; 369 mask &= 0x0f;
370 val &= 0x0f; 370 val &= 0x0f;
371 d = (priv->reg[1] & (~mask)) ^ val; 371 d = (priv->reg[1] & (~mask)) ^ val;
372 if (set_1284_register(pp, 2, d, GFP_KERNEL)) 372 if (set_1284_register(pp, 2, d, GFP_ATOMIC))
373 return 0; 373 return 0;
374 priv->reg[1] = d; 374 priv->reg[1] = d;
375 return d & 0xf; 375 return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
379{ 379{
380 unsigned char ret; 380 unsigned char ret;
381 381
382 if (get_1284_register(pp, 1, &ret, GFP_KERNEL)) 382 if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
383 return 0; 383 return 0;
384 return ret & 0xf8; 384 return ret & 0xf8;
385} 385}
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 3be40eaa1ac9..6d9fd5f64903 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
413 spin_unlock_irqrestore(&dev->lock, flags); 413 spin_unlock_irqrestore(&dev->lock, flags);
414 mutex_unlock(&dev->io_mutex); 414 mutex_unlock(&dev->io_mutex);
415 415
416 if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
417 return -EIO;
418
416 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); 419 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
417} 420}
418 421
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
421{ 424{
422 struct usb_yurex *dev; 425 struct usb_yurex *dev;
423 int i, set = 0, retval = 0; 426 int i, set = 0, retval = 0;
424 char buffer[16]; 427 char buffer[16 + 1];
425 char *data = buffer; 428 char *data = buffer;
426 unsigned long long c, c2 = 0; 429 unsigned long long c, c2 = 0;
427 signed long timeout = 0; 430 signed long timeout = 0;
428 DEFINE_WAIT(wait); 431 DEFINE_WAIT(wait);
429 432
430 count = min(sizeof(buffer), count); 433 count = min(sizeof(buffer) - 1, count);
431 dev = file->private_data; 434 dev = file->private_data;
432 435
433 /* verify that we actually have some data to write */ 436 /* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
446 retval = -EFAULT; 449 retval = -EFAULT;
447 goto error; 450 goto error;
448 } 451 }
452 buffer[count] = 0;
449 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); 453 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
450 454
451 switch (buffer[0]) { 455 switch (buffer[0]) {
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index eecfd0671362..d045d8458f81 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
107 (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | 107 (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
108 SSUSB_U2_PORT_HOST_SEL)); 108 SSUSB_U2_PORT_HOST_SEL));
109 109
110 if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) 110 if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
111 mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); 111 mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
112 if (mtu->is_u3_ip)
113 mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
114 SSUSB_U3_PORT_DUAL_MODE);
115 }
112 116
113 return ssusb_check_clocks(mtu->ssusb, check_clk); 117 return ssusb_check_clocks(mtu->ssusb, check_clk);
114} 118}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 6ee371478d89..a45bb253939f 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -459,6 +459,7 @@
459 459
460/* U3D_SSUSB_U3_CTRL_0P */ 460/* U3D_SSUSB_U3_CTRL_0P */
461#define SSUSB_U3_PORT_SSP_SPEED BIT(9) 461#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
462#define SSUSB_U3_PORT_DUAL_MODE BIT(7)
462#define SSUSB_U3_PORT_HOST_SEL BIT(2) 463#define SSUSB_U3_PORT_HOST_SEL BIT(2)
463#define SSUSB_U3_PORT_PDN BIT(1) 464#define SSUSB_U3_PORT_PDN BIT(1)
464#define SSUSB_U3_PORT_DIS BIT(0) 465#define SSUSB_U3_PORT_DIS BIT(0)
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index e53c68261017..9bbcee37524e 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -173,7 +173,7 @@ struct ump_interrupt {
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
175 175
176#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) 176#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01)
177#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) 177#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
178#define TIUMP_INTERRUPT_CODE_LSR 0x03 178#define TIUMP_INTERRUPT_CODE_LSR 0x03
179#define TIUMP_INTERRUPT_CODE_MSR 0x04 179#define TIUMP_INTERRUPT_CODE_MSR 0x04
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3010878f7f8e..e3c5832337e0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
1119 1119
1120static int ti_get_port_from_code(unsigned char code) 1120static int ti_get_port_from_code(unsigned char code)
1121{ 1121{
1122 return (code >> 4) - 3; 1122 return (code >> 6) & 0x01;
1123} 1123}
1124 1124
1125static int ti_get_func_from_code(unsigned char code) 1125static int ti_get_func_from_code(unsigned char code)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c267f2812a04..e227bb5b794f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
376 return 0; 376 return 0;
377 } 377 }
378 378
379 if ((us->fflags & US_FL_NO_ATA_1X) &&
380 (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
381 memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
382 sizeof(usb_stor_sense_invalidCDB));
383 srb->result = SAM_STAT_CHECK_CONDITION;
384 done(srb);
385 return 0;
386 }
387
379 /* enqueue the command and wake up the control thread */ 388 /* enqueue the command and wake up the control thread */
380 srb->scsi_done = done; 389 srb->scsi_done = done;
381 us->srb = srb; 390 us->srb = srb;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9e9de5452860..1f7b401c4d04 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
842 sdev->skip_ms_page_8 = 1; 842 sdev->skip_ms_page_8 = 1;
843 sdev->wce_default_on = 1; 843 sdev->wce_default_on = 1;
844 } 844 }
845
846 /*
847 * Some disks return the total number of blocks in response
848 * to READ CAPACITY rather than the highest block number.
849 * If this device makes that mistake, tell the sd driver.
850 */
851 if (devinfo->flags & US_FL_FIX_CAPACITY)
852 sdev->fix_capacity = 1;
853
854 /*
855 * Some devices don't like MODE SENSE with page=0x3f,
856 * which is the command used for checking if a device
857 * is write-protected. Now that we tell the sd driver
858 * to do a 192-byte transfer with this command the
859 * majority of devices work fine, but a few still can't
860 * handle it. The sd driver will simply assume those
861 * devices are write-enabled.
862 */
863 if (devinfo->flags & US_FL_NO_WP_DETECT)
864 sdev->skip_ms_page_3f = 1;
865
845 scsi_change_queue_depth(sdev, devinfo->qdepth - 2); 866 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
846 return 0; 867 return 0;
847} 868}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 22fcfccf453a..f7f83b21dc74 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
2288 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2288 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2289 US_FL_GO_SLOW ), 2289 US_FL_GO_SLOW ),
2290 2290
2291/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
2292UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999,
2293 "DJI",
2294 "CineSSD",
2295 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2296 US_FL_NO_ATA_1X),
2297
2291/* 2298/*
2292 * Reported by Frederic Marchal <frederic.marchal@wowcompany.com> 2299 * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
2293 * Mio Moov 330 2300 * Mio Moov 330
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 95a2b10127db..76299b6ff06d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
255/* API for the port drivers */ 255/* API for the port drivers */
256 256
257/** 257/**
258 * typec_match_altmode - Match SVID to an array of alternate modes 258 * typec_match_altmode - Match SVID and mode to an array of alternate modes
259 * @altmodes: Array of alternate modes 259 * @altmodes: Array of alternate modes
260 * @n: Number of elements in the array, or -1 for NULL termiated arrays 260 * @n: Number of elements in the array, or -1 for NULL terminated arrays
261 * @svid: Standard or Vendor ID to match with 261 * @svid: Standard or Vendor ID to match with
262 * @mode: Mode to match with
262 * 263 *
263 * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no 264 * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
264 * match is found. 265 * match is found.
265 */ 266 */
266struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, 267struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index c202975f8097..e61dffb27a0c 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
1484 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode 1484 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
1485 * @port: USB Type-C Port that supports the alternate mode 1485 * @port: USB Type-C Port that supports the alternate mode
1486 * @desc: Description of the alternate mode 1486 * @desc: Description of the alternate mode
1487 * @drvdata: Private pointer to driver specific info
1488 * 1487 *
1489 * This routine is used to register an alternate mode that @port is capable of 1488 * This routine is used to register an alternate mode that @port is capable of
1490 * supporting. 1489 * supporting.
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff3..90d387b50ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
79 This value is used to allocate enough space in internal 79 This value is used to allocate enough space in internal
80 tables needed for physical memory administration. 80 tables needed for physical memory administration.
81 81
82config XEN_SCRUB_PAGES 82config XEN_SCRUB_PAGES_DEFAULT
83 bool "Scrub pages before returning them to system" 83 bool "Scrub pages before returning them to system by default"
84 depends on XEN_BALLOON 84 depends on XEN_BALLOON
85 default y 85 default y
86 help 86 help
87 Scrub pages before returning them to the system for reuse by 87 Scrub pages before returning them to the system for reuse by
88 other domains. This makes sure that any confidential data 88 other domains. This makes sure that any confidential data
89 is not accidentally visible to other domains. Is it more 89 is not accidentally visible to other domains. Is it more
90 secure, but slightly less efficient. 90 secure, but slightly less efficient. This can be controlled with
91 xen_scrub_pages=0 parameter and
92 /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
93 This option only sets the default value.
94
91 If in doubt, say yes. 95 If in doubt, say yes.
92 96
93config XEN_DEV_EVTCHN 97config XEN_DEV_EVTCHN
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22..b1357aa4bc55 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
19 19
20static void disable_hotplug_cpu(int cpu) 20static void disable_hotplug_cpu(int cpu)
21{ 21{
22 if (cpu_online(cpu)) { 22 if (!cpu_is_hotpluggable(cpu))
23 lock_device_hotplug(); 23 return;
24 lock_device_hotplug();
25 if (cpu_online(cpu))
24 device_offline(get_cpu_device(cpu)); 26 device_offline(get_cpu_device(cpu));
25 unlock_device_hotplug(); 27 if (!cpu_online(cpu) && cpu_present(cpu)) {
26 }
27 if (cpu_present(cpu))
28 xen_arch_unregister_cpu(cpu); 28 xen_arch_unregister_cpu(cpu);
29 29 set_cpu_present(cpu, false);
30 set_cpu_present(cpu, false); 30 }
31 unlock_device_hotplug();
31} 32}
32 33
33static int vcpu_online(unsigned int cpu) 34static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2..e6c1934734b7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
138 clear_evtchn_to_irq_row(row); 138 clear_evtchn_to_irq_row(row);
139 } 139 }
140 140
141 evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq; 141 evtchn_to_irq[row][col] = irq;
142 return 0; 142 return 0;
143} 143}
144 144
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5..b0b02a501167 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
492 return true; 492 return true;
493} 493}
494 494
495static void unmap_if_in_range(struct gntdev_grant_map *map, 495static int unmap_if_in_range(struct gntdev_grant_map *map,
496 unsigned long start, unsigned long end) 496 unsigned long start, unsigned long end,
497 bool blockable)
497{ 498{
498 unsigned long mstart, mend; 499 unsigned long mstart, mend;
499 int err; 500 int err;
500 501
502 if (!in_range(map, start, end))
503 return 0;
504
505 if (!blockable)
506 return -EAGAIN;
507
501 mstart = max(start, map->vma->vm_start); 508 mstart = max(start, map->vma->vm_start);
502 mend = min(end, map->vma->vm_end); 509 mend = min(end, map->vma->vm_end);
503 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", 510 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
508 (mstart - map->vma->vm_start) >> PAGE_SHIFT, 515 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
509 (mend - mstart) >> PAGE_SHIFT); 516 (mend - mstart) >> PAGE_SHIFT);
510 WARN_ON(err); 517 WARN_ON(err);
518
519 return 0;
511} 520}
512 521
513static int mn_invl_range_start(struct mmu_notifier *mn, 522static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
519 struct gntdev_grant_map *map; 528 struct gntdev_grant_map *map;
520 int ret = 0; 529 int ret = 0;
521 530
522 /* TODO do we really need a mutex here? */
523 if (blockable) 531 if (blockable)
524 mutex_lock(&priv->lock); 532 mutex_lock(&priv->lock);
525 else if (!mutex_trylock(&priv->lock)) 533 else if (!mutex_trylock(&priv->lock))
526 return -EAGAIN; 534 return -EAGAIN;
527 535
528 list_for_each_entry(map, &priv->maps, next) { 536 list_for_each_entry(map, &priv->maps, next) {
529 if (in_range(map, start, end)) { 537 ret = unmap_if_in_range(map, start, end, blockable);
530 ret = -EAGAIN; 538 if (ret)
531 goto out_unlock; 539 goto out_unlock;
532 }
533 unmap_if_in_range(map, start, end);
534 } 540 }
535 list_for_each_entry(map, &priv->freeable_maps, next) { 541 list_for_each_entry(map, &priv->freeable_maps, next) {
536 if (in_range(map, start, end)) { 542 ret = unmap_if_in_range(map, start, end, blockable);
537 ret = -EAGAIN; 543 if (ret)
538 goto out_unlock; 544 goto out_unlock;
539 }
540 unmap_if_in_range(map, start, end);
541 } 545 }
542 546
543out_unlock: 547out_unlock:
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df34..5bb01a62f214 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
280 /* 280 /*
281 * The Xenstore watch fires directly after registering it and 281 * The Xenstore watch fires directly after registering it and
282 * after a suspend/resume cycle. So ENOENT is no error but 282 * after a suspend/resume cycle. So ENOENT is no error but
283 * might happen in those cases. 283 * might happen in those cases. ERANGE is observed when we get
284 * an empty value (''), this happens when we acknowledge the
285 * request by writing '\0' below.
284 */ 286 */
285 if (err != -ENOENT) 287 if (err != -ENOENT && err != -ERANGE)
286 pr_err("Error %d reading sysrq code in control/sysrq\n", 288 pr_err("Error %d reading sysrq code in control/sysrq\n",
287 err); 289 err);
288 xenbus_transaction_end(xbt, 1); 290 xenbus_transaction_end(xbt, 1);
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e..3782cf070338 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
14 14
15#include <xen/interface/memory.h> 15#include <xen/interface/memory.h>
16#include <xen/mem-reservation.h> 16#include <xen/mem-reservation.h>
17#include <linux/moduleparam.h>
18
19bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
20core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
17 21
18/* 22/*
19 * Use one extent per PAGE_SIZE to avoid to break down the page into 23 * Use one extent per PAGE_SIZE to avoid to break down the page into
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46..63c1494a8d73 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
44#include <xen/xenbus.h> 44#include <xen/xenbus.h>
45#include <xen/features.h> 45#include <xen/features.h>
46#include <xen/page.h> 46#include <xen/page.h>
47#include <xen/mem-reservation.h>
47 48
48#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) 49#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
49 50
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
137static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); 138static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
138static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); 139static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
139static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); 140static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
141static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
140 142
141static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, 143static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
142 char *buf) 144 char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
203 &dev_attr_max_schedule_delay.attr.attr, 205 &dev_attr_max_schedule_delay.attr.attr,
204 &dev_attr_retry_count.attr.attr, 206 &dev_attr_retry_count.attr.attr,
205 &dev_attr_max_retry_count.attr.attr, 207 &dev_attr_max_retry_count.attr.attr,
208 &dev_attr_scrub_pages.attr.attr,
206 NULL 209 NULL
207}; 210};
208 211