author     Dave Airlie <airlied@redhat.com>    2017-04-18 21:07:14 -0400
committer  Dave Airlie <airlied@redhat.com>    2017-04-18 21:07:14 -0400
commit     856ee92e8602bd86d34388ac08381c5cb3918756 (patch)
tree       c3bed2bf85214f78239724adb34a1c2d6ec8444d /drivers
parent     a6a5c983b35e579071370d4eb2b4ed8ad5c18da9 (diff)
parent     4f7d029b9bf009fbee76bb10c0c4351a1870d2f3 (diff)
Merge tag 'v4.11-rc7' into drm-next
Backmerge Linux 4.11-rc7 from Linus tree, to fix some conflicts that were causing problems with the rerere cache in drm-tip.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 17
-rw-r--r--  drivers/acpi/nfit/core.c | 6
-rw-r--r--  drivers/acpi/scan.c | 19
-rw-r--r--  drivers/ata/pata_atiixp.c | 5
-rw-r--r--  drivers/ata/sata_via.c | 18
-rw-r--r--  drivers/block/zram/zram_drv.c | 6
-rw-r--r--  drivers/char/mem.c | 82
-rw-r--r--  drivers/char/virtio_console.c | 6
-rw-r--r--  drivers/cpufreq/cpufreq.c | 18
-rw-r--r--  drivers/crypto/caam/caampkc.c | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 66
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/dax/Kconfig | 1
-rw-r--r--  drivers/dax/dax.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 26
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 4
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-uclogic.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 65
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 3
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c | 2
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 2
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 3
-rw-r--r--  drivers/net/team/team.c | 19
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 19
-rw-r--r--  drivers/net/virtio_net.c | 45
-rw-r--r--  drivers/nvdimm/bus.c | 6
-rw-r--r--  drivers/nvdimm/claim.c | 10
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 77
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/rdma.c | 2
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 26
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.c | 80
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h | 11
-rw-r--r--  drivers/pwm/pwm-lpss-pci.c | 10
-rw-r--r--  drivers/pwm/pwm-lpss-platform.c | 1
-rw-r--r--  drivers/pwm/pwm-lpss.c | 19
-rw-r--r--  drivers/pwm/pwm-lpss.h | 1
-rw-r--r--  drivers/pwm/pwm-rockchip.c | 40
-rw-r--r--  drivers/reset/core.c | 22
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 11
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 3
-rw-r--r--  drivers/scsi/ipr.c | 7
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 3
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 7
-rw-r--r--  drivers/scsi/sd.c | 23
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 13
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 16
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 2
-rw-r--r--  drivers/target/target_core_alua.c | 136
-rw-r--r--  drivers/target/target_core_configfs.c | 2
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 5
-rw-r--r--  drivers/target/target_core_tpg.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 102
-rw-r--r--  drivers/target/target_core_user.c | 97
-rw-r--r--  drivers/tty/tty_ldisc.c | 85
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 2
-rw-r--r--  drivers/video/fbdev/efifb.c | 66
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c | 15
-rw-r--r--  drivers/video/fbdev/ssd1307fb.c | 24
-rw-r--r--  drivers/video/fbdev/xen-fbfront.c | 4
-rw-r--r--  drivers/virtio/virtio.c | 6
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 375
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 43
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 8
85 files changed, 1199 insertions, 746 deletions
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index c86bae7b1d0f..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 
 	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
-	/*
-	 * The absolute minimum resource template is one end_tag descriptor.
-	 * However, we will treat a lone end_tag as just a simple buffer.
-	 */
+	/* The absolute minimum resource template is one end_tag descriptor */
+
 	if (aml_length < sizeof(struct aml_resource_end_tag)) {
 		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 	}
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 		/* Invoke the user function */
 
 		if (user_function) {
-			status = user_function(aml, length, offset,
-					       resource_index, context);
+			status =
+			    user_function(aml, length, offset, resource_index,
+					  context);
 			if (ACPI_FAILURE(status)) {
 				return_ACPI_STATUS(status);
 			}
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 			*context = aml;
 		}
 
-		/* Check if buffer is defined to be longer than the resource length */
-
-		if (aml_length > (offset + length)) {
-			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-		}
-
 		/* Normal exit */
 
 		return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 662036bdc65e..c8ea9d698cd0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
1617 const struct nfit_set_info_map *map0 = m0; 1617 const struct nfit_set_info_map *map0 = m0;
1618 const struct nfit_set_info_map *map1 = m1; 1618 const struct nfit_set_info_map *map1 = m1;
1619 1619
1620 return map0->region_offset - map1->region_offset; 1620 if (map0->region_offset < map1->region_offset)
1621 return -1;
1622 else if (map0->region_offset > map1->region_offset)
1623 return 1;
1624 return 0;
1621} 1625}
1622 1626
1623/* Retrieve the nth entry referencing this spa */ 1627/* Retrieve the nth entry referencing this spa */
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 192691880d55..2433569b02ef 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 
 	device->flags.match_driver = true;
-	if (!ret) {
-		ret = device_attach(&device->dev);
-		if (ret < 0)
-			return;
-
-		if (!ret && device->pnp.type.platform_id)
-			acpi_default_enumeration(device);
+	if (ret > 0) {
+		acpi_device_set_enumerated(device);
+		goto ok;
 	}
 
+	ret = device_attach(&device->dev);
+	if (ret < 0)
+		return;
+
+	if (ret > 0 || !device->pnp.type.platform_id)
+		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
+
  ok:
 	list_for_each_entry(child, &device->children, node)
 		acpi_bus_attach(child);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6c9aa95a9a05..49d705c9f0f7 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	const struct ata_port_info *ppi[] = { &info, &info };
 
-	/* SB600/700 don't have secondary port wired */
-	if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
-	    (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
-		ppi[1] = &ata_dummy_port_info;
-
 	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
 				      ATA_HOST_PARALLEL_SCAN);
 }
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0636d84fbefe..f3f538eec7b3 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
 		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
 	}
 
-	/* enable IRQ on hotplug */
-	pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
-	if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
-		dev_dbg(&pdev->dev,
-			"enabling SATA hotplug (0x%x)\n",
-			(int) tmp8);
-		tmp8 |= SATA_HOTPLUG;
-		pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+	if (board_id == vt6421) {
+		/* enable IRQ on hotplug */
+		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+			dev_dbg(&pdev->dev,
+				"enabling SATA hotplug (0x%x)\n",
+				(int) tmp8);
+			tmp8 |= SATA_HOTPLUG;
+			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+		}
 	}
 
 	/*
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dceb5edd1e54..0c09d4256108 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
-		copy_page(mem, cmem);
+		memcpy(mem, cmem, PAGE_SIZE);
 	} else {
 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -717,7 +717,7 @@ compress_again:
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-		copy_page(cmem, src);
+		memcpy(cmem, src, PAGE_SIZE);
 		kunmap_atomic(src);
 	} else {
 		memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	}
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
 	bv.bv_page = page;
 	bv.bv_len = PAGE_SIZE;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d39d22..7e4a9d1296bb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
+			remaining = copy_to_user(buf, ptr, sz);
+
+			unxlate_dev_mem_ptr(p, ptr);
+		}
 
-		remaining = copy_to_user(buf, ptr, sz);
-		unxlate_dev_mem_ptr(p, ptr);
 		if (remaining)
 			return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e9b7e0b3cabe..87fe111d0be6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	vdev->config->reset(vdev);
 
-	virtqueue_disable_cb(portdev->c_ivq);
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
 	cancel_work_sync(&portdev->config_work);
 	/*
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
 	 */
-	virtqueue_disable_cb(portdev->c_ivq);
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
 	remove_controlq_data(portdev);
 
 	list_for_each_entry(port, &portdev->ports, list) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index bc96d423781a..0e3f6496524d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
  *********************************************************************/
 static enum cpuhp_state hp_online;
 
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+	cpufreq_online(cpu);
+
+	return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+	cpufreq_offline(cpu);
+
+	return 0;
+}
+
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
  * @driver_data: A struct cpufreq_driver containing the values#
@@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	}
 
 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-					cpufreq_online,
-					cpufreq_offline);
+					cpuhp_cpufreq_online,
+					cpuhp_cpufreq_offline);
 	if (ret < 0)
 		goto err_if_unreg;
 	hp_online = ret;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 32100c4851dd..49cbdcba7883 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
 	ctx->dev = caam_jr_alloc();
 
 	if (IS_ERR(ctx->dev)) {
-		dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(ctx->dev);
 	}
 
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fef39f9f41ee..5d7f73d60515 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
 		/* Try to run it through DECO0 */
 		ret = run_descriptor_deco0(ctrldev, desc, &status);
 
-		if (ret || status) {
+		if (ret ||
+		    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
 			dev_err(ctrldev,
 				"Failed to deinstantiate RNG4 SH%d\n",
 				sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
 	struct device *ctrldev;
 	struct caam_drv_private *ctrlpriv;
 	struct caam_ctrl __iomem *ctrl;
-	int ring;
 
 	ctrldev = &pdev->dev;
 	ctrlpriv = dev_get_drvdata(ctrldev);
 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 
-	/* Remove platform devices for JobRs */
-	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++)
-		of_device_unregister(ctrlpriv->jrpdev[ring]);
+	/* Remove platform devices under the crypto node */
+	of_platform_depopulate(ctrldev);
 
 	/* De-initialize RNG state handles initialized by this driver. */
 	if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
 DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
 #endif
 
+static const struct of_device_id caam_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0",
+	},
+	{
+		.compatible = "fsl,sec4.0",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
-	int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
 	u64 caam_id;
 	struct device *dev;
 	struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
 		goto iounmap_ctrl;
 	}
 
-	/*
-	 * Detect and enable JobRs
-	 * First, find out how many ring spec'ed, allocate references
-	 * for all, then go probe each one.
-	 */
-	rspec = 0;
-	for_each_available_child_of_node(nprop, np)
-		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
-			rspec++;
-
-	ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
-					sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
-	if (ctrlpriv->jrpdev == NULL) {
-		ret = -ENOMEM;
+	ret = of_platform_populate(nprop, caam_match, NULL, dev);
+	if (ret) {
+		dev_err(dev, "JR platform devices creation error\n");
 		goto iounmap_ctrl;
 	}
 
 	ring = 0;
-	ridx = 0;
-	ctrlpriv->total_jobrs = 0;
 	for_each_available_child_of_node(nprop, np)
 		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-			ctrlpriv->jrpdev[ring] =
-				of_platform_device_create(np, NULL, dev);
-			if (!ctrlpriv->jrpdev[ring]) {
-				pr_warn("JR physical index %d: Platform device creation error\n",
-					ridx);
-				ridx++;
-				continue;
-			}
 			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
 					     ((__force uint8_t *)ctrl +
-					     (ridx + JR_BLOCK_NUMBER) *
+					     (ring + JR_BLOCK_NUMBER) *
 					     BLOCK_OFFSET
 					     );
 			ctrlpriv->total_jobrs++;
 			ring++;
-			ridx++;
-		}
+		}
 
 	/* Check to see if QI present. If so, enable */
 	ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
 	return ret;
 }
 
-static struct of_device_id caam_match[] = {
-	{
-		.compatible = "fsl,sec-v4.0",
-	},
-	{
-		.compatible = "fsl,sec4.0",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, caam_match);
-
 static struct platform_driver caam_driver = {
 	.driver = {
 		.name = "caam",
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc1a921..dbed8baeebe5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
 struct caam_drv_private {
 
 	struct device *dev;
-	struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
 	struct platform_device *pdev;
 
 	/* Physical-presence section */
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b14eea..9e95bf94eb13 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
 	tristate "DAX: direct access to differentiated memory"
 	default m if NVDIMM_DAX
 	depends on TRANSPARENT_HUGEPAGE
+	select SRCU
 	help
 	  Support raw access to differentiated (persistence, bandwidth,
 	  latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 80c6db279ae1..806f180c80d8 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -25,6 +25,7 @@
 #include "dax.h"
 
 static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
  * @region - parent region
  * @dev - device backing the character device
  * @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
  * @id - child id in the region
  * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int dax_dev_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
-	int rc;
+	int rc, id;
 	struct file *filp = vmf->vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
 
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
 			? "write" : "read",
 			vmf->vma->vm_start, vmf->vma->vm_end);
 
-	rcu_read_lock();
+	id = srcu_read_lock(&dax_srcu);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
 		rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
 	default:
 		return VM_FAULT_FALLBACK;
 	}
-	rcu_read_unlock();
+	srcu_read_unlock(&dax_srcu, id);
 
 	return rc;
 }
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
 	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
 	 * ensuring that any fault handlers that might have seen
 	 * dax_dev->alive == true, have completed. Any fault handlers
-	 * that start after synchronize_rcu() has started will abort
+	 * that start after synchronize_srcu() has started will abort
 	 * upon seeing dax_dev->alive == false.
 	 */
 	dax_dev->alive = false;
-	synchronize_rcu();
+	synchronize_srcu(&dax_srcu);
 	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
 	cdev_del(cdev);
 	device_unregister(dev);
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e4cf23..24c461dea7af 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
 		status = __gop_query32(sys_table_arg, gop32, &info, &size,
 				       &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
 			 * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
 		status = __gop_query64(sys_table_arg, gop64, &info, &size,
 				       &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
 			 * provide multiple GOP devices, not all of which are
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 4f587058a3aa..9a9c40717801 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1333,7 +1333,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (!fence) {
 		event_free(gpu, event);
 		ret = -ENOMEM;
-		goto out_pm_put;
+		goto out_unlock;
 	}
 
 	gpu->event[event].fence = fence;
@@ -1373,6 +1373,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	hangcheck_timer_reset(gpu);
 	ret = 0;
 
+out_unlock:
 	mutex_unlock(&gpu->lock);
 
 out_pm_put:
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
 	int ret;
 
-	if (vgpu->failsafe)
-		return 0;
-
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index ce4276a7cf9c..536bde8638c8 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -776,7 +776,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 		_EL_OFFSET_STATUS_PTR);
 
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-	ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+	ctx_status_ptr.read_ptr = 0;
+	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
-	unsigned long size;
+	unsigned long size, crc32_start;
 	int i;
 	int ret;
 
-	size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
 	firmware = vzalloc(size);
 	if (!firmware)
 		return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+	h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
 	firmware_attr.size = size;
 	firmware_attr.private = firmware;
 
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	firmware->mmio = mem;
 
-	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
 		 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
 		 pdev->revision);
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 0f3a98865a58..7dea5e5d5567 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.vgpu_create = intel_gvt_create_vgpu,
 	.vgpu_destroy = intel_gvt_destroy_vgpu,
 	.vgpu_reset = intel_gvt_reset_vgpu,
+	.vgpu_activate = intel_gvt_activate_vgpu,
+	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 806da96b6a92..930732e5c780 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -395,7 +395,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 				 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -462,6 +463,8 @@ struct intel_gvt_ops {
 			struct intel_vgpu_type *);
 	void (*vgpu_destroy)(struct intel_vgpu *);
 	void (*vgpu_reset)(struct intel_vgpu *);
+	void (*vgpu_activate)(struct intel_vgpu *);
+	void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 42ff7ffb6066..1ae0b4083ce1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -546,6 +546,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	if (ret)
 		goto undo_group;
 
+	intel_gvt_ops->vgpu_activate(vgpu);
+
 	atomic_set(&vgpu->vdev.released, 0);
 	return ret;
 
@@ -571,6 +573,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
 		return;
 
+	intel_gvt_ops->vgpu_deactivate(vgpu);
+
 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
 	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 6ba02525e905..6e3cbd8caec2 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -196,20 +196,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 }
 
 /**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_active_vgpu - activate a virtual GPU
  * @vgpu: virtual GPU
  *
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
  *
 */
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+	mutex_lock(&vgpu->gvt->lock);
+	vgpu->active = true;
+	mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactive_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 
 	mutex_lock(&gvt->lock);
 
 	vgpu->active = false;
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
 
 	if (atomic_read(&vgpu->running_workload_num)) {
 		mutex_unlock(&gvt->lock);
@@ -218,6 +232,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	}
 
 	intel_vgpu_stop_schedule(vgpu);
+
+	mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	mutex_lock(&gvt->lock);
+
+	WARN(vgpu->active, "vGPU is still active!\n");
+
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_gvt_context(vgpu);
 	intel_vgpu_clean_execlist(vgpu);
@@ -349,7 +383,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_shadow_ctx;
 
-	vgpu->active = true;
 	mutex_unlock(&gvt->lock);
 
 	return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c616b4e755bc..7b4fa84cbc3c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1469,8 +1469,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 		goto out;
 	}
 
-	intel_guc_suspend(dev_priv);
-
 	intel_display_suspend(dev);
 
 	intel_dp_mst_suspend(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bbc6f1c9f175..92343343044f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4456,6 +4456,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_guc_suspend(dev_priv);
+
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 2978acdd995e..129ed303a6c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+	if (!unlock)
+		return;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
-	if (unlock)
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	/* expedite the RCU grace period to free some request slabs */
-	synchronize_rcu_expedited();
+	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
 	return count;
 }
@@ -296,8 +304,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+	i915_gem_shrinker_unlock(dev, unlock);
 
 	return count;
 }
@@ -324,8 +331,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					 sc->nr_to_scan - freed,
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+
+	i915_gem_shrinker_unlock(dev, unlock);
 
 	return freed;
 }
@@ -367,8 +374,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 				  struct shrinker_lock_uninterruptible *slu)
 {
 	dev_priv->mm.interruptible = slu->was_interruptible;
-	if (slu->unlock)
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 917dcb978c2c..0c87b1ac6b68 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/fb.h>
 #include <linux/prefetch.h>
+#include <asm/unaligned.h>
 
 #include <drm/drmP.h>
 #include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
 			const u8 *const start = pixel;
 			const uint16_t repeating_pixel_val16 = pixel_val16;
 
-			*(uint16_t *)cmd = cpu_to_be16(pixel_val16);
+			put_unaligned_be16(pixel_val16, cmd);
 
 			cmd += 2;
 			pixel += bpp;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 63ec1993eaaa..d162f0dc76e3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
 		hid->group = HID_GROUP_WACOM;
 		break;
 	case USB_VENDOR_ID_SYNAPTICS:
-		if (hid->group == HID_GROUP_GENERIC ||
-		    hid->group == HID_GROUP_MULTITOUCH_WIN_8)
+		if (hid->group == HID_GROUP_GENERIC)
 			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
 			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
 				/*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4e2648c86c8c..b26c030926c1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1028,6 +1028,9 @@
 #define USB_DEVICE_ID_UGEE_TABLET_45		0x0045
 #define USB_DEVICE_ID_YIYNOVA_TABLET		0x004d
 
+#define USB_VENDOR_ID_UGEE			0x28bd
+#define USB_DEVICE_ID_UGEE_TABLET_EX07S		0x0071
+
 #define USB_VENDOR_ID_UNITEC	0x227d
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709	0x0709
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19	0x0a19
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 1509d7287ff3..e3e6e5c893cc 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
 		}
 		break;
 	case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
+	case USB_DEVICE_ID_UGEE_TABLET_EX07S:
 		/* If this is the pen interface */
 		if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
 			rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 91cbe86b25c8..fcbed35e95a8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
+		rx_desc->in_use = false;
 	}
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
+	if (!rx_desc->in_use) {
+		/*
+		 * if the descriptor is not in-use we already reposted it
+		 * for recv, so just silently return
+		 */
+		return 0;
+	}
+
+	rx_desc->in_use = false;
 	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
+	rx_desc->in_use = true;
+
 	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
 			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 		ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
 	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-	if (ret)
-		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-	else
-		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+	if (ret) {
+		/*
+		 * transport_generic_request_failure() expects to have
+		 * plus two references to handle queue-full, so re-add
+		 * one here as target-core will have already dropped
+		 * it after the first isert_put_datain() callback.
+		 */
+		kref_get(&cmd->cmd_kref);
+		transport_generic_request_failure(cmd, cmd->pi_err);
+	} else {
+		/*
+		 * XXX: isert_put_response() failure is not retried.
+		 */
+		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+		if (ret)
+			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+	}
 }
 
 static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
 	spin_unlock_bh(&cmd->istate_lock);
 
-	if (ret) {
-		target_put_sess_cmd(se_cmd);
-		transport_send_check_condition_and_sense(se_cmd,
-							 se_cmd->pi_err, 0);
-	} else {
+	/*
+	 * transport_generic_request_failure() will drop the extra
+	 * se_cmd->cmd_kref reference after T10-PI error, and handle
+	 * any non-zero ->queue_status() callback error retries.
+	 */
+	if (ret)
+		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+	else
 		target_execute_cmd(se_cmd);
-	}
 }
 
 static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		chain_wr = &isert_cmd->tx_desc.send_wr;
 	}
 
-	isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-	return 1;
+	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+		  isert_cmd, rc);
+	return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	int ret;
 
 	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 		 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
 	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-			&isert_cmd->tx_desc.tx_cqe, NULL);
+	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+				     &isert_cmd->tx_desc.tx_cqe, NULL);
 
-	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-		 isert_cmd);
-	return 0;
+	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+		  isert_cmd, ret);
+	return ret;
 }
 
 static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..87d994de8c91 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
 		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-		 sizeof(struct ib_cqe)))
+		 sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE		256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
 	struct ib_cqe	rx_cqe;
+	bool		in_use;
 	char		pad[ISER_RX_PAD_SIZE];
 } __packed;
 
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 155fcb3b6230..153b1ee13e03 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -202,6 +202,7 @@ static const struct xpad_device {
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+	{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
 	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
 	{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
 	{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
 	XPAD_XBOX360_VENDOR(0x1532),		/* Razer Sabertooth */
+	XPAD_XBOXONE_VENDOR(0x1532),		/* Razer Wildcat */
 	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
230 return -ENOMEM; 230 return -ENOMEM;
231 } 231 }
232 232
233 raw_spin_lock_init(&cd->rlock);
234
233 cd->gpc_base = of_iomap(node, 0); 235 cd->gpc_base = of_iomap(node, 0);
234 if (!cd->gpc_base) { 236 if (!cd->gpc_base) {
235 pr_err("fsl-gpcv2: unable to map gpc registers\n"); 237 pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 138f5ae75c0b..4d1fe8d95042 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
557 int work_done = 0; 557 int work_done = 0;
558 558
559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); 559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
560 u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); 560 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); 561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
562 562
563 /* Handle bus state changes */ 563 /* Handle bus state changes */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index caed4e6960f8..11662f479e76 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
826 826
827 devm_can_led_init(ndev); 827 devm_can_led_init(ndev);
828 828
829 dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", 829 dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
830 priv->regs, ndev->irq);
831 830
832 return 0; 831 return 0;
833fail_candev: 832fail_candev:
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 1b52520715ae..f8c81f12d988 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ 990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO) 991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
992 992
993static void ___team_compute_features(struct team *team) 993static void __team_compute_features(struct team *team)
994{ 994{
995 struct team_port *port; 995 struct team_port *port;
996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; 996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; 1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024} 1024}
1025 1025
1026static void __team_compute_features(struct team *team)
1027{
1028 ___team_compute_features(team);
1029 netdev_change_features(team->dev);
1030}
1031
1032static void team_compute_features(struct team *team) 1026static void team_compute_features(struct team *team)
1033{ 1027{
1034 mutex_lock(&team->lock); 1028 mutex_lock(&team->lock);
1035 ___team_compute_features(team); 1029 __team_compute_features(team);
1036 mutex_unlock(&team->lock); 1030 mutex_unlock(&team->lock);
1037 netdev_change_features(team->dev); 1031 netdev_change_features(team->dev);
1038} 1032}
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
1641 team_notify_peers_fini(team); 1635 team_notify_peers_fini(team);
1642 team_queue_override_fini(team); 1636 team_queue_override_fini(team);
1643 mutex_unlock(&team->lock); 1637 mutex_unlock(&team->lock);
1638 netdev_change_features(dev);
1644} 1639}
1645 1640
1646static void team_destructor(struct net_device *dev) 1641static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1928 mutex_lock(&team->lock); 1923 mutex_lock(&team->lock);
1929 err = team_port_add(team, port_dev); 1924 err = team_port_add(team, port_dev);
1930 mutex_unlock(&team->lock); 1925 mutex_unlock(&team->lock);
1926
1927 if (!err)
1928 netdev_change_features(dev);
1929
1931 return err; 1930 return err;
1932} 1931}
1933 1932
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1939 mutex_lock(&team->lock); 1938 mutex_lock(&team->lock);
1940 err = team_port_del(team, port_dev); 1939 err = team_port_del(team, port_dev);
1941 mutex_unlock(&team->lock); 1940 mutex_unlock(&team->lock);
1941
1942 if (!err)
1943 netdev_change_features(dev);
1944
1942 return err; 1945 return err;
1943} 1946}
1944 1947
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 156f7f85e486..2474618404f5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
908 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 908 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
909 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 909 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
910 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 910 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
911 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 911 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
912 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 912 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
913 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 913 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
914 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 914 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3de65ea6531a..453244805c52 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1929 " value=0x%04x index=0x%04x size=%d\n", 1929 " value=0x%04x index=0x%04x size=%d\n",
1930 cmd, reqtype, value, index, size); 1930 cmd, reqtype, value, index, size);
1931 1931
1932 if (data) { 1932 if (size) {
1933 buf = kmalloc(size, GFP_KERNEL); 1933 buf = kmalloc(size, GFP_KERNEL);
1934 if (!buf) 1934 if (!buf)
1935 goto out; 1935 goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1938 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1938 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1939 cmd, reqtype, value, index, buf, size, 1939 cmd, reqtype, value, index, buf, size,
1940 USB_CTRL_GET_TIMEOUT); 1940 USB_CTRL_GET_TIMEOUT);
1941 if (err > 0 && err <= size) 1941 if (err > 0 && err <= size) {
1942 memcpy(data, buf, err); 1942 if (data)
1943 memcpy(data, buf, err);
1944 else
1945 netdev_dbg(dev->net,
1946 "Huh? Data requested but thrown away.\n");
1947 }
1943 kfree(buf); 1948 kfree(buf);
1944out: 1949out:
1945 return err; 1950 return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1960 buf = kmemdup(data, size, GFP_KERNEL); 1965 buf = kmemdup(data, size, GFP_KERNEL);
1961 if (!buf) 1966 if (!buf)
1962 goto out; 1967 goto out;
1963 } 1968 } else {
1969 if (size) {
1970 WARN_ON_ONCE(1);
1971 err = -EINVAL;
1972 goto out;
1973 }
1974 }
1964 1975
1965 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1976 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1966 cmd, reqtype, value, index, buf, size, 1977 cmd, reqtype, value, index, buf, size,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ea9890d61967..f36584616e7d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
2230#define MIN_MTU ETH_MIN_MTU 2230#define MIN_MTU ETH_MIN_MTU
2231#define MAX_MTU ETH_MAX_MTU 2231#define MAX_MTU ETH_MAX_MTU
2232 2232
2233static int virtnet_probe(struct virtio_device *vdev) 2233static int virtnet_validate(struct virtio_device *vdev)
2234{ 2234{
2235 int i, err;
2236 struct net_device *dev;
2237 struct virtnet_info *vi;
2238 u16 max_queue_pairs;
2239 int mtu;
2240
2241 if (!vdev->config->get) { 2235 if (!vdev->config->get) {
2242 dev_err(&vdev->dev, "%s failure: config access disabled\n", 2236 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2243 __func__); 2237 __func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
2247 if (!virtnet_validate_features(vdev)) 2241 if (!virtnet_validate_features(vdev))
2248 return -EINVAL; 2242 return -EINVAL;
2249 2243
2244 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2245 int mtu = virtio_cread16(vdev,
2246 offsetof(struct virtio_net_config,
2247 mtu));
2248 if (mtu < MIN_MTU)
2249 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2250 }
2251
2252 return 0;
2253}
2254
2255static int virtnet_probe(struct virtio_device *vdev)
2256{
2257 int i, err;
2258 struct net_device *dev;
2259 struct virtnet_info *vi;
2260 u16 max_queue_pairs;
2261 int mtu;
2262
2250 /* Find if host supports multiqueue virtio_net device */ 2263 /* Find if host supports multiqueue virtio_net device */
2251 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2264 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2252 struct virtio_net_config, 2265 struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
2362 offsetof(struct virtio_net_config, 2375 offsetof(struct virtio_net_config,
2363 mtu)); 2376 mtu));
2364 if (mtu < dev->min_mtu) { 2377 if (mtu < dev->min_mtu) {
2365 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 2378 /* Should never trigger: MTU was previously validated
2366 } else { 2379 * in virtnet_validate.
2367 dev->mtu = mtu; 2380 */
2368 dev->max_mtu = mtu; 2381 dev_err(&vdev->dev, "device MTU appears to have changed "
2382 "it is now %d < %d", mtu, dev->min_mtu);
2383 goto free_stats;
2369 } 2384 }
2385
2386 dev->mtu = mtu;
2387 dev->max_mtu = mtu;
2388
2389 /* TODO: size buffers correctly in this case. */
2390 if (dev->mtu > ETH_DATA_LEN)
2391 vi->big_packets = true;
2370 } 2392 }
2371 2393
2372 if (vi->any_header_sg) 2394 if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
2544 .driver.name = KBUILD_MODNAME, 2566 .driver.name = KBUILD_MODNAME,
2545 .driver.owner = THIS_MODULE, 2567 .driver.owner = THIS_MODULE,
2546 .id_table = id_table, 2568 .id_table = id_table,
2569 .validate = virtnet_validate,
2547 .probe = virtnet_probe, 2570 .probe = virtnet_probe,
2548 .remove = virtnet_remove, 2571 .remove = virtnet_remove,
2549 .config_changed = virtnet_config_changed, 2572 .config_changed = virtnet_config_changed,
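The virtio_net hunk above moves the MTU sanity check out of probe() and into a new .validate callback, which the virtio core invokes before feature negotiation is finalized, so an out-of-range MTU can simply clear VIRTIO_NET_F_MTU instead of being discovered too late in probe(). Below is a minimal sketch of that pattern, reusing only the helpers that already appear in the hunk; the example_* identifiers are hypothetical and this is not the upstream driver:

#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/if_ether.h>

/* Sketch only: validate device config before features are finalized. */
static int example_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get)
		return -EINVAL;		/* config space access is required */

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
				offsetof(struct virtio_net_config, mtu));
		if (mtu < ETH_MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}
	return 0;
}

static struct virtio_driver example_driver = {
	.driver.name	= "example",
	.validate	= example_validate,	/* runs before .probe */
	/* .probe, .remove and .id_table omitted in this sketch */
};

Because the bad feature bit is cleared before FEATURES_OK, probe() can then rely on any MTU it reads being at least the minimum, which is why the later check in virtnet_probe() is now only a defensive error path.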
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a1728cdf..351bac8f6503 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL); 934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
935 if (rc < 0) 935 if (rc < 0)
936 goto out_unlock; 936 goto out_unlock;
937 nvdimm_bus_unlock(&nvdimm_bus->dev);
938
937 if (copy_to_user(p, buf, buf_len)) 939 if (copy_to_user(p, buf, buf_len))
938 rc = -EFAULT; 940 rc = -EFAULT;
941
942 vfree(buf);
943 return rc;
944
939 out_unlock: 945 out_unlock:
940 nvdimm_bus_unlock(&nvdimm_bus->dev); 946 nvdimm_bus_unlock(&nvdimm_bus->dev);
941 out: 947 out:
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index b3323c0697f6..ca6d572c48fc 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
243 } 243 }
244 244
245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { 245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
246 if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) { 246 /*
247 * FIXME: nsio_rw_bytes() may be called from atomic
248 * context in the btt case and nvdimm_clear_poison()
249 * takes a sleeping lock. Until the locking can be
250 * reworked this capability requires that the namespace
251 * is not claimed by btt.
252 */
253 if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
254 && (!ndns->claim || !is_nd_btt(ndns->claim))) {
247 long cleared; 255 long cleared;
248 256
249 cleared = nvdimm_clear_poison(&ndns->dev, offset, size); 257 cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 0eedc49e0d47..8b721321be5b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
395 395
396int alias_dpa_busy(struct device *dev, void *data) 396int alias_dpa_busy(struct device *dev, void *data)
397{ 397{
398 resource_size_t map_end, blk_start, new, busy; 398 resource_size_t map_end, blk_start, new;
399 struct blk_alloc_info *info = data; 399 struct blk_alloc_info *info = data;
400 struct nd_mapping *nd_mapping; 400 struct nd_mapping *nd_mapping;
401 struct nd_region *nd_region; 401 struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
436 retry: 436 retry:
437 /* 437 /*
438 * Find the free dpa from the end of the last pmem allocation to 438 * Find the free dpa from the end of the last pmem allocation to
439 * the end of the interleave-set mapping that is not already 439 * the end of the interleave-set mapping.
440 * covered by a blk allocation.
441 */ 440 */
442 busy = 0;
443 for_each_dpa_resource(ndd, res) { 441 for_each_dpa_resource(ndd, res) {
442 if (strncmp(res->name, "pmem", 4) != 0)
443 continue;
444 if ((res->start >= blk_start && res->start < map_end) 444 if ((res->start >= blk_start && res->start < map_end)
445 || (res->end >= blk_start 445 || (res->end >= blk_start
446 && res->end <= map_end)) { 446 && res->end <= map_end)) {
447 if (strncmp(res->name, "pmem", 4) == 0) { 447 new = max(blk_start, min(map_end + 1, res->end + 1));
448 new = max(blk_start, min(map_end + 1, 448 if (new != blk_start) {
449 res->end + 1)); 449 blk_start = new;
450 if (new != blk_start) { 450 goto retry;
451 blk_start = new; 451 }
452 goto retry;
453 }
454 } else
455 busy += min(map_end, res->end)
456 - max(nd_mapping->start, res->start) + 1;
457 } else if (nd_mapping->start > res->start
458 && map_end < res->end) {
459 /* total eclipse of the PMEM region mapping */
460 busy += nd_mapping->size;
461 break;
462 } 452 }
463 } 453 }
464 454
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
470 return 1; 460 return 1;
471 } 461 }
472 462
473 info->available -= blk_start - nd_mapping->start + busy; 463 info->available -= blk_start - nd_mapping->start;
474 464
475 return 0; 465 return 0;
476} 466}
477 467
478static int blk_dpa_busy(struct device *dev, void *data)
479{
480 struct blk_alloc_info *info = data;
481 struct nd_mapping *nd_mapping;
482 struct nd_region *nd_region;
483 resource_size_t map_end;
484 int i;
485
486 if (!is_nd_pmem(dev))
487 return 0;
488
489 nd_region = to_nd_region(dev);
490 for (i = 0; i < nd_region->ndr_mappings; i++) {
491 nd_mapping = &nd_region->mapping[i];
492 if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
493 break;
494 }
495
496 if (i >= nd_region->ndr_mappings)
497 return 0;
498
499 map_end = nd_mapping->start + nd_mapping->size - 1;
500 if (info->res->start >= nd_mapping->start
501 && info->res->start < map_end) {
502 if (info->res->end <= map_end) {
503 info->busy = 0;
504 return 1;
505 } else {
506 info->busy -= info->res->end - map_end;
507 return 0;
508 }
509 } else if (info->res->end >= nd_mapping->start
510 && info->res->end <= map_end) {
511 info->busy -= nd_mapping->start - info->res->start;
512 return 0;
513 } else {
514 info->busy -= nd_mapping->size;
515 return 0;
516 }
517}
518
519/** 468/**
520 * nd_blk_available_dpa - account the unused dpa of BLK region 469 * nd_blk_available_dpa - account the unused dpa of BLK region
521 * @nd_mapping: container of dpa-resource-root + labels 470 * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
545 for_each_dpa_resource(ndd, res) { 494 for_each_dpa_resource(ndd, res) {
546 if (strncmp(res->name, "blk", 3) != 0) 495 if (strncmp(res->name, "blk", 3) != 0)
547 continue; 496 continue;
548 497 info.available -= resource_size(res);
549 info.res = res;
550 info.busy = resource_size(res);
551 device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
552 info.available -= info.busy;
553 } 498 }
554 499
555 return info.available; 500 return info.available;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9690beb15e69..d996ca73d3be 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
2023 } 2023 }
2024 2024
2025 ctrl->ctrl.sqsize = 2025 ctrl->ctrl.sqsize =
2026 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 2026 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
2027 2027
2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2029 if (error) 2029 if (error)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 47a479f26e5d..16f84eb0b95e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
1606 } 1606 }
1607 1607
1608 ctrl->ctrl.sqsize = 1608 ctrl->ctrl.sqsize =
1609 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 1609 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
1610 1610
1611 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 1611 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
1612 if (error) 1612 if (error)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 22f7bc6bac7f..c7b0b6a52708 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
392 } 392 }
393 393
394 ctrl->ctrl.sqsize = 394 ctrl->ctrl.sqsize =
395 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 395 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
396 396
397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
398 if (error) 398 if (error)
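The three identical NVMe hunks above drop a stray "+ 1": CAP.MQES is a zero-based field (the controller supports MQES + 1 queue entries) and ctrl->sqsize is kept zero-based as well, so clamping against MQES + 1 allowed a queue one entry deeper than the controller advertises. A small, self-contained illustration of the arithmetic; the values are made up:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned mqes = 31;	/* zero-based: controller supports 32 entries */
	unsigned sqsize = 63;	/* zero-based request: 64 entries wanted */

	/* old clamp: mixes a one-based limit with a zero-based value */
	unsigned old_sqsize = MIN(mqes + 1, sqsize);	/* 32 -> 33 entries, one too many */
	/* new clamp: both sides zero-based */
	unsigned new_sqsize = MIN(mqes, sqsize);	/* 31 -> 32 entries, correct */

	printf("old sqsize=%u new sqsize=%u\n", old_sqsize, new_sqsize);
	return 0;
}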
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index f80134e3e0b6..9ff790174906 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16#include <linux/dmi.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
1524 chained_irq_exit(chip, desc); 1525 chained_irq_exit(chip, desc);
1525} 1526}
1526 1527
1528/*
1529 * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
1530 * tables. Since we leave GPIOs that are not capable of generating
1531 * interrupts out of the irqdomain, the numbering will be different and
1532 * cause devices using the hardcoded IRQ numbers to fail. In order not to
1533 * break such machines we will only mask pins from irqdomain if the machine
1534 * is not listed below.
1535 */
1536static const struct dmi_system_id chv_no_valid_mask[] = {
1537 {
1538 /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
1539 .ident = "Acer Chromebook (CYAN)",
1540 .matches = {
1541 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
1543 DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
1544 },
1545 }
1546};
1547
1527static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) 1548static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1528{ 1549{
1529 const struct chv_gpio_pinrange *range; 1550 const struct chv_gpio_pinrange *range;
1530 struct gpio_chip *chip = &pctrl->chip; 1551 struct gpio_chip *chip = &pctrl->chip;
1552 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1531 int ret, i, offset; 1553 int ret, i, offset;
1532 1554
1533 *chip = chv_gpio_chip; 1555 *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1536 chip->label = dev_name(pctrl->dev); 1558 chip->label = dev_name(pctrl->dev);
1537 chip->parent = pctrl->dev; 1559 chip->parent = pctrl->dev;
1538 chip->base = -1; 1560 chip->base = -1;
1539 chip->irq_need_valid_mask = true; 1561 chip->irq_need_valid_mask = need_valid_mask;
1540 1562
1541 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); 1563 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
1542 if (ret) { 1564 if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1567 intsel &= CHV_PADCTRL0_INTSEL_MASK; 1589 intsel &= CHV_PADCTRL0_INTSEL_MASK;
1568 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1590 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1569 1591
1570 if (intsel >= pctrl->community->nirqs) 1592 if (need_valid_mask && intsel >= pctrl->community->nirqs)
1571 clear_bit(i, chip->irq_valid_mask); 1593 clear_bit(i, chip->irq_valid_mask);
1572 } 1594 }
1573 1595
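The comment block and chv_no_valid_mask table above rely on the kernel's DMI matching to opt specific machines out of the irq_valid_mask handling. The general pattern, sketched below with hypothetical identifiers, is a dmi_system_id table terminated by an empty entry and queried with dmi_check_system(), which returns non-zero when the running system matches any entry:

#include <linux/dmi.h>
#include <linux/printk.h>

/* Sketch only: quirk table matched against the firmware's DMI strings. */
static const struct dmi_system_id example_quirks[] = {
	{
		.ident = "Example machine needing the quirk",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PRODUCT"),
		},
	},
	{ }	/* terminating empty entry required by dmi_check_system() */
};

static void example_probe_time_check(void)
{
	/* non-zero iff the running machine matches one of the entries */
	if (dmi_check_system(example_quirks))
		pr_info("example: applying DMI quirk\n");
}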
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f9b49967f512..63e51b56a22a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1468 1468
1469/* pin banks of exynos5433 pin-controller - ALIVE */ 1469/* pin banks of exynos5433 pin-controller - ALIVE */
1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { 1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
1471 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 1471 EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1472 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), 1472 EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1473 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), 1473 EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1474 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), 1474 EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1475 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), 1475 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
1476 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), 1476 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
1477 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), 1477 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
1478 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), 1478 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
1479 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), 1479 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
1480}; 1480};
1481 1481
1482/* pin banks of exynos5433 pin-controller - AUD */ 1482/* pin banks of exynos5433 pin-controller - AUD */
1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { 1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
1484 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1484 EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1485 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1485 EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1486}; 1486};
1487 1487
1488/* pin banks of exynos5433 pin-controller - CPIF */ 1488/* pin banks of exynos5433 pin-controller - CPIF */
1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = { 1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
1490 EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), 1490 EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
1491}; 1491};
1492 1492
1493/* pin banks of exynos5433 pin-controller - eSE */ 1493/* pin banks of exynos5433 pin-controller - eSE */
1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { 1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
1495 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), 1495 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
1496}; 1496};
1497 1497
1498/* pin banks of exynos5433 pin-controller - FINGER */ 1498/* pin banks of exynos5433 pin-controller - FINGER */
1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { 1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
1500 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), 1500 EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
1501}; 1501};
1502 1502
1503/* pin banks of exynos5433 pin-controller - FSYS */ 1503/* pin banks of exynos5433 pin-controller - FSYS */
1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { 1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
1505 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), 1505 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
1506 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), 1506 EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
1507 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), 1507 EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
1508 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), 1508 EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
1509 EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), 1509 EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
1510 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), 1510 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
1511}; 1511};
1512 1512
1513/* pin banks of exynos5433 pin-controller - IMEM */ 1513/* pin banks of exynos5433 pin-controller - IMEM */
1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { 1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
1515 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), 1515 EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
1516}; 1516};
1517 1517
1518/* pin banks of exynos5433 pin-controller - NFC */ 1518/* pin banks of exynos5433 pin-controller - NFC */
1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { 1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
1520 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), 1520 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1521}; 1521};
1522 1522
1523/* pin banks of exynos5433 pin-controller - PERIC */ 1523/* pin banks of exynos5433 pin-controller - PERIC */
1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { 1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
1525 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), 1525 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
1526 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), 1526 EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
1527 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), 1527 EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
1528 EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), 1528 EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
1529 EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), 1529 EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
1530 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), 1530 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
1531 EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), 1531 EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
1532 EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), 1532 EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
1533 EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), 1533 EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
1534 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), 1534 EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
1535 EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), 1535 EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
1536 EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), 1536 EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
1537 EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), 1537 EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
1538 EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), 1538 EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
1539 EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), 1539 EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
1540 EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), 1540 EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
1541 EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), 1541 EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
1542}; 1542};
1543 1543
1544/* pin banks of exynos5433 pin-controller - TOUCH */ 1544/* pin banks of exynos5433 pin-controller - TOUCH */
1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { 1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
1546 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), 1546 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1547}; 1547};
1548 1548
1549/* 1549/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index a473092fb8d2..cd046eb7d705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -79,17 +79,6 @@
79 .name = id \ 79 .name = id \
80 } 80 }
81 81
82#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
83 { \
84 .type = &bank_type_alive, \
85 .pctl_offset = reg, \
86 .nr_pins = pins, \
87 .eint_type = EINT_TYPE_WKUP, \
88 .eint_offset = offs, \
89 .name = id, \
90 .pctl_res_idx = pctl_idx, \
91 } \
92
93#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ 82#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
94 { \ 83 { \
95 .type = &exynos5433_bank_type_off, \ 84 .type = &exynos5433_bank_type_off, \
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 053088b9b66e..c1527cb645be 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
36 .clk_rate = 19200000, 36 .clk_rate = 19200000,
37 .npwm = 4, 37 .npwm = 4,
38 .base_unit_bits = 22, 38 .base_unit_bits = 22,
39 .bypass = true,
40};
41
42/* Tangier */
43static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
44 .clk_rate = 19200000,
45 .npwm = 4,
46 .base_unit_bits = 22,
39}; 47};
40 48
41static int pwm_lpss_probe_pci(struct pci_dev *pdev, 49static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
97 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, 105 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
98 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, 106 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
99 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, 107 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
100 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, 108 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
101 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, 109 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
102 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, 110 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
103 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, 111 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index b22b6fdadb9a..5d6ed1507d29 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
37 .clk_rate = 19200000, 37 .clk_rate = 19200000,
38 .npwm = 4, 38 .npwm = 4,
39 .base_unit_bits = 22, 39 .base_unit_bits = 22,
40 .bypass = true,
40}; 41};
41 42
42static int pwm_lpss_probe_platform(struct platform_device *pdev) 43static int pwm_lpss_probe_platform(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 689d2c1cbead..8db0d40ccacd 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); 57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
58} 58}
59 59
60static int pwm_lpss_update(struct pwm_device *pwm) 60static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
61{ 61{
62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); 62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; 63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
65 u32 val; 65 u32 val;
66 int err; 66 int err;
67 67
68 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
69
70 /* 68 /*
71 * PWM Configuration register has SW_UPDATE bit that is set when a new 69 * PWM Configuration register has SW_UPDATE bit that is set when a new
72 * configuration is written to the register. The bit is automatically 70 * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
122 pwm_lpss_write(pwm, ctrl); 120 pwm_lpss_write(pwm, ctrl);
123} 121}
124 122
123static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
124{
125 if (cond)
126 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
127}
128
125static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, 129static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
126 struct pwm_state *state) 130 struct pwm_state *state)
127{ 131{
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
137 return ret; 141 return ret;
138 } 142 }
139 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 143 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
140 ret = pwm_lpss_update(pwm); 144 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
145 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
146 ret = pwm_lpss_wait_for_update(pwm);
141 if (ret) { 147 if (ret) {
142 pm_runtime_put(chip->dev); 148 pm_runtime_put(chip->dev);
143 return ret; 149 return ret;
144 } 150 }
145 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); 151 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
146 } else { 152 } else {
147 ret = pwm_lpss_is_updating(pwm); 153 ret = pwm_lpss_is_updating(pwm);
148 if (ret) 154 if (ret)
149 return ret; 155 return ret;
150 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 156 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
151 return pwm_lpss_update(pwm); 157 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
158 return pwm_lpss_wait_for_update(pwm);
152 } 159 }
153 } else if (pwm_is_enabled(pwm)) { 160 } else if (pwm_is_enabled(pwm)) {
154 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); 161 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index c94cd7c2695d..98306bb02cfe 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
22 unsigned long clk_rate; 22 unsigned long clk_rate;
23 unsigned int npwm; 23 unsigned int npwm;
24 unsigned long base_unit_bits; 24 unsigned long base_unit_bits;
25 bool bypass;
25}; 26};
26 27
27struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, 28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1f7336..744d56197286 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
191 return 0; 191 return 0;
192} 192}
193 193
194static int rockchip_pwm_enable(struct pwm_chip *chip,
195 struct pwm_device *pwm,
196 bool enable,
197 enum pwm_polarity polarity)
198{
199 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
200 int ret;
201
202 if (enable) {
203 ret = clk_enable(pc->clk);
204 if (ret)
205 return ret;
206 }
207
208 pc->data->set_enable(chip, pwm, enable, polarity);
209
210 if (!enable)
211 clk_disable(pc->clk);
212
213 return 0;
214}
215
194static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, 216static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
195 struct pwm_state *state) 217 struct pwm_state *state)
196{ 218{
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
207 return ret; 229 return ret;
208 230
209 if (state->polarity != curstate.polarity && enabled) { 231 if (state->polarity != curstate.polarity && enabled) {
210 pc->data->set_enable(chip, pwm, false, state->polarity); 232 ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
233 if (ret)
234 goto out;
211 enabled = false; 235 enabled = false;
212 } 236 }
213 237
214 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); 238 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
215 if (ret) { 239 if (ret) {
216 if (enabled != curstate.enabled) 240 if (enabled != curstate.enabled)
217 pc->data->set_enable(chip, pwm, !enabled, 241 rockchip_pwm_enable(chip, pwm, !enabled,
218 state->polarity); 242 state->polarity);
219
220 goto out; 243 goto out;
221 } 244 }
222 245
223 if (state->enabled != enabled) 246 if (state->enabled != enabled) {
224 pc->data->set_enable(chip, pwm, state->enabled, 247 ret = rockchip_pwm_enable(chip, pwm, state->enabled,
225 state->polarity); 248 state->polarity);
249 if (ret)
250 goto out;
251 }
226 252
227 /* 253 /*
228 * Update the state with the real hardware, which can differ a bit 254 * Update the state with the real hardware, which can differ a bit
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f1e5e65388bb..cd739d2fa160 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
275} 275}
276EXPORT_SYMBOL_GPL(reset_control_status); 276EXPORT_SYMBOL_GPL(reset_control_status);
277 277
278static struct reset_control *__reset_control_get( 278static struct reset_control *__reset_control_get_internal(
279 struct reset_controller_dev *rcdev, 279 struct reset_controller_dev *rcdev,
280 unsigned int index, bool shared) 280 unsigned int index, bool shared)
281{ 281{
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
308 return rstc; 308 return rstc;
309} 309}
310 310
311static void __reset_control_put(struct reset_control *rstc) 311static void __reset_control_put_internal(struct reset_control *rstc)
312{ 312{
313 lockdep_assert_held(&reset_list_mutex); 313 lockdep_assert_held(&reset_list_mutex);
314 314
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
377 } 377 }
378 378
379 /* reset_list_mutex also protects the rcdev's reset_control list */ 379 /* reset_list_mutex also protects the rcdev's reset_control list */
380 rstc = __reset_control_get(rcdev, rstc_id, shared); 380 rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
381 381
382 mutex_unlock(&reset_list_mutex); 382 mutex_unlock(&reset_list_mutex);
383 383
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
385} 385}
386EXPORT_SYMBOL_GPL(__of_reset_control_get); 386EXPORT_SYMBOL_GPL(__of_reset_control_get);
387 387
388struct reset_control *__reset_control_get(struct device *dev, const char *id,
389 int index, bool shared, bool optional)
390{
391 if (dev->of_node)
392 return __of_reset_control_get(dev->of_node, id, index, shared,
393 optional);
394
395 return optional ? NULL : ERR_PTR(-EINVAL);
396}
397EXPORT_SYMBOL_GPL(__reset_control_get);
398
388/** 399/**
389 * reset_control_put - free the reset controller 400 * reset_control_put - free the reset controller
390 * @rstc: reset controller 401 * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
396 return; 407 return;
397 408
398 mutex_lock(&reset_list_mutex); 409 mutex_lock(&reset_list_mutex);
399 __reset_control_put(rstc); 410 __reset_control_put_internal(rstc);
400 mutex_unlock(&reset_list_mutex); 411 mutex_unlock(&reset_list_mutex);
401} 412}
402EXPORT_SYMBOL_GPL(reset_control_put); 413EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
417 if (!ptr) 428 if (!ptr)
418 return ERR_PTR(-ENOMEM); 429 return ERR_PTR(-ENOMEM);
419 430
420 rstc = __of_reset_control_get(dev ? dev->of_node : NULL, 431 rstc = __reset_control_get(dev, id, index, shared, optional);
421 id, index, shared, optional);
422 if (!IS_ERR(rstc)) { 432 if (!IS_ERR(rstc)) {
423 *ptr = rstc; 433 *ptr = rstc;
424 devres_add(dev, ptr); 434 devres_add(dev, ptr);
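The reset/core.c change above renames the internal lookup helpers and adds an exported __reset_control_get() that dispatches on dev->of_node, so on a non-DT device an optional reset request now yields NULL instead of an error. A sketch of how a consumer sees that behaviour, assuming the usual reset consumer API in this kernel; the example_ name is hypothetical:

#include <linux/reset.h>
#include <linux/err.h>

/* Sketch only: an optional reset that the platform does not provide comes
 * back as NULL, which the reset API here treats as a no-op control. */
static int example_setup(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_optional(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);	/* real lookup errors still propagate */

	reset_control_deassert(rst);	/* NULL is silently ignored */
	return 0;
}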
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d036a806f31c..d281492009fb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1690,9 +1690,6 @@ struct aac_dev
1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
1692 1692
1693#define aac_adapter_check_health(dev) \
1694 (dev)->a_ops.adapter_check_health(dev)
1695
1696#define aac_adapter_restart(dev, bled, reset_type) \ 1693#define aac_adapter_restart(dev, bled, reset_type) \
1697 ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) 1694 ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
1698 1695
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2615 return capacity; 2612 return capacity;
2616} 2613}
2617 2614
2615static inline int aac_adapter_check_health(struct aac_dev *dev)
2616{
2617 if (unlikely(pci_channel_offline(dev->pdev)))
2618 return -1;
2619
2620 return (dev)->a_ops.adapter_check_health(dev);
2621}
2622
2618/* SCp.phase values */ 2623/* SCp.phase values */
2619#define AAC_OWNER_MIDLEVEL 0x101 2624#define AAC_OWNER_MIDLEVEL 0x101
2620#define AAC_OWNER_LOWLEVEL 0x102 2625#define AAC_OWNER_LOWLEVEL 0x102
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index c8172f16cf33..1f4918355fdb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
1873 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1873 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1874 1874
1875 if (BlinkLED < 0) { 1875 if (BlinkLED < 0) {
1876 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); 1876 printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1877 aac->name, BlinkLED);
1877 goto out; 1878 goto out;
1878 } 1879 }
1879 1880
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b29afafc2885..5d5e272fd815 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6293 break; 6293 break;
6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6296 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6296 /*
6297 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6298 * so SCSI mid-layer and upper layers handle it accordingly.
6299 */
6300 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6301 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6297 break; 6302 break;
6298 case IPR_IOASC_BUS_WAS_RESET: 6303 case IPR_IOASC_BUS_WAS_RESET:
6299 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6304 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index ed58b9104f58..e10b91cc3c62 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
99 qedf_set_vlan_id(qedf, vid); 99 qedf_set_vlan_id(qedf, vid);
100 100
101 /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ 101 /* Inform waiter that it's ok to call fcoe_ctlr_link up() */
102 complete(&qedf->fipvlan_compl); 102 if (!completion_done(&qedf->fipvlan_compl))
103 complete(&qedf->fipvlan_compl);
103 } 104 }
104} 105}
105 106
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 8e2a160490e6..cceddd995a4b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2803 atomic_set(&qedf->num_offloads, 0); 2803 atomic_set(&qedf->num_offloads, 0);
2804 qedf->stop_io_on_error = false; 2804 qedf->stop_io_on_error = false;
2805 pci_set_drvdata(pdev, qedf); 2805 pci_set_drvdata(pdev, qedf);
2806 init_completion(&qedf->fipvlan_compl);
2806 2807
2807 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, 2808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
2808 "QLogic FastLinQ FCoE Module qedf %s, " 2809 "QLogic FastLinQ FCoE Module qedf %s, "
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3e7011757c82..83d61d2142e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1160,8 +1160,13 @@ static inline
1160uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1160uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1161{ 1161{
1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1163 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1163 1164
1164 return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); 1165 if (IS_P3P_TYPE(ha))
1166 return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
1167 else
1168 return ((RD_REG_DWORD(&reg->host_status)) ==
1169 ISP_REG_DISCONNECT);
1165} 1170}
1166 1171
1167/************************************************************************** 1172/**************************************************************************
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fcfeddc79331..35ad5e8a31ab 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2102 2102
2103#define READ_CAPACITY_RETRIES_ON_RESET 10 2103#define READ_CAPACITY_RETRIES_ON_RESET 10
2104 2104
2105/*
2106 * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
2107 * and the reported logical block size is bigger than 512 bytes. Note
2108 * that last_sector is a u64 and therefore logical_to_sectors() is not
2109 * applicable.
2110 */
2111static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
2112{
2113 u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
2114
2115 if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
2116 return false;
2117
2118 return true;
2119}
2120
2105static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2121static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2106 unsigned char *buffer) 2122 unsigned char *buffer)
2107{ 2123{
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2167 return -ENODEV; 2183 return -ENODEV;
2168 } 2184 }
2169 2185
2170 if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { 2186 if (!sd_addressable_capacity(lba, sector_size)) {
2171 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2187 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2172 "kernel compiled with support for large block " 2188 "kernel compiled with support for large block "
2173 "devices.\n"); 2189 "devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2256 return sector_size; 2272 return sector_size;
2257 } 2273 }
2258 2274
2259 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { 2275 if (!sd_addressable_capacity(lba, sector_size)) {
2260 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2276 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2261 "kernel compiled with support for large block " 2277 "kernel compiled with support for large block "
2262 "devices.\n"); 2278 "devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
2956 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 2972 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2957 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 2973 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2958 } else 2974 } else
2959 rw_max = BLK_DEF_MAX_SECTORS; 2975 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
2976 (sector_t)BLK_DEF_MAX_SECTORS);
2960 2977
2961 /* Combine with controller limits */ 2978 /* Combine with controller limits */
2962 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 2979 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
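sd_addressable_capacity() above replaces two slightly different open-coded checks with one helper: when sector_t is 32-bit (CONFIG_LBDAF unset), the capacity expressed in 512-byte sectors, not just the raw LBA count, has to fit in 32 bits, because a logical block size above 512 multiplies the sector count. A self-contained userspace illustration of the same arithmetic, with a 32-bit sector_t assumed:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* ilog2 for powers of two, enough for this sketch */
static unsigned ilog2_u32(uint32_t v) { unsigned r = 0; while (v >>= 1) r++; return r; }

/* Mirror of the check: do (lba + 1) blocks, expressed as 512-byte
 * sectors, still fit in a 32-bit sector_t? */
static bool addressable_on_32bit(uint64_t lba, unsigned sector_size)
{
	uint64_t last_sector = (lba + 1ULL) << (ilog2_u32(sector_size) - 9);

	return last_sector <= UINT32_MAX;
}

int main(void)
{
	/* ~2 TiB of 512-byte blocks: exactly at the 32-bit sector limit */
	printf("%d\n", addressable_on_32bit(0xfffffffeULL, 512));	/* 1 */
	/* same block count with 4 KiB blocks: ~16 TiB, overflows sector_t */
	printf("%d\n", addressable_on_32bit(0xfffffffeULL, 4096));	/* 0 */
	return 0;
}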
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0b29b9329b1c..a8f630213a1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
836 unsigned char *buffer; 836 unsigned char *buffer;
837 struct scsi_mode_data data; 837 struct scsi_mode_data data;
838 struct scsi_sense_hdr sshdr; 838 struct scsi_sense_hdr sshdr;
839 unsigned int ms_len = 128;
839 int rc, n; 840 int rc, n;
840 841
841 static const char *loadmech[] = 842 static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
862 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); 863 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
863 864
864 /* ask for mode page 0x2a */ 865 /* ask for mode page 0x2a */
865 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 866 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
866 SR_TIMEOUT, 3, &data, NULL); 867 SR_TIMEOUT, 3, &data, NULL);
867 868
868 if (!scsi_status_is_good(rc)) { 869 if (!scsi_status_is_good(rc) || data.length > ms_len ||
870 data.header_length + data.block_descriptor_length > data.length) {
869 /* failed, drive doesn't have capabilities mode page */ 871 /* failed, drive doesn't have capabilities mode page */
870 cd->cdi.speed = 1; 872 cd->cdi.speed = 1;
871 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | 873 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
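The sr.c hunk above stops trusting the lengths a drive reports in its MODE SENSE reply: the reply's self-described length must fit the 128-byte buffer that was offered, and the header plus block-descriptor lengths must fit inside the reply itself, otherwise the capabilities page is treated as absent. A small sketch of that validation in isolation; the struct mirrors a subset of scsi_mode_data and the values are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Subset of the fields scsi_mode_sense() fills in */
struct mode_data {
	unsigned length;
	unsigned header_length;
	unsigned block_descriptor_length;
};

static bool mode_sense_reply_sane(const struct mode_data *d, unsigned buf_len)
{
	if (d->length > buf_len)
		return false;	/* device claims more data than we asked for */
	if (d->header_length + d->block_descriptor_length > d->length)
		return false;	/* internal lengths overrun the reply itself */
	return true;
}

int main(void)
{
	struct mode_data ok  = { .length = 52,  .header_length = 8, .block_descriptor_length = 0 };
	struct mode_data bad = { .length = 200, .header_length = 8, .block_descriptor_length = 0 };

	printf("%d %d\n", mode_sense_reply_sane(&ok, 128), mode_sense_reply_sane(&bad, 128));
	return 0;
}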
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index a91802432f2f..e3f9ed3690b7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
485 485
486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
487{ 487{
488 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 488 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
489 return 0;
490} 489}
491EXPORT_SYMBOL(iscsit_queue_rsp); 490EXPORT_SYMBOL(iscsit_queue_rsp);
492 491
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bf40f03755dd..344e8448869c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
1398static int lio_queue_data_in(struct se_cmd *se_cmd) 1398static int lio_queue_data_in(struct se_cmd *se_cmd)
1399{ 1399{
1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1401 struct iscsi_conn *conn = cmd->conn;
1401 1402
1402 cmd->i_state = ISTATE_SEND_DATAIN; 1403 cmd->i_state = ISTATE_SEND_DATAIN;
1403 cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); 1404 return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
1404
1405 return 0;
1406} 1405}
1407 1406
1408static int lio_write_pending(struct se_cmd *se_cmd) 1407static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
1431static int lio_queue_status(struct se_cmd *se_cmd) 1430static int lio_queue_status(struct se_cmd *se_cmd)
1432{ 1431{
1433 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1432 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1433 struct iscsi_conn *conn = cmd->conn;
1434 1434
1435 cmd->i_state = ISTATE_SEND_STATUS; 1435 cmd->i_state = ISTATE_SEND_STATUS;
1436 1436
1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) { 1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
1438 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1438 return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1439 return 0;
1440 } 1439 }
1441 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); 1440 return conn->conn_transport->iscsit_queue_status(conn, cmd);
1442
1443 return 0;
1444} 1441}
1445 1442
1446static void lio_queue_tm_rsp(struct se_cmd *se_cmd) 1443static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e65bf78ceef3..fce627628200 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param); 783 SET_PSTATE_REPLY_OPTIONAL(param);
784 /* 784 /*
785 * The GlobalSAN iSCSI Initiator for MacOSX does
786 * not respond to MaxBurstLength, FirstBurstLength,
787 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
788 * So, we set them to 'reply optional' here, and assume the
789 * the defaults from iscsi_parameters.h if the initiator
790 * is not RFC compliant and the keys are not negotiated.
791 */
792 if (!strcmp(param->name, MAXBURSTLENGTH))
793 SET_PSTATE_REPLY_OPTIONAL(param);
794 if (!strcmp(param->name, FIRSTBURSTLENGTH))
795 SET_PSTATE_REPLY_OPTIONAL(param);
796 if (!strcmp(param->name, DEFAULTTIME2WAIT))
797 SET_PSTATE_REPLY_OPTIONAL(param);
798 if (!strcmp(param->name, DEFAULTTIME2RETAIN))
799 SET_PSTATE_REPLY_OPTIONAL(param);
800 /*
801 * Required for gPXE iSCSI boot client 785 * Required for gPXE iSCSI boot client
802 */ 786 */
803 if (!strcmp(param->name, MAXCONNECTIONS)) 787 if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 5041a9c8bdcb..7d3e2fcc26a0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
567 } 567 }
568} 568}
569 569
570void iscsit_add_cmd_to_response_queue( 570int iscsit_add_cmd_to_response_queue(
571 struct iscsi_cmd *cmd, 571 struct iscsi_cmd *cmd,
572 struct iscsi_conn *conn, 572 struct iscsi_conn *conn,
573 u8 state) 573 u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
578 if (!qr) { 578 if (!qr) {
579 pr_err("Unable to allocate memory for" 579 pr_err("Unable to allocate memory for"
580 " struct iscsi_queue_req\n"); 580 " struct iscsi_queue_req\n");
581 return; 581 return -ENOMEM;
582 } 582 }
583 INIT_LIST_HEAD(&qr->qr_list); 583 INIT_LIST_HEAD(&qr->qr_list);
584 qr->cmd = cmd; 584 qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
590 spin_unlock_bh(&conn->response_queue_lock); 590 spin_unlock_bh(&conn->response_queue_lock);
591 591
592 wake_up(&conn->queues_wq); 592 wake_up(&conn->queues_wq);
593 return 0;
593} 594}
594 595
595struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) 596struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
737{ 738{
738 struct se_cmd *se_cmd = NULL; 739 struct se_cmd *se_cmd = NULL;
739 int rc; 740 int rc;
741 bool op_scsi = false;
740 /* 742 /*
741 * Determine if a struct se_cmd is associated with 743 * Determine if a struct se_cmd is associated with
742 * this struct iscsi_cmd. 744 * this struct iscsi_cmd.
743 */ 745 */
744 switch (cmd->iscsi_opcode) { 746 switch (cmd->iscsi_opcode) {
745 case ISCSI_OP_SCSI_CMD: 747 case ISCSI_OP_SCSI_CMD:
746 se_cmd = &cmd->se_cmd; 748 op_scsi = true;
747 __iscsit_free_cmd(cmd, true, shutdown);
748 /* 749 /*
749 * Fallthrough 750 * Fallthrough
750 */ 751 */
751 case ISCSI_OP_SCSI_TMFUNC: 752 case ISCSI_OP_SCSI_TMFUNC:
752 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); 753 se_cmd = &cmd->se_cmd;
753 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 754 __iscsit_free_cmd(cmd, op_scsi, shutdown);
754 __iscsit_free_cmd(cmd, true, shutdown); 755 rc = transport_generic_free_cmd(se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, op_scsi, shutdown);
755 target_put_sess_cmd(se_cmd); 758 target_put_sess_cmd(se_cmd);
756 } 759 }
757 break; 760 break;
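
Two related changes above: iscsit_add_cmd_to_response_queue() now returns -ENOMEM instead of void, and iscsit_free_cmd() replaces the duplicated SCSI-command cleanup with an op_scsi flag plus a deliberate fallthrough into the TMFUNC arm. A compilable sketch of that fallthrough shape (the opcode names and free_cmd() helper are illustrative placeholders, not the kernel symbols):

#include <stdbool.h>
#include <stdio.h>

enum opcode { OP_SCSI_CMD, OP_SCSI_TMFUNC, OP_NOP_OUT };

static void free_cmd(bool op_scsi, bool shutdown)
{
	printf("free_cmd(op_scsi=%d, shutdown=%d)\n", op_scsi, shutdown);
}

static void release_cmd(enum opcode op, bool shutdown)
{
	bool op_scsi = false;

	switch (op) {
	case OP_SCSI_CMD:
		op_scsi = true;
		/* fall through: SCSI commands take the same teardown path
		 * as task management functions, just with op_scsi set */
	case OP_SCSI_TMFUNC:
		free_cmd(op_scsi, shutdown);
		break;
	default:
		free_cmd(false, shutdown);
		break;
	}
}

int main(void)
{
	release_cmd(OP_SCSI_CMD, true);
	release_cmd(OP_SCSI_TMFUNC, false);
	return 0;
}
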
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 8ff08856516a..9e4197af8708 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
31 struct iscsi_conn_recovery **, itt_t); 31 struct iscsi_conn_recovery **, itt_t);
32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); 33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
34extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 34extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); 35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); 36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fd7c16a7ca6e..fc4a9c303d55 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
197 /* 197 /*
198 * Set the ASYMMETRIC ACCESS State 198 * Set the ASYMMETRIC ACCESS State
199 */ 199 */
200 buf[off++] |= (atomic_read( 200 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
201 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
202 /* 201 /*
203 * Set supported ASYMMETRIC ACCESS State bits 202 * Set supported ASYMMETRIC ACCESS State bits
204 */ 203 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
710 709
711 spin_lock(&lun->lun_tg_pt_gp_lock); 710 spin_lock(&lun->lun_tg_pt_gp_lock);
712 tg_pt_gp = lun->lun_tg_pt_gp; 711 tg_pt_gp = lun->lun_tg_pt_gp;
713 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 712 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
714 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 713 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
715 714
716 // XXX: keeps using tg_pt_gp without reference after unlock 715 // XXX: keeps using tg_pt_gp without reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
911} 910}
912 911
913/* 912/*
914 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 913 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
915 */ 914 */
916static int core_alua_update_tpg_primary_metadata( 915static int core_alua_update_tpg_primary_metadata(
917 struct t10_alua_tg_pt_gp *tg_pt_gp) 916 struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
934 "alua_access_state=0x%02x\n" 933 "alua_access_state=0x%02x\n"
935 "alua_access_status=0x%02x\n", 934 "alua_access_status=0x%02x\n",
936 tg_pt_gp->tg_pt_gp_id, 935 tg_pt_gp->tg_pt_gp_id,
937 tg_pt_gp->tg_pt_gp_alua_pending_state, 936 tg_pt_gp->tg_pt_gp_alua_access_state,
938 tg_pt_gp->tg_pt_gp_alua_access_status); 937 tg_pt_gp->tg_pt_gp_alua_access_status);
939 938
940 snprintf(path, ALUA_METADATA_PATH_LEN, 939 snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
1013 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1012 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1014} 1013}
1015 1014
1016static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1017{
1018 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
1019 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
1020 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1021 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
1022 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
1023
1024 /*
1025 * Update the ALUA metadata buf that has been allocated in
1026 * core_alua_do_port_transition(), this metadata will be written
1027 * to struct file.
1028 *
1029 * Note that there is the case where we do not want to update the
1030 * metadata when the saved metadata is being parsed in userspace
1031 * when setting the existing port access state and access status.
1032 *
1033 * Also note that the failure to write out the ALUA metadata to
1034 * struct file does NOT affect the actual ALUA transition.
1035 */
1036 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1037 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
1038 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1039 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
1040 }
1041 /*
1042 * Set the current primary ALUA access state to the requested new state
1043 */
1044 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1045 tg_pt_gp->tg_pt_gp_alua_pending_state);
1046
1047 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1048 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1049 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1050 tg_pt_gp->tg_pt_gp_id,
1051 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
1052 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1053
1054 core_alua_queue_state_change_ua(tg_pt_gp);
1055
1056 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1057 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1058 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1059
1060 if (tg_pt_gp->tg_pt_gp_transition_complete)
1061 complete(tg_pt_gp->tg_pt_gp_transition_complete);
1062}
1063
1064static int core_alua_do_transition_tg_pt( 1015static int core_alua_do_transition_tg_pt(
1065 struct t10_alua_tg_pt_gp *tg_pt_gp, 1016 struct t10_alua_tg_pt_gp *tg_pt_gp,
1066 int new_state, 1017 int new_state,
1067 int explicit) 1018 int explicit)
1068{ 1019{
1069 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1020 int prev_state;
1070 DECLARE_COMPLETION_ONSTACK(wait);
1071 1021
1022 mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1072 /* Nothing to be done here */ 1023 /* Nothing to be done here */
1073 if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) 1024 if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1025 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1074 return 0; 1026 return 0;
1027 }
1075 1028
1076 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) 1029 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1030 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1077 return -EAGAIN; 1031 return -EAGAIN;
1078 1032 }
1079 /*
1080 * Flush any pending transitions
1081 */
1082 if (!explicit)
1083 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1084 1033
1085 /* 1034 /*
1086 * Save the old primary ALUA access state, and set the current state 1035 * Save the old primary ALUA access state, and set the current state
1087 * to ALUA_ACCESS_STATE_TRANSITION. 1036 * to ALUA_ACCESS_STATE_TRANSITION.
1088 */ 1037 */
1089 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1038 prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1090 ALUA_ACCESS_STATE_TRANSITION); 1039 tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1091 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 1040 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1092 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1041 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1093 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1042 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1094 1043
1095 core_alua_queue_state_change_ua(tg_pt_gp); 1044 core_alua_queue_state_change_ua(tg_pt_gp);
1096 1045
1097 if (new_state == ALUA_ACCESS_STATE_TRANSITION) 1046 if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1047 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1098 return 0; 1048 return 0;
1099 1049 }
1100 tg_pt_gp->tg_pt_gp_alua_previous_state =
1101 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
1102 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1103 1050
1104 /* 1051 /*
1105 * Check for the optional ALUA primary state transition delay 1052 * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
1108 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1055 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1109 1056
1110 /* 1057 /*
1111 * Take a reference for workqueue item 1058 * Set the current primary ALUA access state to the requested new state
1112 */ 1059 */
1113 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1060 tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1114 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1115 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1116 1061
1117 schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); 1062 /*
1118 if (explicit) { 1063 * Update the ALUA metadata buf that has been allocated in
1119 tg_pt_gp->tg_pt_gp_transition_complete = &wait; 1064 * core_alua_do_port_transition(), this metadata will be written
1120 wait_for_completion(&wait); 1065 * to struct file.
1121 tg_pt_gp->tg_pt_gp_transition_complete = NULL; 1066 *
1067 * Note that there is the case where we do not want to update the
1068 * metadata when the saved metadata is being parsed in userspace
1069 * when setting the existing port access state and access status.
1070 *
1071 * Also note that the failure to write out the ALUA metadata to
1072 * struct file does NOT affect the actual ALUA transition.
1073 */
1074 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1075 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1122 } 1076 }
1123 1077
1078 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1079 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1080 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1081 tg_pt_gp->tg_pt_gp_id,
1082 core_alua_dump_state(prev_state),
1083 core_alua_dump_state(new_state));
1084
1085 core_alua_queue_state_change_ua(tg_pt_gp);
1086
1087 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1124 return 0; 1088 return 0;
1125} 1089}
1126 1090
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1685 } 1649 }
1686 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1650 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1687 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); 1651 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1688 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1652 mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1689 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1653 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1690 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1654 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1691 INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1692 core_alua_do_transition_tg_pt_work);
1693 tg_pt_gp->tg_pt_gp_dev = dev; 1655 tg_pt_gp->tg_pt_gp_dev = dev;
1694 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1656 tg_pt_gp->tg_pt_gp_alua_access_state =
1695 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1657 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1696 /* 1658 /*
1697 * Enable both explicit and implicit ALUA support by default 1659 * Enable both explicit and implicit ALUA support by default
1698 */ 1660 */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
1797 dev->t10_alua.alua_tg_pt_gps_counter--; 1759 dev->t10_alua.alua_tg_pt_gps_counter--;
1798 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1760 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1799 1761
1800 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1801
1802 /* 1762 /*
1803 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1763 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1804 * core_alua_get_tg_pt_gp_by_name() in 1764 * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1938 "Primary Access Status: %s\nTG Port Secondary Access" 1898 "Primary Access Status: %s\nTG Port Secondary Access"
1939 " State: %s\nTG Port Secondary Access Status: %s\n", 1899 " State: %s\nTG Port Secondary Access Status: %s\n",
1940 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1900 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1941 core_alua_dump_state(atomic_read( 1901 core_alua_dump_state(
1942 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1902 tg_pt_gp->tg_pt_gp_alua_access_state),
1943 core_alua_dump_status( 1903 core_alua_dump_status(
1944 tg_pt_gp->tg_pt_gp_alua_access_status), 1904 tg_pt_gp->tg_pt_gp_alua_access_status),
1945 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1905 atomic_read(&lun->lun_tg_pt_secondary_offline) ?
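
The ALUA rework above drops the transition workqueue, the atomic_t access state, and the pending/previous state fields in favour of a single tg_pt_gp_transition_mutex that serializes the whole transition. A simplified userspace model of that locking scheme (pthread mutex standing in for the kernel mutex; state names abbreviated, side effects reduced to comments):

#include <pthread.h>
#include <stdio.h>

enum alua_state { ALUA_OPTIMIZED, ALUA_STANDBY, ALUA_TRANSITION };

struct tg_pt_gp {
	pthread_mutex_t transition_mutex;	/* tg_pt_gp_transition_mutex */
	enum alua_state access_state;		/* plain int, no atomic_t    */
};

/* The mutex makes the transition synchronous: anyone who takes it sees
 * either the old state, ALUA_TRANSITION, or the new state, and two
 * transitions can no longer interleave. */
static int do_transition(struct tg_pt_gp *gp, enum alua_state new_state)
{
	pthread_mutex_lock(&gp->transition_mutex);
	if (gp->access_state == new_state) {
		pthread_mutex_unlock(&gp->transition_mutex);
		return 0;			/* nothing to be done */
	}

	gp->access_state = ALUA_TRANSITION;	/* visible intermediate step */
	/* ... queue unit attentions, optional delay, write metadata ... */
	gp->access_state = new_state;

	pthread_mutex_unlock(&gp->transition_mutex);
	return 0;
}

int main(void)
{
	struct tg_pt_gp gp = { PTHREAD_MUTEX_INITIALIZER, ALUA_OPTIMIZED };

	do_transition(&gp, ALUA_STANDBY);
	printf("access_state=%d\n", gp.access_state);
	return 0;
}
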
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 38b5025e4c7a..70657fd56440 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2392 char *page) 2392 char *page)
2393{ 2393{
2394 return sprintf(page, "%d\n", 2394 return sprintf(page, "%d\n",
2395 atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); 2395 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2396} 2396}
2397 2397
2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, 2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d8a16ca6baa5..d1e6cab8e3d3 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
92 pr_err("Source se_lun->lun_se_dev does not exist\n"); 92 pr_err("Source se_lun->lun_se_dev does not exist\n");
93 return -EINVAL; 93 return -EINVAL;
94 } 94 }
95 if (lun->lun_shutdown) {
96 pr_err("Unable to create mappedlun symlink because"
97 " lun->lun_shutdown=true\n");
98 return -EINVAL;
99 }
95 se_tpg = lun->lun_tpg; 100 se_tpg = lun->lun_tpg;
96 101
97 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 102 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 6fb191914f45..dfaef4d3b2d2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
642 */ 642 */
643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
644 644
645 lun->lun_shutdown = true;
646
645 core_clear_lun_from_tpg(lun, tpg); 647 core_clear_lun_from_tpg(lun, tpg);
646 /* 648 /*
647 * Wait for any active I/O references to percpu se_lun->lun_ref to 649 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
663 } 665 }
664 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 666 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
665 hlist_del_rcu(&lun->link); 667 hlist_del_rcu(&lun->link);
668
669 lun->lun_shutdown = false;
666 mutex_unlock(&tpg->tpg_lun_mutex); 670 mutex_unlock(&tpg->tpg_lun_mutex);
667 671
668 percpu_ref_exit(&lun->lun_ref); 672 percpu_ref_exit(&lun->lun_ref);
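
The two hunks above (target_core_fabric_configfs.c and target_core_tpg.c) cooperate: core_tpg_remove_lun() raises a lun_shutdown flag for the duration of the teardown, and target_fabric_mappedlun_link() refuses to create new mappedlun symlinks while it is set. A minimal sketch of that guard, with locking simplified to one mutex (the real code relies on the tpg_lun_mutex and configfs serialization):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lun {
	pthread_mutex_t lock;
	bool lun_shutdown;
};

/* Mirrors target_fabric_mappedlun_link(): bail out once removal began. */
static int mappedlun_link(struct lun *lun)
{
	int ret = 0;

	pthread_mutex_lock(&lun->lock);
	if (lun->lun_shutdown)
		ret = -EINVAL;
	/* ... otherwise create the symlink / ACL ... */
	pthread_mutex_unlock(&lun->lock);
	return ret;
}

/* Mirrors core_tpg_remove_lun(): flag up, tear down, flag back down. */
static void remove_lun(struct lun *lun)
{
	pthread_mutex_lock(&lun->lock);
	lun->lun_shutdown = true;
	/* ... clear LUN from TPG, wait for lun_ref, unhash ... */
	lun->lun_shutdown = false;
	pthread_mutex_unlock(&lun->lock);
}

int main(void)
{
	struct lun lun = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("normal link:     %d\n", mappedlun_link(&lun));	/* 0       */
	lun.lun_shutdown = true;	/* as seen mid-removal              */
	printf("during shutdown: %d\n", mappedlun_link(&lun));	/* -EINVAL */
	remove_lun(&lun);
	printf("after removal:   %d\n", mappedlun_link(&lun));	/* 0       */
	return 0;
}
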
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b1a3cdb29468..a0cd56ee5fe9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
64struct kmem_cache *t10_alua_lba_map_mem_cache; 64struct kmem_cache *t10_alua_lba_map_mem_cache;
65 65
66static void transport_complete_task_attr(struct se_cmd *cmd); 66static void transport_complete_task_attr(struct se_cmd *cmd);
67static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
67static void transport_handle_queue_full(struct se_cmd *cmd, 68static void transport_handle_queue_full(struct se_cmd *cmd,
68 struct se_device *dev); 69 struct se_device *dev, int err, bool write_pending);
69static int transport_put_cmd(struct se_cmd *cmd); 70static int transport_put_cmd(struct se_cmd *cmd);
70static void target_complete_ok_work(struct work_struct *work); 71static void target_complete_ok_work(struct work_struct *work);
71 72
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
804 805
805 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 806 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
806 transport_write_pending_qf(cmd); 807 transport_write_pending_qf(cmd);
807 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 808 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
809 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
808 transport_complete_qf(cmd); 810 transport_complete_qf(cmd);
809 } 811 }
810} 812}
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1719 } 1721 }
1720 trace_target_cmd_complete(cmd); 1722 trace_target_cmd_complete(cmd);
1721 ret = cmd->se_tfo->queue_status(cmd); 1723 ret = cmd->se_tfo->queue_status(cmd);
1722 if (ret == -EAGAIN || ret == -ENOMEM) 1724 if (ret)
1723 goto queue_full; 1725 goto queue_full;
1724 goto check_stop; 1726 goto check_stop;
1725 default: 1727 default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1730 } 1732 }
1731 1733
1732 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1734 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1733 if (ret == -EAGAIN || ret == -ENOMEM) 1735 if (ret)
1734 goto queue_full; 1736 goto queue_full;
1735 1737
1736check_stop: 1738check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
1739 return; 1741 return;
1740 1742
1741queue_full: 1743queue_full:
1742 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1744 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1743 transport_handle_queue_full(cmd, cmd->se_dev);
1744} 1745}
1745EXPORT_SYMBOL(transport_generic_request_failure); 1746EXPORT_SYMBOL(transport_generic_request_failure);
1746 1747
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
1977 int ret = 0; 1978 int ret = 0;
1978 1979
1979 transport_complete_task_attr(cmd); 1980 transport_complete_task_attr(cmd);
1981 /*
1982 * If a fabric driver ->write_pending() or ->queue_data_in() callback
 1983 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
1984 * the same callbacks should not be retried. Return CHECK_CONDITION
1985 * if a scsi_status is not already set.
1986 *
 1987 * If a fabric driver ->queue_status() has returned non-zero, always
 1988 * keep retrying no matter what.
1989 */
1990 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
1991 if (cmd->scsi_status)
1992 goto queue_status;
1980 1993
1981 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1994 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
1982 trace_target_cmd_complete(cmd); 1995 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1983 ret = cmd->se_tfo->queue_status(cmd); 1996 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
1984 goto out; 1997 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
1998 goto queue_status;
1985 } 1999 }
1986 2000
2001 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2002 goto queue_status;
2003
1987 switch (cmd->data_direction) { 2004 switch (cmd->data_direction) {
1988 case DMA_FROM_DEVICE: 2005 case DMA_FROM_DEVICE:
1989 if (cmd->scsi_status) 2006 if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
2007 break; 2024 break;
2008 } 2025 }
2009 2026
2010out:
2011 if (ret < 0) { 2027 if (ret < 0) {
2012 transport_handle_queue_full(cmd, cmd->se_dev); 2028 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2013 return; 2029 return;
2014 } 2030 }
2015 transport_lun_remove_cmd(cmd); 2031 transport_lun_remove_cmd(cmd);
2016 transport_cmd_check_stop_to_fabric(cmd); 2032 transport_cmd_check_stop_to_fabric(cmd);
2017} 2033}
2018 2034
2019static void transport_handle_queue_full( 2035static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2020 struct se_cmd *cmd, 2036 int err, bool write_pending)
2021 struct se_device *dev)
2022{ 2037{
2038 /*
2039 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2040 * ->queue_data_in() callbacks from new process context.
2041 *
2042 * Otherwise for other errors, transport_complete_qf() will send
2043 * CHECK_CONDITION via ->queue_status() instead of attempting to
2044 * retry associated fabric driver data-transfer callbacks.
2045 */
2046 if (err == -EAGAIN || err == -ENOMEM) {
2047 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2048 TRANSPORT_COMPLETE_QF_OK;
2049 } else {
2050 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2051 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2052 }
2053
2023 spin_lock_irq(&dev->qf_cmd_lock); 2054 spin_lock_irq(&dev->qf_cmd_lock);
2024 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2055 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2025 atomic_inc_mb(&dev->dev_qf_count); 2056 atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
2083 WARN_ON(!cmd->scsi_status); 2114 WARN_ON(!cmd->scsi_status);
2084 ret = transport_send_check_condition_and_sense( 2115 ret = transport_send_check_condition_and_sense(
2085 cmd, 0, 1); 2116 cmd, 0, 1);
2086 if (ret == -EAGAIN || ret == -ENOMEM) 2117 if (ret)
2087 goto queue_full; 2118 goto queue_full;
2088 2119
2089 transport_lun_remove_cmd(cmd); 2120 transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
2109 } else if (rc) { 2140 } else if (rc) {
2110 ret = transport_send_check_condition_and_sense(cmd, 2141 ret = transport_send_check_condition_and_sense(cmd,
2111 rc, 0); 2142 rc, 0);
2112 if (ret == -EAGAIN || ret == -ENOMEM) 2143 if (ret)
2113 goto queue_full; 2144 goto queue_full;
2114 2145
2115 transport_lun_remove_cmd(cmd); 2146 transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
2134 if (target_read_prot_action(cmd)) { 2165 if (target_read_prot_action(cmd)) {
2135 ret = transport_send_check_condition_and_sense(cmd, 2166 ret = transport_send_check_condition_and_sense(cmd,
2136 cmd->pi_err, 0); 2167 cmd->pi_err, 0);
2137 if (ret == -EAGAIN || ret == -ENOMEM) 2168 if (ret)
2138 goto queue_full; 2169 goto queue_full;
2139 2170
2140 transport_lun_remove_cmd(cmd); 2171 transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
2144 2175
2145 trace_target_cmd_complete(cmd); 2176 trace_target_cmd_complete(cmd);
2146 ret = cmd->se_tfo->queue_data_in(cmd); 2177 ret = cmd->se_tfo->queue_data_in(cmd);
2147 if (ret == -EAGAIN || ret == -ENOMEM) 2178 if (ret)
2148 goto queue_full; 2179 goto queue_full;
2149 break; 2180 break;
2150 case DMA_TO_DEVICE: 2181 case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
2157 atomic_long_add(cmd->data_length, 2188 atomic_long_add(cmd->data_length,
2158 &cmd->se_lun->lun_stats.tx_data_octets); 2189 &cmd->se_lun->lun_stats.tx_data_octets);
2159 ret = cmd->se_tfo->queue_data_in(cmd); 2190 ret = cmd->se_tfo->queue_data_in(cmd);
2160 if (ret == -EAGAIN || ret == -ENOMEM) 2191 if (ret)
2161 goto queue_full; 2192 goto queue_full;
2162 break; 2193 break;
2163 } 2194 }
@@ -2166,7 +2197,7 @@ queue_rsp:
2166queue_status: 2197queue_status:
2167 trace_target_cmd_complete(cmd); 2198 trace_target_cmd_complete(cmd);
2168 ret = cmd->se_tfo->queue_status(cmd); 2199 ret = cmd->se_tfo->queue_status(cmd);
2169 if (ret == -EAGAIN || ret == -ENOMEM) 2200 if (ret)
2170 goto queue_full; 2201 goto queue_full;
2171 break; 2202 break;
2172 default: 2203 default:
@@ -2180,8 +2211,8 @@ queue_status:
2180queue_full: 2211queue_full:
2181 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2212 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2182 " data_direction: %d\n", cmd, cmd->data_direction); 2213 " data_direction: %d\n", cmd, cmd->data_direction);
2183 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2214
2184 transport_handle_queue_full(cmd, cmd->se_dev); 2215 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2185} 2216}
2186 2217
2187void target_free_sgl(struct scatterlist *sgl, int nents) 2218void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2449 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2480 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2450 2481
2451 ret = cmd->se_tfo->write_pending(cmd); 2482 ret = cmd->se_tfo->write_pending(cmd);
2452 if (ret == -EAGAIN || ret == -ENOMEM) 2483 if (ret)
2453 goto queue_full; 2484 goto queue_full;
2454 2485
2455 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2486 return 0;
2456 WARN_ON(ret);
2457
2458 return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2459 2487
2460queue_full: 2488queue_full:
2461 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2489 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
2462 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2490 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2463 transport_handle_queue_full(cmd, cmd->se_dev);
2464 return 0; 2491 return 0;
2465} 2492}
2466EXPORT_SYMBOL(transport_generic_new_cmd); 2493EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2470 int ret; 2497 int ret;
2471 2498
2472 ret = cmd->se_tfo->write_pending(cmd); 2499 ret = cmd->se_tfo->write_pending(cmd);
2473 if (ret == -EAGAIN || ret == -ENOMEM) { 2500 if (ret) {
2474 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2501 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2475 cmd); 2502 cmd);
2476 transport_handle_queue_full(cmd, cmd->se_dev); 2503 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2477 } 2504 }
2478} 2505}
2479 2506
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3011 __releases(&cmd->t_state_lock) 3038 __releases(&cmd->t_state_lock)
3012 __acquires(&cmd->t_state_lock) 3039 __acquires(&cmd->t_state_lock)
3013{ 3040{
3041 int ret;
3042
3014 assert_spin_locked(&cmd->t_state_lock); 3043 assert_spin_locked(&cmd->t_state_lock);
3015 WARN_ON_ONCE(!irqs_disabled()); 3044 WARN_ON_ONCE(!irqs_disabled());
3016 3045
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3034 trace_target_cmd_complete(cmd); 3063 trace_target_cmd_complete(cmd);
3035 3064
3036 spin_unlock_irq(&cmd->t_state_lock); 3065 spin_unlock_irq(&cmd->t_state_lock);
3037 cmd->se_tfo->queue_status(cmd); 3066 ret = cmd->se_tfo->queue_status(cmd);
3067 if (ret)
3068 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3038 spin_lock_irq(&cmd->t_state_lock); 3069 spin_lock_irq(&cmd->t_state_lock);
3039 3070
3040 return 1; 3071 return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
3055void transport_send_task_abort(struct se_cmd *cmd) 3086void transport_send_task_abort(struct se_cmd *cmd)
3056{ 3087{
3057 unsigned long flags; 3088 unsigned long flags;
3089 int ret;
3058 3090
3059 spin_lock_irqsave(&cmd->t_state_lock, flags); 3091 spin_lock_irqsave(&cmd->t_state_lock, flags);
3060 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 3092 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
3090 cmd->t_task_cdb[0], cmd->tag); 3122 cmd->t_task_cdb[0], cmd->tag);
3091 3123
3092 trace_target_cmd_complete(cmd); 3124 trace_target_cmd_complete(cmd);
3093 cmd->se_tfo->queue_status(cmd); 3125 ret = cmd->se_tfo->queue_status(cmd);
3126 if (ret)
3127 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3094} 3128}
3095 3129
3096static void target_tmr_work(struct work_struct *work) 3130static void target_tmr_work(struct work_struct *work)
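
The transport changes above funnel every fabric callback failure through transport_handle_queue_full(), which now distinguishes retryable errors (-EAGAIN/-ENOMEM keep the old retry behaviour) from anything else (the new TRANSPORT_COMPLETE_QF_ERR state, later answered with a CHECK_CONDITION). The classification itself is simple enough to show as a standalone sketch (the state names mirror the kernel's, everything else is illustrative):

#include <errno.h>
#include <stdio.h>

enum qf_state {
	TRANSPORT_COMPLETE_QF_WP,	/* retry ->write_pending()          */
	TRANSPORT_COMPLETE_QF_OK,	/* retry ->queue_data_in()/status   */
	TRANSPORT_COMPLETE_QF_ERR,	/* fatal: send CHECK_CONDITION next */
};

/* -EAGAIN/-ENOMEM mean "queue full, try the same callback again later";
 * any other error stops retrying the data-transfer callbacks and lets
 * the queue-full worker emit a CHECK_CONDITION instead. */
static enum qf_state classify_qf(int err, int write_pending)
{
	if (err == -EAGAIN || err == -ENOMEM)
		return write_pending ? TRANSPORT_COMPLETE_QF_WP
				     : TRANSPORT_COMPLETE_QF_OK;
	return TRANSPORT_COMPLETE_QF_ERR;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_qf(-ENOMEM, 1),	/* 0: QF_WP  */
	       classify_qf(-EAGAIN, 0),	/* 1: QF_OK  */
	       classify_qf(-EIO, 0));	/* 2: QF_ERR */
	return 0;
}
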
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c6874c38a10b..f615c3bbb73e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
311 DATA_BLOCK_BITS); 311 DATA_BLOCK_BITS);
312} 312}
313 313
314static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap, 314static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
315 struct scatterlist *data_sg, unsigned int data_nents) 315 bool bidi)
316{ 316{
317 struct se_cmd *se_cmd = cmd->se_cmd;
317 int i, block; 318 int i, block;
318 int block_remaining = 0; 319 int block_remaining = 0;
319 void *from, *to; 320 void *from, *to;
320 size_t copy_bytes, from_offset; 321 size_t copy_bytes, from_offset;
321 struct scatterlist *sg; 322 struct scatterlist *sg, *data_sg;
323 unsigned int data_nents;
324 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
325
326 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
327
328 if (!bidi) {
329 data_sg = se_cmd->t_data_sg;
330 data_nents = se_cmd->t_data_nents;
331 } else {
332 uint32_t count;
333
334 /*
335 * For bidi case, the first count blocks are for Data-Out
336 * buffer blocks, and before gathering the Data-In buffer
337 * the Data-Out buffer blocks should be discarded.
338 */
339 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
340 while (count--) {
341 block = find_first_bit(bitmap, DATA_BLOCK_BITS);
342 clear_bit(block, bitmap);
343 }
344
345 data_sg = se_cmd->t_bidi_data_sg;
346 data_nents = se_cmd->t_bidi_data_nents;
347 }
322 348
323 for_each_sg(data_sg, sg, data_nents, i) { 349 for_each_sg(data_sg, sg, data_nents, i) {
324 int sg_remaining = sg->length; 350 int sg_remaining = sg->length;
325 to = kmap_atomic(sg_page(sg)) + sg->offset; 351 to = kmap_atomic(sg_page(sg)) + sg->offset;
326 while (sg_remaining > 0) { 352 while (sg_remaining > 0) {
327 if (block_remaining == 0) { 353 if (block_remaining == 0) {
328 block = find_first_bit(cmd_bitmap, 354 block = find_first_bit(bitmap,
329 DATA_BLOCK_BITS); 355 DATA_BLOCK_BITS);
330 block_remaining = DATA_BLOCK_SIZE; 356 block_remaining = DATA_BLOCK_SIZE;
331 clear_bit(block, cmd_bitmap); 357 clear_bit(block, bitmap);
332 } 358 }
333 copy_bytes = min_t(size_t, sg_remaining, 359 copy_bytes = min_t(size_t, sg_remaining,
334 block_remaining); 360 block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
394 return true; 420 return true;
395} 421}
396 422
423static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
424{
425 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
426 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
427
428 if (se_cmd->se_cmd_flags & SCF_BIDI) {
429 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
430 data_length += round_up(se_cmd->t_bidi_data_sg->length,
431 DATA_BLOCK_SIZE);
432 }
433
434 return data_length;
435}
436
437static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
438{
439 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
440
441 return data_length / DATA_BLOCK_SIZE;
442}
443
397static sense_reason_t 444static sense_reason_t
398tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 445tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
399{ 446{
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
407 uint32_t cmd_head; 454 uint32_t cmd_head;
408 uint64_t cdb_off; 455 uint64_t cdb_off;
409 bool copy_to_data_area; 456 bool copy_to_data_area;
410 size_t data_length; 457 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
411 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); 458 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
412 459
413 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 460 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
421 * expensive to tell how many regions are freed in the bitmap 468 * expensive to tell how many regions are freed in the bitmap
422 */ 469 */
423 base_command_size = max(offsetof(struct tcmu_cmd_entry, 470 base_command_size = max(offsetof(struct tcmu_cmd_entry,
424 req.iov[se_cmd->t_bidi_data_nents + 471 req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
425 se_cmd->t_data_nents]),
426 sizeof(struct tcmu_cmd_entry)); 472 sizeof(struct tcmu_cmd_entry));
427 command_size = base_command_size 473 command_size = base_command_size
428 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 474 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
433 479
434 mb = udev->mb_addr; 480 mb = udev->mb_addr;
435 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 481 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
436 data_length = se_cmd->data_length;
437 if (se_cmd->se_cmd_flags & SCF_BIDI) {
438 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
439 data_length += se_cmd->t_bidi_data_sg->length;
440 }
441 if ((command_size > (udev->cmdr_size / 2)) || 482 if ((command_size > (udev->cmdr_size / 2)) ||
442 data_length > udev->data_size) { 483 data_length > udev->data_size) {
443 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 484 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
511 entry->req.iov_dif_cnt = 0; 552 entry->req.iov_dif_cnt = 0;
512 553
513 /* Handle BIDI commands */ 554 /* Handle BIDI commands */
514 iov_cnt = 0; 555 if (se_cmd->se_cmd_flags & SCF_BIDI) {
515 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, 556 iov_cnt = 0;
516 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); 557 iov++;
517 entry->req.iov_bidi_cnt = iov_cnt; 558 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
518 559 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
560 false);
561 entry->req.iov_bidi_cnt = iov_cnt;
562 }
519 /* cmd's data_bitmap is what changed in process */ 563 /* cmd's data_bitmap is what changed in process */
520 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap, 564 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
521 DATA_BLOCK_BITS); 565 DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
592 se_cmd->scsi_sense_length); 636 se_cmd->scsi_sense_length);
593 free_data_area(udev, cmd); 637 free_data_area(udev, cmd);
594 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 638 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
595 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
596
597 /* Get Data-In buffer before clean up */ 639 /* Get Data-In buffer before clean up */
598 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); 640 gather_data_area(udev, cmd, true);
599 gather_data_area(udev, bitmap,
600 se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
601 free_data_area(udev, cmd); 641 free_data_area(udev, cmd);
602 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 642 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
603 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); 643 gather_data_area(udev, cmd, false);
604
605 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
606 gather_data_area(udev, bitmap,
607 se_cmd->t_data_sg, se_cmd->t_data_nents);
608 free_data_area(udev, cmd); 644 free_data_area(udev, cmd);
609 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 645 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
610 free_data_area(udev, cmd); 646 free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
1196 if (ret < 0) 1232 if (ret < 0)
1197 return ret; 1233 return ret;
1198 1234
1199 if (!val) {
1200 pr_err("Illegal value for cmd_time_out\n");
1201 return -EINVAL;
1202 }
1203
1204 udev->cmd_time_out = val * MSEC_PER_SEC; 1235 udev->cmd_time_out = val * MSEC_PER_SEC;
1205 return count; 1236 return count;
1206} 1237}
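
Two tcmu details above are easy to misread: the per-command data length is now rounded up to whole DATA_BLOCK_SIZE blocks (including the BIDI buffer), and gather_data_area() for a BIDI completion first discards the Data-Out blocks from the bitmap before copying Data-In. A compilable approximation of both calculations (userspace, GCC/Clang builtins instead of the kernel bitmap helpers):

#include <stdint.h>
#include <stdio.h>

#define DATA_BLOCK_SIZE 4096u

static unsigned int blocks(unsigned int len)
{
	return (len + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE;	/* round up */
}

/* tcmu_cmd_get_block_cnt() analogue: Data-Out and BIDI (Data-In) buffers
 * are each rounded up to whole blocks before being added together. */
static unsigned int block_count(unsigned int data_len, unsigned int bidi_len)
{
	return blocks(data_len) + (bidi_len ? blocks(bidi_len) : 0);
}

/* gather_data_area(..., bidi=true) analogue: the first blocks(data_len)
 * bits of the allocation bitmap belong to the Data-Out buffer and are
 * cleared before the Data-In blocks are walked. */
static int first_data_in_block(uint64_t bitmap, unsigned int data_len)
{
	unsigned int skip = blocks(data_len);

	while (skip--)
		bitmap &= bitmap - 1;		/* clear lowest set bit */
	return bitmap ? __builtin_ctzll(bitmap) : -1;
}

int main(void)
{
	uint64_t bitmap = 0x3f;	/* blocks 0..5 allocated to this command */

	printf("blocks=%u first_data_in=%d\n",
	       block_count(6000, 9000),			/* 2 + 3 = 5     */
	       first_data_in_block(bitmap, 6000));	/* skips 0,1 -> 2 */
	return 0;
}
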
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index b0500a0a87b8..e4603b09863a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -492,6 +492,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
492} 492}
493 493
494/** 494/**
495 * tty_ldisc_restore - helper for tty ldisc change
496 * @tty: tty to recover
497 * @old: previous ldisc
498 *
499 * Restore the previous line discipline or N_TTY when a line discipline
500 * change fails due to an open error
501 */
502
503static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
504{
505 struct tty_ldisc *new_ldisc;
506 int r;
507
508 /* There is an outstanding reference here so this is safe */
509 old = tty_ldisc_get(tty, old->ops->num);
510 WARN_ON(IS_ERR(old));
511 tty->ldisc = old;
512 tty_set_termios_ldisc(tty, old->ops->num);
513 if (tty_ldisc_open(tty, old) < 0) {
514 tty_ldisc_put(old);
515 /* This driver is always present */
516 new_ldisc = tty_ldisc_get(tty, N_TTY);
517 if (IS_ERR(new_ldisc))
518 panic("n_tty: get");
519 tty->ldisc = new_ldisc;
520 tty_set_termios_ldisc(tty, N_TTY);
521 r = tty_ldisc_open(tty, new_ldisc);
522 if (r < 0)
523 panic("Couldn't open N_TTY ldisc for "
524 "%s --- error %d.",
525 tty_name(tty), r);
526 }
527}
528
529/**
495 * tty_set_ldisc - set line discipline 530 * tty_set_ldisc - set line discipline
496 * @tty: the terminal to set 531 * @tty: the terminal to set
497 * @ldisc: the line discipline 532 * @ldisc: the line discipline
@@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
504 539
505int tty_set_ldisc(struct tty_struct *tty, int disc) 540int tty_set_ldisc(struct tty_struct *tty, int disc)
506{ 541{
507 int retval, old_disc; 542 int retval;
543 struct tty_ldisc *old_ldisc, *new_ldisc;
544
545 new_ldisc = tty_ldisc_get(tty, disc);
546 if (IS_ERR(new_ldisc))
547 return PTR_ERR(new_ldisc);
508 548
509 tty_lock(tty); 549 tty_lock(tty);
510 retval = tty_ldisc_lock(tty, 5 * HZ); 550 retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
517 } 557 }
518 558
519 /* Check the no-op case */ 559 /* Check the no-op case */
520 old_disc = tty->ldisc->ops->num; 560 if (tty->ldisc->ops->num == disc)
521 if (old_disc == disc)
522 goto out; 561 goto out;
523 562
524 if (test_bit(TTY_HUPPED, &tty->flags)) { 563 if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
527 goto out; 566 goto out;
528 } 567 }
529 568
530 retval = tty_ldisc_reinit(tty, disc); 569 old_ldisc = tty->ldisc;
570
571 /* Shutdown the old discipline. */
572 tty_ldisc_close(tty, old_ldisc);
573
574 /* Now set up the new line discipline. */
575 tty->ldisc = new_ldisc;
576 tty_set_termios_ldisc(tty, disc);
577
578 retval = tty_ldisc_open(tty, new_ldisc);
531 if (retval < 0) { 579 if (retval < 0) {
532 /* Back to the old one or N_TTY if we can't */ 580 /* Back to the old one or N_TTY if we can't */
533 if (tty_ldisc_reinit(tty, old_disc) < 0) { 581 tty_ldisc_put(new_ldisc);
534 pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); 582 tty_ldisc_restore(tty, old_ldisc);
535 if (tty_ldisc_reinit(tty, N_TTY) < 0) {
536 /* At this point we have tty->ldisc == NULL. */
537 pr_err("tty: reinitializing N_TTY failed\n");
538 }
539 }
540 } 583 }
541 584
542 if (tty->ldisc && tty->ldisc->ops->num != old_disc && 585 if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
543 tty->ops->set_ldisc) {
544 down_read(&tty->termios_rwsem); 586 down_read(&tty->termios_rwsem);
545 tty->ops->set_ldisc(tty); 587 tty->ops->set_ldisc(tty);
546 up_read(&tty->termios_rwsem); 588 up_read(&tty->termios_rwsem);
547 } 589 }
548 590
591 /* At this point we hold a reference to the new ldisc and a
592 reference to the old ldisc, or we hold two references to
593 the old ldisc (if it was restored as part of error cleanup
594 above). In either case, releasing a single reference from
595 the old ldisc is correct. */
596 new_ldisc = old_ldisc;
549out: 597out:
550 tty_ldisc_unlock(tty); 598 tty_ldisc_unlock(tty);
551 599
@@ -553,6 +601,7 @@ out:
553 already running */ 601 already running */
554 tty_buffer_restart_work(tty->port); 602 tty_buffer_restart_work(tty->port);
555err: 603err:
604 tty_ldisc_put(new_ldisc); /* drop the extra reference */
556 tty_unlock(tty); 605 tty_unlock(tty);
557 return retval; 606 return retval;
558} 607}
@@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
613 int retval; 662 int retval;
614 663
615 ld = tty_ldisc_get(tty, disc); 664 ld = tty_ldisc_get(tty, disc);
616 if (IS_ERR(ld)) 665 if (IS_ERR(ld)) {
666 BUG_ON(disc == N_TTY);
617 return PTR_ERR(ld); 667 return PTR_ERR(ld);
668 }
618 669
619 if (tty->ldisc) { 670 if (tty->ldisc) {
620 tty_ldisc_close(tty, tty->ldisc); 671 tty_ldisc_close(tty, tty->ldisc);
@@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
626 tty_set_termios_ldisc(tty, disc); 677 tty_set_termios_ldisc(tty, disc);
627 retval = tty_ldisc_open(tty, tty->ldisc); 678 retval = tty_ldisc_open(tty, tty->ldisc);
628 if (retval) { 679 if (retval) {
629 tty_ldisc_put(tty->ldisc); 680 if (!WARN_ON(disc == N_TTY)) {
630 tty->ldisc = NULL; 681 tty_ldisc_put(tty->ldisc);
682 tty->ldisc = NULL;
683 }
631 } 684 }
632 return retval; 685 return retval;
633} 686}
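
The tty_set_ldisc() rework above is mostly about reference ownership: a reference on the candidate ldisc is taken before any teardown, the old ldisc is restored (with a fresh reference) if opening the new one fails, and in either outcome exactly one reference to the ldisc that is no longer needed is dropped on the way out. A userspace model of that ownership dance (tty locking, termios updates and the N_TTY last-resort fallback are omitted; the helpers are simplified stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct ldisc {
	int num;
	int refs;
};

static struct ldisc *ldisc_get(int num)
{
	struct ldisc *ld = malloc(sizeof(*ld));

	if (ld) {
		ld->num = num;
		ld->refs = 1;
	}
	return ld;
}

static void ldisc_put(struct ldisc *ld)
{
	if (ld && --ld->refs == 0)
		free(ld);
}

/* Pretend discipline 99 refuses to open, everything else succeeds. */
static int ldisc_open(struct ldisc *ld)
{
	return ld->num == 99 ? -1 : 0;
}

static int set_ldisc(struct ldisc **cur, int disc)
{
	struct ldisc *old = *cur;
	struct ldisc *new = ldisc_get(disc);
	int ret;

	if (!new)
		return -1;

	ret = ldisc_open(new);
	if (ret < 0) {
		/* Candidate failed: drop it and, like tty_ldisc_restore(),
		 * take an extra reference on the old ldisc we keep using. */
		ldisc_put(new);
		old->refs++;
	} else {
		*cur = new;
	}
	/* One put of the old ldisc is correct in both outcomes: it drops
	 * either its former "installed" reference or the extra one taken
	 * just above. */
	ldisc_put(old);
	return ret;
}

int main(void)
{
	struct ldisc *cur = ldisc_get(0);	/* N_TTY analogue */

	printf("-> 5:  ret=%d now=%d\n", set_ldisc(&cur, 5), cur->num);
	printf("-> 99: ret=%d now=%d\n", set_ldisc(&cur, 99), cur->num);
	ldisc_put(cur);
	return 0;
}
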
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d2351139342f..a82e2bd5ea34 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
373 usb_ep_free_request(fu->ep_in, fu->bot_req_in); 373 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
374 usb_ep_free_request(fu->ep_out, fu->bot_req_out); 374 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
375 usb_ep_free_request(fu->ep_out, fu->cmd.req); 375 usb_ep_free_request(fu->ep_out, fu->cmd.req);
376 usb_ep_free_request(fu->ep_out, fu->bot_status.req); 376 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
377 377
378 kfree(fu->cmd.buf); 378 kfree(fu->cmd.buf);
379 379
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8c4dc1e1f94f..b827a8113e26 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/fb.h> 12#include <linux/fb.h>
13#include <linux/pci.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/screen_info.h> 15#include <linux/screen_info.h>
15#include <video/vga.h> 16#include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
143}; 144};
144ATTRIBUTE_GROUPS(efifb); 145ATTRIBUTE_GROUPS(efifb);
145 146
147static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
148
146static int efifb_probe(struct platform_device *dev) 149static int efifb_probe(struct platform_device *dev)
147{ 150{
148 struct fb_info *info; 151 struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
152 unsigned int size_total; 155 unsigned int size_total;
153 char *option = NULL; 156 char *option = NULL;
154 157
155 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) 158 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
156 return -ENODEV; 159 return -ENODEV;
157 160
158 if (fb_get_options("efifb", &option)) 161 if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
360}; 363};
361 364
362builtin_platform_driver(efifb_driver); 365builtin_platform_driver(efifb_driver);
366
367#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
368
369static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
370
371static void claim_efifb_bar(struct pci_dev *dev, int idx)
372{
373 u16 word;
374
375 pci_bar_found = true;
376
377 pci_read_config_word(dev, PCI_COMMAND, &word);
378 if (!(word & PCI_COMMAND_MEMORY)) {
379 pci_dev_disabled = true;
380 dev_err(&dev->dev,
381 "BAR %d: assigned to efifb but device is disabled!\n",
382 idx);
383 return;
384 }
385
386 if (pci_claim_resource(dev, idx)) {
387 pci_dev_disabled = true;
388 dev_err(&dev->dev,
389 "BAR %d: failed to claim resource for efifb!\n", idx);
390 return;
391 }
392
393 dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
394}
395
396static void efifb_fixup_resources(struct pci_dev *dev)
397{
398 u64 base = screen_info.lfb_base;
399 u64 size = screen_info.lfb_size;
400 int i;
401
402 if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
403 return;
404
405 if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
406 base |= (u64)screen_info.ext_lfb_base << 32;
407
408 if (!base)
409 return;
410
411 for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
412 struct resource *res = &dev->resource[i];
413
414 if (!(res->flags & IORESOURCE_MEM))
415 continue;
416
417 if (res->start <= base && res->end >= base + size - 1) {
418 claim_efifb_bar(dev, i);
419 break;
420 }
421 }
422}
423DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
424 16, efifb_fixup_resources);
425
426#endif
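
The new efifb PCI fixup above boils down to an interval check: walk the standard BARs and claim the first memory resource that wholly contains the EFI framebuffer [lfb_base, lfb_base + lfb_size). The containment test, together with the 64-bit base reassembly, looks like this (standalone sketch; resource ends are inclusive as in the kernel, and the struct is a simplified stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct res {
	uint64_t start;
	uint64_t end;		/* inclusive, kernel-style */
};

static uint64_t lfb_base(uint32_t base_lo, uint32_t ext_base_hi, bool cap64)
{
	uint64_t base = base_lo;

	if (cap64)		/* VIDEO_CAPABILITY_64BIT_BASE */
		base |= (uint64_t)ext_base_hi << 32;
	return base;
}

/* True when the BAR fully contains [base, base + size). */
static bool bar_contains_fb(const struct res *bar, uint64_t base, uint64_t size)
{
	return bar->start <= base && bar->end >= base + size - 1;
}

int main(void)
{
	struct res bar = { 0x80000000ull, 0x8fffffffull };	/* 256 MiB BAR */
	uint64_t base = lfb_base(0x80000000u, 0, false);

	printf("%d %d\n",
	       bar_contains_fb(&bar, base, 8u << 20),		/* inside  -> 1 */
	       bar_contains_fb(&bar, 0x70000000ull, 4096));	/* outside -> 0 */
	return 0;
}
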
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 1abba07b84b3..f4cbfb3b8a09 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
1608 return 0; 1608 return 0;
1609} 1609}
1610 1610
1611static void check_required_callbacks(struct omapfb_device *fbdev)
1612{
1613#define _C(x) (fbdev->ctrl->x != NULL)
1614#define _P(x) (fbdev->panel->x != NULL)
1615 BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
1616 BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
1617 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
1618 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
1619 _P(get_caps)));
1620#undef _P
1621#undef _C
1622}
1623
1624/* 1611/*
1625 * Called by LDM binding to probe and attach a new device. 1612 * Called by LDM binding to probe and attach a new device.
1626 * Initialization sequence: 1613 * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
1705 omapfb_ops.fb_mmap = omapfb_mmap; 1692 omapfb_ops.fb_mmap = omapfb_mmap;
1706 init_state++; 1693 init_state++;
1707 1694
1708 check_required_callbacks(fbdev);
1709
1710 r = planes_init(fbdev); 1695 r = planes_init(fbdev);
1711 if (r) 1696 if (r)
1712 goto cleanup; 1697 goto cleanup;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index bd017b57c47f..f599520374dd 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
578 578
579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); 579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
580 if (IS_ERR(par->vbat_reg)) { 580 if (IS_ERR(par->vbat_reg)) {
581 dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
582 PTR_ERR(par->vbat_reg));
583 ret = PTR_ERR(par->vbat_reg); 581 ret = PTR_ERR(par->vbat_reg);
584 goto fb_alloc_error; 582 if (ret == -ENODEV) {
583 par->vbat_reg = NULL;
584 } else {
585 dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
586 ret);
587 goto fb_alloc_error;
588 }
585 } 589 }
586 590
587 if (of_property_read_u32(node, "solomon,width", &par->width)) 591 if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
668 udelay(4); 672 udelay(4);
669 } 673 }
670 674
671 ret = regulator_enable(par->vbat_reg); 675 if (par->vbat_reg) {
672 if (ret) { 676 ret = regulator_enable(par->vbat_reg);
673 dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); 677 if (ret) {
674 goto reset_oled_error; 678 dev_err(&client->dev, "failed to enable VBAT: %d\n",
679 ret);
680 goto reset_oled_error;
681 }
675 } 682 }
676 683
677 ret = ssd1307fb_init(par); 684 ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
710 pwm_put(par->pwm); 717 pwm_put(par->pwm);
711 }; 718 };
712regulator_enable_error: 719regulator_enable_error:
713 regulator_disable(par->vbat_reg); 720 if (par->vbat_reg)
721 regulator_disable(par->vbat_reg);
714reset_oled_error: 722reset_oled_error:
715 fb_deferred_io_cleanup(info); 723 fb_deferred_io_cleanup(info);
716fb_alloc_error: 724fb_alloc_error:
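
The ssd1307fb probe change above turns VBAT into a genuinely optional supply: a -ENODEV from the regulator lookup just means "not wired up, skip enable/disable", while every other error still aborts the probe. The decision reduces to a few lines (sketch with an integer result code standing in for the IS_ERR/PTR_ERR handle):

#include <errno.h>
#include <stdio.h>

/* lookup_result >= 0: supply found; < 0: errno-style failure. */
static int get_optional_vbat(int lookup_result, int *have_vbat)
{
	if (lookup_result >= 0) {
		*have_vbat = 1;
		return 0;
	}
	if (lookup_result == -ENODEV) {
		*have_vbat = 0;		/* optional supply simply absent */
		return 0;
	}
	return lookup_result;		/* real error: fail the probe */
}

int main(void)
{
	int have;

	printf("%d(%d) ", get_optional_vbat(1, &have), have);
	printf("%d(%d) ", get_optional_vbat(-ENODEV, &have), have);
	printf("%d\n", get_optional_vbat(-EIO, &have));
	return 0;
}
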
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index d0115a7af0a9..3ee309c50b2d 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
643 break; 643 break;
644 644
645 case XenbusStateInitWait: 645 case XenbusStateInitWait:
646InitWait:
647 xenbus_switch_state(dev, XenbusStateConnected); 646 xenbus_switch_state(dev, XenbusStateConnected);
648 break; 647 break;
649 648
@@ -654,7 +653,8 @@ InitWait:
654 * get Connected twice here. 653 * get Connected twice here.
655 */ 654 */
656 if (dev->state != XenbusStateConnected) 655 if (dev->state != XenbusStateConnected)
657 goto InitWait; /* no InitWait seen yet, fudge it */ 656 /* no InitWait seen yet, fudge it */
657 xenbus_switch_state(dev, XenbusStateConnected);
658 658
659 if (xenbus_read_unsigned(info->xbdev->otherend, 659 if (xenbus_read_unsigned(info->xbdev->otherend,
660 "request-update", 0)) 660 "request-update", 0))
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 400d70b69379..48230a5e12f2 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
232 if (device_features & (1ULL << i)) 232 if (device_features & (1ULL << i))
233 __virtio_set_bit(dev, i); 233 __virtio_set_bit(dev, i);
234 234
235 if (drv->validate) {
236 err = drv->validate(dev);
237 if (err)
238 goto err;
239 }
240
235 err = virtio_finalize_features(dev); 241 err = virtio_finalize_features(dev);
236 if (err) 242 if (err)
237 goto err; 243 goto err;
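
The virtio core change above adds an optional ->validate() driver callback, invoked after the device's feature bits have been read but before they are finalized, so a driver can veto or adjust features early. The dispatch is the usual optional-callback pattern (sketch with invented driver/device structs, not the virtio API):

#include <stdio.h>

struct vdev;

struct vdrv {
	int (*validate)(struct vdev *dev);	/* optional */
	int (*probe)(struct vdev *dev);
};

struct vdev {
	const struct vdrv *drv;
	unsigned long long features;
};

static int dev_probe(struct vdev *dev)
{
	int err;

	/* ... read device features ... */
	if (dev->drv->validate) {
		err = dev->drv->validate(dev);	/* may clear feature bits */
		if (err)
			return err;
	}
	/* ... finalize features, then probe proper ... */
	return dev->drv->probe(dev);
}

static int my_validate(struct vdev *dev)
{
	dev->features &= ~(1ull << 5);	/* e.g. refuse one feature bit */
	return 0;
}

static int my_probe(struct vdev *dev)
{
	(void)dev;
	return 0;
}

int main(void)
{
	const struct vdrv drv = { my_validate, my_probe };
	struct vdev dev = { &drv, 0xffull };

	printf("probe=%d features=%#llx\n", dev_probe(&dev), dev.features);
	return 0;
}
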
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 590534910dc6..698d5d06fa03 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
33 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 33 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
34 int i; 34 int i;
35 35
36 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); 36 if (vp_dev->intx_enabled)
37 for (i = 1; i < vp_dev->msix_vectors; i++) 37 synchronize_irq(vp_dev->pci_dev->irq);
38
39 for (i = 0; i < vp_dev->msix_vectors; ++i)
38 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); 40 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
39} 41}
40 42
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
60static irqreturn_t vp_vring_interrupt(int irq, void *opaque) 62static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
61{ 63{
62 struct virtio_pci_device *vp_dev = opaque; 64 struct virtio_pci_device *vp_dev = opaque;
65 struct virtio_pci_vq_info *info;
63 irqreturn_t ret = IRQ_NONE; 66 irqreturn_t ret = IRQ_NONE;
64 struct virtqueue *vq; 67 unsigned long flags;
65 68
66 list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { 69 spin_lock_irqsave(&vp_dev->lock, flags);
67 if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) 70 list_for_each_entry(info, &vp_dev->virtqueues, node) {
71 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
68 ret = IRQ_HANDLED; 72 ret = IRQ_HANDLED;
69 } 73 }
74 spin_unlock_irqrestore(&vp_dev->lock, flags);
70 75
71 return ret; 76 return ret;
72} 77}
@@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+				   bool per_vq_vectors, struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtqueue *vq, *n;
+	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned i, v;
+	int err = -ENOMEM;
 
-	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-		if (vp_dev->msix_vector_map) {
-			int v = vp_dev->msix_vector_map[vq->index];
+	vp_dev->msix_vectors = nvectors;
 
-			if (v != VIRTIO_MSI_NO_VECTOR)
-				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
-					 vq);
-		}
-		vp_dev->del_vq(vq);
+	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+				     GFP_KERNEL);
+	if (!vp_dev->msix_names)
+		goto error;
+	vp_dev->msix_affinity_masks
+		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+			  GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto error;
+	for (i = 0; i < nvectors; ++i)
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+					GFP_KERNEL))
+			goto error;
+
+	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+					     nvectors, PCI_IRQ_MSIX |
+					     (desc ? PCI_IRQ_AFFINITY : 0),
+					     desc);
+	if (err < 0)
+		goto error;
+	vp_dev->msix_enabled = 1;
+
+	/* Set the vector used for configuration */
+	v = vp_dev->msix_used_vectors;
+	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+		 "%s-config", name);
+	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+			  vp_config_changed, 0, vp_dev->msix_names[v],
+			  vp_dev);
+	if (err)
+		goto error;
+	++vp_dev->msix_used_vectors;
+
+	v = vp_dev->config_vector(vp_dev, v);
+	/* Verify we had enough resources to assign the vector */
+	if (v == VIRTIO_MSI_NO_VECTOR) {
+		err = -EBUSY;
+		goto error;
 	}
+
+	if (!per_vq_vectors) {
+		/* Shared vector for all VQs */
+		v = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+			 "%s-virtqueues", name);
+		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
+				  vp_dev);
+		if (err)
+			goto error;
+		++vp_dev->msix_used_vectors;
+	}
+	return 0;
+error:
+	return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name,
+				     u16 msix_vec)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+	struct virtqueue *vq;
+	unsigned long flags;
+
+	/* fill out our structure that represents an active queue */
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+			      msix_vec);
+	if (IS_ERR(vq))
+		goto out_info;
+
+	info->vq = vq;
+	if (callback) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_add(&info->node, &vp_dev->virtqueues);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&info->node);
+	}
+
+	vp_dev->vqs[index] = info;
+	return vq;
+
+out_info:
+	kfree(info);
+	return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	vp_dev->del_vq(info);
+	kfree(info);
 }
 
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtqueue *vq, *n;
 	int i;
 
-	if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-		return;
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+		if (vp_dev->per_vq_vectors) {
+			int v = vp_dev->vqs[vq->index]->msix_vector;
 
-	vp_remove_vqs(vdev);
+			if (v != VIRTIO_MSI_NO_VECTOR) {
+				int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+				irq_set_affinity_hint(irq, NULL);
+				free_irq(irq, vq);
+			}
+		}
+		vp_del_vq(vq);
+	}
+	vp_dev->per_vq_vectors = false;
+
+	if (vp_dev->intx_enabled) {
+		free_irq(vp_dev->pci_dev->irq, vp_dev);
+		vp_dev->intx_enabled = 0;
+	}
 
-	if (vp_dev->pci_dev->msix_enabled) {
-		for (i = 0; i < vp_dev->msix_vectors; i++)
+	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
 			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
 		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-		kfree(vp_dev->msix_affinity_masks);
-		kfree(vp_dev->msix_names);
-		kfree(vp_dev->msix_vector_map);
+		pci_free_irq_vectors(vp_dev->pci_dev);
+		vp_dev->msix_enabled = 0;
 	}
 
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-	pci_free_irq_vectors(vp_dev->pci_dev);
+	vp_dev->msix_vectors = 0;
+	vp_dev->msix_used_vectors = 0;
+	kfree(vp_dev->msix_names);
+	vp_dev->msix_names = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
+	kfree(vp_dev->vqs);
+	vp_dev->vqs = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
-		const char * const names[], struct irq_affinity *desc)
+		const char * const names[], bool per_vq_vectors,
+		struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	const char *name = dev_name(&vp_dev->vdev.dev);
-	int i, j, err = -ENOMEM, allocated_vectors, nvectors;
-	unsigned flags = PCI_IRQ_MSIX;
-	bool shared = false;
 	u16 msix_vec;
+	int i, err, nvectors, allocated_vectors;
 
-	if (desc) {
-		flags |= PCI_IRQ_AFFINITY;
-		desc->pre_vectors++; /* virtio config vector */
-	}
-
-	nvectors = 1;
-	for (i = 0; i < nvqs; i++)
-		if (callbacks[i])
-			nvectors++;
-
-	/* Try one vector per queue first. */
-	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-			nvectors, flags, desc);
-	if (err < 0) {
-		/* Fallback to one vector for config, one shared for queues. */
-		shared = true;
-		err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-				PCI_IRQ_MSIX);
-		if (err < 0)
-			return err;
-	}
-	if (err < 0)
-		return err;
-
-	vp_dev->msix_vectors = nvectors;
-	vp_dev->msix_names = kmalloc_array(nvectors,
-			sizeof(*vp_dev->msix_names), GFP_KERNEL);
-	if (!vp_dev->msix_names)
-		goto out_free_irq_vectors;
-
-	vp_dev->msix_affinity_masks = kcalloc(nvectors,
-			sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-	if (!vp_dev->msix_affinity_masks)
-		goto out_free_msix_names;
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
 
-	for (i = 0; i < nvectors; ++i) {
-		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-				GFP_KERNEL))
-			goto out_free_msix_affinity_masks;
+	if (per_vq_vectors) {
+		/* Best option: one for change interrupt, one per vq. */
+		nvectors = 1;
+		for (i = 0; i < nvqs; ++i)
+			if (callbacks[i])
+				++nvectors;
+	} else {
+		/* Second best: one for change, shared for all vqs. */
+		nvectors = 2;
 	}
 
-	/* Set the vector used for configuration */
-	snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-		 "%s-config", name);
-	err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-			  0, vp_dev->msix_names[0], vp_dev);
+	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+				      per_vq_vectors ? desc : NULL);
 	if (err)
-		goto out_free_msix_affinity_masks;
+		goto error_find;
 
-	/* Verify we had enough resources to assign the vector */
-	if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-		err = -EBUSY;
-		goto out_free_config_irq;
-	}
-
-	vp_dev->msix_vector_map = kmalloc_array(nvqs,
-			sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-	if (!vp_dev->msix_vector_map)
-		goto out_disable_config_irq;
-
-	allocated_vectors = j = 1; /* vector 0 is the config interrupt */
+	vp_dev->per_vq_vectors = per_vq_vectors;
+	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
 
-		if (callbacks[i])
-			msix_vec = allocated_vectors;
-		else
+		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
-
-		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-				msix_vec);
+		else if (vp_dev->per_vq_vectors)
+			msix_vec = allocated_vectors++;
+		else
+			msix_vec = VP_MSIX_VQ_VECTOR;
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				     msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto error_find;
 		}
 
-		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
-			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
 			continue;
-		}
 
-		snprintf(vp_dev->msix_names[j],
-			 sizeof(*vp_dev->msix_names), "%s-%s",
+		/* allocate per-vq irq if available and necessary */
+		snprintf(vp_dev->msix_names[msix_vec],
+			 sizeof *vp_dev->msix_names,
+			 "%s-%s",
 			 dev_name(&vp_dev->vdev.dev), names[i]);
 		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-				  vring_interrupt, IRQF_SHARED,
-				  vp_dev->msix_names[j], vqs[i]);
-		if (err) {
-			/* don't free this irq on error */
-			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-			goto out_remove_vqs;
-		}
-		vp_dev->msix_vector_map[i] = msix_vec;
-		j++;
-
-		/*
-		 * Use a different vector for each queue if they are available,
-		 * else share the same vector for all VQs.
-		 */
-		if (!shared)
-			allocated_vectors++;
+				  vring_interrupt, 0,
+				  vp_dev->msix_names[msix_vec],
+				  vqs[i]);
+		if (err)
+			goto error_find;
 	}
-
 	return 0;
 
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-	vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-	for (i = 0; i < nvectors; i++) {
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-	}
-	kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-	kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-	pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i, err;
 
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
+
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			  dev_name(&vdev->dev), vp_dev);
 	if (err)
-		return err;
+		goto out_del_vqs;
 
+	vp_dev->intx_enabled = 1;
+	vp_dev->per_vq_vectors = false;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
-		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-				VIRTIO_MSI_NO_VECTOR);
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				     VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto out_del_vqs;
 		}
 	}
 
 	return 0;
-
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
 	int err;
 
-	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+	/* Try MSI-X with one vector per queue. */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
 	if (!err)
 		return 0;
+	/* Fallback: MSI-X with one vector for config, one shared for queues. */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+	if (!err)
+		return 0;
+	/* Finally fall back to regular interrupts. */
 	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
 
@@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+	struct cpumask *mask;
+	unsigned int irq;
 
 	if (!vq->callback)
 		return -EINVAL;
 
-	if (vp_dev->pci_dev->msix_enabled) {
-		int vec = vp_dev->msix_vector_map[vq->index];
-		struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
-		unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+	if (vp_dev->msix_enabled) {
+		mask = vp_dev->msix_affinity_masks[info->msix_vector];
+		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
 		if (cpu == -1)
 			irq_set_affinity_hint(irq, NULL);
 		else {
@@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	unsigned int *map = vp_dev->msix_vector_map;
 
-	if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+	if (!vp_dev->per_vq_vectors ||
+	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
 		return NULL;
 
-	return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+	return pci_irq_get_affinity(vp_dev->pci_dev,
+				    vp_dev->vqs[index]->msix_vector);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
+	INIT_LIST_HEAD(&vp_dev->virtqueues);
+	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */
 	rc = pci_enable_device(pci_dev);
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index ac8c9d788964..e96334aec1e0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -31,6 +31,17 @@
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
+struct virtio_pci_vq_info {
+	/* the actual virtqueue */
+	struct virtqueue *vq;
+
+	/* the list node for the virtqueues list */
+	struct list_head node;
+
+	/* MSI-X vector (or none) */
+	unsigned msix_vector;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
 	struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
 	/* the IO mapping for the PCI config space */
 	void __iomem *ioaddr;
 
+	/* a list of queues so we can dispatch IRQs */
+	spinlock_t lock;
+	struct list_head virtqueues;
+
+	/* array of all queues for house-keeping */
+	struct virtio_pci_vq_info **vqs;
+
+	/* MSI-X support */
+	int msix_enabled;
+	int intx_enabled;
 	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
 	 * and I'm too lazy to allocate each name separately. */
 	char (*msix_names)[256];
-	/* Total Number of MSI-X vectors (including per-VQ ones). */
-	int msix_vectors;
-	/* Map of per-VQ MSI-X vectors, may be NULL */
-	unsigned *msix_vector_map;
+	/* Number of available vectors */
+	unsigned msix_vectors;
+	/* Vectors allocated, excluding per-vq vectors if any */
+	unsigned msix_used_vectors;
+
+	/* Whether we have vector per vq */
+	bool per_vq_vectors;
 
 	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+				      struct virtio_pci_vq_info *info,
 				      unsigned idx,
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name,
 				      u16 msix_vec);
-	void (*del_vq)(struct virtqueue *vq);
+	void (*del_vq)(struct virtio_pci_vq_info *info);
 
 	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
 
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues Thus, we need at least 2 vectors for MSI. */
+enum {
+	VP_MSIX_CONFIG_VECTOR = 0,
+	VP_MSIX_VQ_VECTOR = 1,
+};
+
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index f7362c5fe18a..4bfa48fb1324 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
 		return ERR_PTR(-ENOENT);
 
+	info->msix_vector = msix_vec;
+
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
 	return ERR_PTR(err);
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
-	if (vp_dev->pci_dev->msix_enabled) {
+	if (vp_dev->msix_enabled) {
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
 			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
 		/* Flush the write out to device */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7bc3004b840e..8978f109d2d7 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	/* get offset of notification word for this vq */
 	off = vp_ioread16(&cfg->queue_notify_off);
 
+	info->msix_vector = msix_vec;
+
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return 0;
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
-	if (vp_dev->pci_dev->msix_enabled) {
+	if (vp_dev->msix_enabled) {
 		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
 			     &vp_dev->common->queue_msix_vector);
 		/* Flush the write out to device */