Diffstat (limited to 'drivers')
 drivers/base/core.c                          |   6
 drivers/gpio/Makefile                        |   2
 drivers/gpu/drm/i915/i915_gem.c              |   7
 drivers/gpu/drm/radeon/radeon_encoders.c     |   7
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c        |   4
 drivers/iommu/Kconfig                        |  13
 drivers/iommu/Makefile                       |   1
 drivers/iommu/amd_iommu.c                    | 817
 drivers/iommu/amd_iommu_init.c               | 113
 drivers/iommu/amd_iommu_proto.h              |  24
 drivers/iommu/amd_iommu_types.h              | 108
 drivers/iommu/amd_iommu_v2.c                 | 959
 drivers/iommu/intel-iommu.c                  |  32
 drivers/iommu/intr_remapping.c               |   2
 drivers/iommu/iommu.c                        | 119
 drivers/iommu/msm_iommu.c                    |  25
 drivers/iommu/omap-iommu.c                   |  18
 drivers/iommu/omap-iovmm.c                   |  17
 drivers/md/bitmap.c                          |   4
 drivers/md/md.c                              |  27
 drivers/md/raid5.c                           |   8
 drivers/net/ethernet/pasemi/Makefile         |   3
 drivers/net/ethernet/realtek/r8169.c         |  53
 drivers/net/ethernet/tile/tilepro.c          |   8
 drivers/net/wireless/iwlwifi/iwl-1000.c      |   1
 drivers/net/wireless/iwlwifi/iwl-5000.c      |   1
 drivers/net/wireless/iwlwifi/iwl-agn-rxon.c  |  36
 drivers/net/wireless/iwlwifi/iwl-agn-sta.c   |   5
 drivers/net/wireless/iwlwifi/iwl-agn.c       |  34
 drivers/net/wireless/iwlwifi/iwl-agn.h       |   2
 drivers/net/wireless/iwlwifi/iwl-core.c      |  22
 drivers/net/wireless/iwlwifi/iwl-core.h      |   2
 drivers/net/wireless/iwlwifi/iwl-shared.h    |   4
 drivers/net/xen-netback/netback.c            |   2
 drivers/of/irq.c                             |  13
 drivers/oprofile/oprof.c                     |  29
 drivers/oprofile/timer_int.c                 |   1
 drivers/platform/x86/toshiba_acpi.c          |  21
 drivers/power/intel_mid_battery.c            |  12
 drivers/ptp/ptp_clock.c                      |   4
 drivers/rapidio/devices/tsi721.c             |  41
 drivers/rapidio/devices/tsi721.h             |   2
 drivers/rtc/class.c                          |  10
 drivers/rtc/interface.c                      |  44
 drivers/rtc/rtc-s3c.c                        |   2
 drivers/s390/cio/chsc.c                      |   7
 drivers/s390/cio/cio.h                       |   5
 drivers/s390/cio/css.c                       | 104
 drivers/s390/cio/device.c                    |   4
 drivers/s390/cio/device_fsm.c                |  30
 drivers/s390/cio/device_ops.c                |  20
 drivers/s390/cio/io_sch.h                    |   5
 drivers/s390/crypto/ap_bus.c                 |   2
 drivers/spi/Kconfig                          |   4
 drivers/spi/spi-ath79.c                      |   1
 drivers/spi/spi-gpio.c                       |   4
 drivers/spi/spi-nuc900.c                     |   1
 drivers/target/iscsi/iscsi_target.c          |  26
 drivers/target/iscsi/iscsi_target_auth.c     |   6
 drivers/target/iscsi/iscsi_target_core.h     |   3
 drivers/target/iscsi/iscsi_target_erl1.c     |   3
 drivers/target/iscsi/iscsi_target_login.c    |  13
 drivers/target/iscsi/iscsi_target_nego.c     |   3
 drivers/target/loopback/tcm_loop.c           |  41
 drivers/target/target_core_alua.c            |  27
 drivers/target/target_core_cdb.c             |  20
 drivers/target/target_core_configfs.c        |  11
 drivers/target/target_core_device.c          |  30
 drivers/target/target_core_file.c            |  20
 drivers/target/target_core_iblock.c          |  16
 drivers/target/target_core_pr.c              | 240
 drivers/target/target_core_pscsi.c           |  28
 drivers/target/target_core_rd.c              | 258
 drivers/target/target_core_tmr.c             |   4
 drivers/target/target_core_transport.c       | 260
 drivers/target/tcm_fc/tfc_cmd.c              |   2
 drivers/target/tcm_fc/tfc_conf.c             |   3
 77 files changed, 2939 insertions(+), 927 deletions(-)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d8b3d89db043..919daa7cd5b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
 		 */
 		list_del_init(&dev->kobj.entry);
 		spin_unlock(&devices_kset->list_lock);
-		/* Disable all device's runtime power management */
-		pm_runtime_disable(dev);
+
+		/* Don't allow any more runtime suspends */
+		pm_runtime_get_noresume(dev);
+		pm_runtime_barrier(dev);
 
 		if (dev->bus && dev->bus->shutdown) {
 			dev_dbg(dev, "shutdown\n");
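
Aside on the core.c hunk above: per its new comment, the goal is only to keep any further runtime suspends from starting (or completing) before ->shutdown() runs. In isolation the pattern looks like this; quiesce_for_shutdown() is a hypothetical helper for illustration, not part of this patch:

	#include <linux/pm_runtime.h>

	static void quiesce_for_shutdown(struct device *dev)
	{
		/* Raise the usage count without waking the device, so the
		 * PM core will not start any new runtime suspend. */
		pm_runtime_get_noresume(dev);

		/* Wait for any runtime-PM request already in flight
		 * (suspend, resume or idle) to finish. */
		pm_runtime_barrier(dev);

		/* Safe to invoke the bus ->shutdown() callback now. */
	}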
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8da..4e018d6a7639 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
 obj-$(CONFIG_GPIO_EP93XX)	+= gpio-ep93xx.o
 obj-$(CONFIG_GPIO_IT8761E)	+= gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)	+= gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695)	+= gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695)	+= gpio-ks8695.o
 obj-$(CONFIG_GPIO_LANGWELL)	+= gpio-langwell.o
 obj-$(CONFIG_ARCH_LPC32XX)	+= gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_MAX730X)	+= gpio-max730x.o
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8359dc777041..60ff1b63b568 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2026,8 +2026,13 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	 * to handle this, the waiter on a request often wants an associated
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
+	 *
+	 * To avoid a recursion with the ilk VT-d workaround (that calls
+	 * gpu_idle when unbinding objects with interruptible==false) don't
+	 * retire requests in that case (because it might call unbind if the
+	 * active list holds the last reference to the object).
 	 */
-	if (ret == 0)
+	if (ret == 0 && dev_priv->mm.interruptible)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_TRAVIS:
 		case ENCODER_OBJECT_ID_NUTMEG:
-			return true;
+			return radeon_encoder->encoder_id;
 		default:
-			return false;
+			return ENCODER_OBJECT_ID_NONE;
 		}
 	}
-
-	return false;
+	return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..5ff561d4e0b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 		goto out_clips;
 	}
 
-	clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
 	if (clips == NULL) {
 		DRM_ERROR("Failed to allocate clip rect list.\n");
 		ret = -ENOMEM;
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 		goto out_clips;
 	}
 
-	clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
 	if (clips == NULL) {
 		DRM_ERROR("Failed to allocate clip rect list.\n");
 		ret = -ENOMEM;
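
The two vmwgfx hunks above replace an open-coded num_clips * sizeof(*clips) with kcalloc(). The difference is overflow behavior: num_clips comes from userspace, and if the multiplication wraps, kzalloc() returns a too-small buffer that the following writes overflow, while kcalloc() detects the overflowing product and returns NULL. A standalone illustration; struct clip_rect is a stand-in element type, not the driver's real one:

	#include <linux/slab.h>

	struct clip_rect { u32 x1, y1, x2, y2; };	/* stand-in type */

	static struct clip_rect *alloc_clips(u32 num_clips)
	{
		/* kzalloc(num_clips * sizeof(*clips), ...) can wrap on
		 * 32-bit and under-allocate; kcalloc() checks the product
		 * n * size first and fails cleanly with NULL instead. */
		return kcalloc(num_clips, sizeof(struct clip_rect),
			       GFP_KERNEL);
	}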
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5414253b185a..6bea6962f8ee 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -34,7 +34,9 @@ config AMD_IOMMU
 	bool "AMD IOMMU support"
 	select SWIOTLB
 	select PCI_MSI
-	select PCI_IOV
+	select PCI_ATS
+	select PCI_PRI
+	select PCI_PASID
 	select IOMMU_API
 	depends on X86_64 && PCI && ACPI
 	---help---
@@ -58,6 +60,15 @@ config AMD_IOMMU_STATS
 	  information to userspace via debugfs.
 	  If unsure, say N.
 
+config AMD_IOMMU_V2
+	tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
+	depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+	select MMU_NOTIFIER
+	---help---
+	  This option enables support for the AMD IOMMUv2 features of the
+	  IOMMU hardware. Select this option if you want to use devices
+	  that support the PCI PRI and PASID interface.
+
 # Intel IOMMU support
 config DMAR_TABLE
 	bool
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 2f4448794bc7..0e36b4934aff 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a8521a..a7cbcd46af9e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -17,6 +17,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include <linux/ratelimit.h>
 #include <linux/pci.h>
 #include <linux/pci-ats.h>
 #include <linux/bitmap.h>
@@ -28,6 +29,8 @@
 #include <linux/iommu.h>
 #include <linux/delay.h>
 #include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
 #include <asm/msidef.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
@@ -41,6 +44,24 @@
 
 #define LOOP_TIMEOUT	100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
@@ -59,6 +80,9 @@ static struct protection_domain *pt_domain;
 
 static struct iommu_ops amd_iommu_ops;
 
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -67,6 +91,7 @@ struct iommu_cmd {
 };
 
 static void update_domain(struct protection_domain *domain);
+static int __init alloc_passthrough_domain(void);
 
 /****************************************************************************
  *
@@ -147,6 +172,33 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
 	return dev->archdata.iommu;
 }
 
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+	static const int caps[] = {
+		PCI_EXT_CAP_ID_ATS,
+		PCI_PRI_CAP,
+		PCI_PASID_CAP,
+	};
+	int i, pos;
+
+	for (i = 0; i < 3; ++i) {
+		pos = pci_find_ext_capability(pdev, caps[i]);
+		if (pos == 0)
+			return false;
+	}
+
+	return true;
+}
+
+static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
+{
+	struct iommu_dev_data *dev_data;
+
+	dev_data = get_dev_data(&pdev->dev);
+
+	return dev_data->errata & (1 << erratum) ? true : false;
+}
+
 /*
  * In this function the list of preallocated protection domains is traversed to
  * find the domain for a specific device
@@ -204,6 +256,7 @@ static bool check_device(struct device *dev)
 
 static int iommu_init_device(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iommu_dev_data *dev_data;
 	u16 alias;
 
@@ -228,6 +281,13 @@ static int iommu_init_device(struct device *dev)
 		dev_data->alias_data = alias_data;
 	}
 
+	if (pci_iommuv2_capable(pdev)) {
+		struct amd_iommu *iommu;
+
+		iommu = amd_iommu_rlookup_table[dev_data->devid];
+		dev_data->iommu_v2 = iommu->is_iommu_v2;
+	}
+
 	dev->archdata.iommu = dev_data;
 
 	return 0;
@@ -317,6 +377,11 @@ DECLARE_STATS_COUNTER(domain_flush_single);
 DECLARE_STATS_COUNTER(domain_flush_all);
 DECLARE_STATS_COUNTER(alloced_io_mem);
 DECLARE_STATS_COUNTER(total_map_requests);
+DECLARE_STATS_COUNTER(complete_ppr);
+DECLARE_STATS_COUNTER(invalidate_iotlb);
+DECLARE_STATS_COUNTER(invalidate_iotlb_all);
+DECLARE_STATS_COUNTER(pri_requests);
+
 
 static struct dentry *stats_dir;
 static struct dentry *de_fflush;
@@ -351,6 +416,10 @@ static void amd_iommu_stats_init(void)
 	amd_iommu_stats_add(&domain_flush_all);
 	amd_iommu_stats_add(&alloced_io_mem);
 	amd_iommu_stats_add(&total_map_requests);
+	amd_iommu_stats_add(&complete_ppr);
+	amd_iommu_stats_add(&invalidate_iotlb);
+	amd_iommu_stats_add(&invalidate_iotlb_all);
+	amd_iommu_stats_add(&pri_requests);
 }
 
 #endif
@@ -365,8 +434,8 @@ static void dump_dte_entry(u16 devid)
 {
 	int i;
 
-	for (i = 0; i < 8; ++i)
-		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+	for (i = 0; i < 4; ++i)
+		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 			amd_iommu_dev_table[devid].data[i]);
 }
 
@@ -461,12 +530,84 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+{
+	struct amd_iommu_fault fault;
+	volatile u64 *raw;
+	int i;
+
+	INC_STATS_COUNTER(pri_requests);
+
+	raw = (u64 *)(iommu->ppr_log + head);
+
+	/*
+	 * Hardware bug: Interrupt may arrive before the entry is written to
+	 * memory. If this happens we need to wait for the entry to arrive.
+	 */
+	for (i = 0; i < LOOP_TIMEOUT; ++i) {
+		if (PPR_REQ_TYPE(raw[0]) != 0)
+			break;
+		udelay(1);
+	}
+
+	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
+		return;
+	}
+
+	fault.address   = raw[1];
+	fault.pasid     = PPR_PASID(raw[0]);
+	fault.device_id = PPR_DEVID(raw[0]);
+	fault.tag       = PPR_TAG(raw[0]);
+	fault.flags     = PPR_FLAGS(raw[0]);
+
+	/*
+	 * To detect the hardware bug we need to clear the entry
+	 * back to zero.
+	 */
+	raw[0] = raw[1] = 0;
+
+	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+	unsigned long flags;
+	u32 head, tail;
+
+	if (iommu->ppr_log == NULL)
+		return;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+	while (head != tail) {
+
+		/* Handle PPR entry */
+		iommu_handle_ppr_entry(iommu, head);
+
+		/* Update and refresh ring-buffer state */
+		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+	}
+
+	/* enable ppr interrupts again */
+	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
 	struct amd_iommu *iommu;
 
-	for_each_iommu(iommu)
+	for_each_iommu(iommu) {
 		iommu_poll_events(iommu);
+		iommu_poll_ppr_log(iommu);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -595,6 +736,60 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 	cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 }
 
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+				  u64 address, bool size)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	address &= ~(0xfffULL);
+
+	cmd->data[0]  = pasid & PASID_MASK;
+	cmd->data[1]  = domid;
+	cmd->data[2]  = lower_32_bits(address);
+	cmd->data[3]  = upper_32_bits(address);
+	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+	if (size)
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+				  int qdep, u64 address, bool size)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	address &= ~(0xfffULL);
+
+	cmd->data[0]  = devid;
+	cmd->data[0] |= (pasid & 0xff) << 16;
+	cmd->data[0] |= (qdep & 0xff) << 24;
+	cmd->data[1]  = devid;
+	cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+	cmd->data[2]  = lower_32_bits(address);
+	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+	cmd->data[3]  = upper_32_bits(address);
+	if (size)
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+			       int status, int tag, bool gn)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->data[0] = devid;
+	if (gn) {
+		cmd->data[1] = pasid & PASID_MASK;
+		cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
+	}
+	cmd->data[3] = tag & 0x1ff;
+	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
 static void build_inv_all(struct iommu_cmd *cmd)
 {
 	memset(cmd, 0, sizeof(*cmd));
@@ -1496,6 +1691,48 @@ static void free_pagetable(struct protection_domain *domain)
 	domain->pt_root = NULL;
 }
 
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+	u64 *ptr;
+	int i;
+
+	for (i = 0; i < 512; ++i) {
+		if (!(tbl[i] & GCR3_VALID))
+			continue;
+
+		ptr = __va(tbl[i] & PAGE_MASK);
+
+		free_page((unsigned long)ptr);
+	}
+}
+
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+	u64 *ptr;
+	int i;
+
+	for (i = 0; i < 512; ++i) {
+		if (!(tbl[i] & GCR3_VALID))
+			continue;
+
+		ptr = __va(tbl[i] & PAGE_MASK);
+
+		free_gcr3_tbl_level1(ptr);
+	}
+}
+
+static void free_gcr3_table(struct protection_domain *domain)
+{
+	if (domain->glx == 2)
+		free_gcr3_tbl_level2(domain->gcr3_tbl);
+	else if (domain->glx == 1)
+		free_gcr3_tbl_level1(domain->gcr3_tbl);
+	else if (domain->glx != 0)
+		BUG();
+
+	free_page((unsigned long)domain->gcr3_tbl);
+}
+
 /*
  * Free a domain, only used if something went wrong in the
  * allocation path and we need to free an already allocated page table
@@ -1582,20 +1819,52 @@ static bool dma_ops_domain(struct protection_domain *domain)
 
 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 {
-	u64 pte_root = virt_to_phys(domain->pt_root);
-	u32 flags = 0;
+	u64 pte_root = 0;
+	u64 flags = 0;
+
+	if (domain->mode != PAGE_MODE_NONE)
+		pte_root = virt_to_phys(domain->pt_root);
 
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
+	flags = amd_iommu_dev_table[devid].data[1];
+
 	if (ats)
 		flags |= DTE_FLAG_IOTLB;
 
-	amd_iommu_dev_table[devid].data[3] |= flags;
-	amd_iommu_dev_table[devid].data[2] = domain->id;
-	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
-	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+	if (domain->flags & PD_IOMMUV2_MASK) {
+		u64 gcr3 = __pa(domain->gcr3_tbl);
+		u64 glx  = domain->glx;
+		u64 tmp;
+
+		pte_root |= DTE_FLAG_GV;
+		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+		/* First mask out possible old values for GCR3 table */
+		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+		flags &= ~tmp;
+
+		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+		flags &= ~tmp;
+
+		/* Encode GCR3 table into DTE */
+		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+		pte_root |= tmp;
+
+		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+		flags |= tmp;
+
+		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+		flags |= tmp;
+	}
+
+	flags &= ~(0xffffUL);
+	flags |= domain->id;
+
+	amd_iommu_dev_table[devid].data[1] = flags;
+	amd_iommu_dev_table[devid].data[0] = pte_root;
 }
 
 static void clear_dte_entry(u16 devid)
@@ -1603,7 +1872,6 @@ static void clear_dte_entry(u16 devid)
 	/* remove entry from the device table seen by the hardware */
 	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
 	amd_iommu_dev_table[devid].data[1] = 0;
-	amd_iommu_dev_table[devid].data[2] = 0;
 
 	amd_iommu_apply_erratum_63(devid);
 }
@@ -1696,6 +1964,93 @@ out_unlock:
 	return ret;
 }
 
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+	pci_disable_ats(pdev);
+	pci_disable_pri(pdev);
+	pci_disable_pasid(pdev);
+}
+
+/* FIXME: Change generic reset-function to do the same */
+static int pri_reset_while_enabled(struct pci_dev *pdev)
+{
+	u16 control;
+	int pos;
+
+	pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+	if (!pos)
+		return -EINVAL;
+
+	pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+	control |= PCI_PRI_RESET;
+	pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+
+	return 0;
+}
+
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+	bool reset_enable;
+	int reqs, ret;
+
+	/* FIXME: Hardcode number of outstanding requests for now */
+	reqs = 32;
+	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
+		reqs = 1;
+	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+
+	/* Only allow access to user-accessible pages */
+	ret = pci_enable_pasid(pdev, 0);
+	if (ret)
+		goto out_err;
+
+	/* First reset the PRI state of the device */
+	ret = pci_reset_pri(pdev);
+	if (ret)
+		goto out_err;
+
+	/* Enable PRI */
+	ret = pci_enable_pri(pdev, reqs);
+	if (ret)
+		goto out_err;
+
+	if (reset_enable) {
+		ret = pri_reset_while_enabled(pdev);
+		if (ret)
+			goto out_err;
+	}
+
+	ret = pci_enable_ats(pdev, PAGE_SHIFT);
+	if (ret)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	pci_disable_pri(pdev);
+	pci_disable_pasid(pdev);
+
+	return ret;
+}
+
+/* FIXME: Move this to PCI code */
+#define PCI_PRI_TLP_OFF		(1 << 2)
+
+bool pci_pri_tlp_required(struct pci_dev *pdev)
+{
+	u16 control;
+	int pos;
+
+	pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+	if (!pos)
+		return false;
+
+	pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+
+	return (control & PCI_PRI_TLP_OFF) ? true : false;
+}
+
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
@@ -1710,7 +2065,18 @@ static int attach_device(struct device *dev,
 
 	dev_data = get_dev_data(dev);
 
-	if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+	if (domain->flags & PD_IOMMUV2_MASK) {
+		if (!dev_data->iommu_v2 || !dev_data->passthrough)
+			return -EINVAL;
+
+		if (pdev_iommuv2_enable(pdev) != 0)
+			return -EINVAL;
+
+		dev_data->ats.enabled = true;
+		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+		dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+	} else if (amd_iommu_iotlb_sup &&
+		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
 		dev_data->ats.enabled = true;
 		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
 	}
@@ -1760,7 +2126,7 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 	 * passthrough domain if it is detached from any other domain.
 	 * Make sure we can deassign from the pt_domain itself.
 	 */
-	if (iommu_pass_through &&
+	if (dev_data->passthrough &&
 	    (dev_data->domain == NULL && domain != pt_domain))
 		__attach_device(dev_data, pt_domain);
 }
@@ -1770,20 +2136,24 @@ static void __detach_device(struct iommu_dev_data *dev_data)
  */
 static void detach_device(struct device *dev)
 {
+	struct protection_domain *domain;
 	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 
 	dev_data = get_dev_data(dev);
+	domain   = dev_data->domain;
 
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (dev_data->ats.enabled) {
+	if (domain->flags & PD_IOMMUV2_MASK)
+		pdev_iommuv2_disable(to_pci_dev(dev));
+	else if (dev_data->ats.enabled)
 		pci_disable_ats(to_pci_dev(dev));
-		dev_data->ats.enabled = false;
-	}
+
+	dev_data->ats.enabled = false;
 }
 
 /*
@@ -1818,18 +2188,20 @@ static struct protection_domain *domain_for_device(struct device *dev)
 static int device_change_notifier(struct notifier_block *nb,
 				  unsigned long action, void *data)
 {
-	struct device *dev = data;
-	u16 devid;
-	struct protection_domain *domain;
 	struct dma_ops_domain *dma_domain;
+	struct protection_domain *domain;
+	struct iommu_dev_data *dev_data;
+	struct device *dev = data;
 	struct amd_iommu *iommu;
 	unsigned long flags;
+	u16 devid;
 
 	if (!check_device(dev))
 		return 0;
 
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
+	dev_data = get_dev_data(dev);
 
 	switch (action) {
 	case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -1838,7 +2210,7 @@ static int device_change_notifier(struct notifier_block *nb,
 
 		if (!domain)
 			goto out;
-		if (iommu_pass_through)
+		if (dev_data->passthrough)
 			break;
 		detach_device(dev);
 		break;
@@ -2434,8 +2806,9 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  */
 static void prealloc_protection_domains(void)
 {
-	struct pci_dev *dev = NULL;
+	struct iommu_dev_data *dev_data;
 	struct dma_ops_domain *dma_dom;
+	struct pci_dev *dev = NULL;
 	u16 devid;
 
 	for_each_pci_dev(dev) {
@@ -2444,6 +2817,16 @@ static void prealloc_protection_domains(void)
 		if (!check_device(&dev->dev))
 			continue;
 
+		dev_data = get_dev_data(&dev->dev);
+		if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
+			/* Make sure passthrough domain is allocated */
+			alloc_passthrough_domain();
+			dev_data->passthrough = true;
+			attach_device(&dev->dev, pt_domain);
+			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
+				dev_name(&dev->dev));
+		}
+
 		/* Is there already any domain for it? */
 		if (domain_for_device(&dev->dev))
 			continue;
@@ -2474,6 +2857,7 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 
 static unsigned device_dma_ops_init(void)
 {
+	struct iommu_dev_data *dev_data;
 	struct pci_dev *pdev = NULL;
 	unsigned unhandled = 0;
 
@@ -2483,7 +2867,12 @@ static unsigned device_dma_ops_init(void)
 			continue;
 		}
 
-		pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+		dev_data = get_dev_data(&pdev->dev);
+
+		if (!dev_data->passthrough)
+			pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+		else
+			pdev->dev.archdata.dma_ops = &nommu_dma_ops;
 	}
 
 	return unhandled;
@@ -2610,6 +2999,20 @@ out_err:
 	return NULL;
 }
 
+static int __init alloc_passthrough_domain(void)
+{
+	if (pt_domain != NULL)
+		return 0;
+
+	/* allocate passthrough domain */
+	pt_domain = protection_domain_alloc();
+	if (!pt_domain)
+		return -ENOMEM;
+
+	pt_domain->mode = PAGE_MODE_NONE;
+
+	return 0;
+}
 static int amd_iommu_domain_init(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
2615 struct protection_domain *domain; 3018 struct protection_domain *domain;
@@ -2623,6 +3026,8 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
2623 if (!domain->pt_root) 3026 if (!domain->pt_root)
2624 goto out_free; 3027 goto out_free;
2625 3028
3029 domain->iommu_domain = dom;
3030
2626 dom->priv = domain; 3031 dom->priv = domain;
2627 3032
2628 return 0; 3033 return 0;
@@ -2645,7 +3050,11 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	BUG_ON(domain->dev_cnt != 0);
 
-	free_pagetable(domain);
+	if (domain->mode != PAGE_MODE_NONE)
+		free_pagetable(domain);
+
+	if (domain->flags & PD_IOMMUV2_MASK)
+		free_gcr3_table(domain);
 
 	protection_domain_free(domain);
 
@@ -2702,13 +3111,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
 
+	if (domain->mode == PAGE_MODE_NONE)
+		return -EINVAL;
+
 	if (iommu_prot & IOMMU_READ)
 		prot |= IOMMU_PROT_IR;
 	if (iommu_prot & IOMMU_WRITE)
@@ -2721,13 +3132,14 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			      size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
+	size_t unmap_size;
 
-	page_size = 0x1000UL << gfp_order;
+	if (domain->mode == PAGE_MODE_NONE)
+		return -EINVAL;
 
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +3147,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2746,6 +3158,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	phys_addr_t paddr;
 	u64 *pte, __pte;
 
+	if (domain->mode == PAGE_MODE_NONE)
+		return iova;
+
 	pte = fetch_pte(domain, iova);
 
 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
@@ -2782,6 +3197,7 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
+	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
@@ -2796,21 +3212,23 @@ static struct iommu_ops amd_iommu_ops = {
 
 int __init amd_iommu_init_passthrough(void)
 {
-	struct amd_iommu *iommu;
+	struct iommu_dev_data *dev_data;
 	struct pci_dev *dev = NULL;
+	struct amd_iommu *iommu;
 	u16 devid;
+	int ret;
 
-	/* allocate passthrough domain */
-	pt_domain = protection_domain_alloc();
-	if (!pt_domain)
-		return -ENOMEM;
-
-	pt_domain->mode |= PAGE_MODE_NONE;
+	ret = alloc_passthrough_domain();
+	if (ret)
+		return ret;
 
 	for_each_pci_dev(dev) {
 		if (!check_device(&dev->dev))
 			continue;
 
+		dev_data = get_dev_data(&dev->dev);
+		dev_data->passthrough = true;
+
 		devid = get_device_id(&dev->dev);
 
 		iommu = amd_iommu_rlookup_table[devid];
@@ -2824,3 +3242,326 @@ int __init amd_iommu_init_passthrough(void)
 
 	return 0;
 }
+
+/* IOMMUv2 specific functions */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	/* Update data structure */
+	domain->mode = PAGE_MODE_NONE;
+	domain->updated = true;
+
+	/* Make changes visible to IOMMUs */
+	update_domain(domain);
+
+	/* Page-table is not visible to IOMMU anymore, so free it */
+	free_pagetable(domain);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+	int levels, ret;
+
+	if (pasids <= 0 || pasids > (PASID_MASK + 1))
+		return -EINVAL;
+
+	/* Number of GCR3 table levels required */
+	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+		levels += 1;
+
+	if (levels > amd_iommu_max_glx_val)
+		return -EINVAL;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	/*
+	 * Save us all sanity checks whether devices already in the
+	 * domain support IOMMUv2. Just force that the domain has no
+	 * devices attached when it is switched into IOMMUv2 mode.
+	 */
+	ret = -EBUSY;
+	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+		goto out;
+
+	ret = -ENOMEM;
+	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+	if (domain->gcr3_tbl == NULL)
+		goto out;
+
+	domain->glx = levels;
+	domain->flags |= PD_IOMMUV2_MASK;
+	domain->updated = true;
+
+	update_domain(domain);
+
+	ret = 0;
+
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
+
+static int __flush_pasid(struct protection_domain *domain, int pasid,
+			 u64 address, bool size)
+{
+	struct iommu_dev_data *dev_data;
+	struct iommu_cmd cmd;
+	int i, ret;
+
+	if (!(domain->flags & PD_IOMMUV2_MASK))
+		return -EINVAL;
+
+	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+	/*
+	 * IOMMU TLB needs to be flushed before Device TLB to
+	 * prevent device TLB refill from IOMMU TLB
+	 */
+	for (i = 0; i < amd_iommus_present; ++i) {
+		if (domain->dev_iommu[i] == 0)
+			continue;
+
+		ret = iommu_queue_command(amd_iommus[i], &cmd);
+		if (ret != 0)
+			goto out;
+	}
+
+	/* Wait until IOMMU TLB flushes are complete */
+	domain_flush_complete(domain);
+
+	/* Now flush device TLBs */
+	list_for_each_entry(dev_data, &domain->dev_list, list) {
+		struct amd_iommu *iommu;
+		int qdep;
+
+		BUG_ON(!dev_data->ats.enabled);
+
+		qdep  = dev_data->ats.qdep;
+		iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+				      qdep, address, size);
+
+		ret = iommu_queue_command(iommu, &cmd);
+		if (ret != 0)
+			goto out;
+	}
+
+	/* Wait until all device TLBs are flushed */
+	domain_flush_complete(domain);
+
+	ret = 0;
+
+out:
+
+	return ret;
+}
+
+static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+				  u64 address)
+{
+	INC_STATS_COUNTER(invalidate_iotlb);
+
+	return __flush_pasid(domain, pasid, address, false);
+}
+
+int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+			 u64 address)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&domain->lock, flags);
+	ret = __amd_iommu_flush_page(domain, pasid, address);
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+{
+	INC_STATS_COUNTER(invalidate_iotlb_all);
+
+	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+			     true);
+}
+
+int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&domain->lock, flags);
+	ret = __amd_iommu_flush_tlb(domain, pasid);
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
+static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+{
+	int index;
+	u64 *pte;
+
+	while (true) {
+
+		index = (pasid >> (9 * level)) & 0x1ff;
+		pte   = &root[index];
+
+		if (level == 0)
+			break;
+
+		if (!(*pte & GCR3_VALID)) {
+			if (!alloc)
+				return NULL;
+
+			root = (void *)get_zeroed_page(GFP_ATOMIC);
+			if (root == NULL)
+				return NULL;
+
+			*pte = __pa(root) | GCR3_VALID;
+		}
+
+		root = __va(*pte & PAGE_MASK);
+
+		level -= 1;
+	}
+
+	return pte;
+}
+
+static int __set_gcr3(struct protection_domain *domain, int pasid,
+		      unsigned long cr3)
+{
+	u64 *pte;
+
+	if (domain->mode != PAGE_MODE_NONE)
+		return -EINVAL;
+
+	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+	if (pte == NULL)
+		return -ENOMEM;
+
+	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+	return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+static int __clear_gcr3(struct protection_domain *domain, int pasid)
+{
+	u64 *pte;
+
+	if (domain->mode != PAGE_MODE_NONE)
+		return -EINVAL;
+
+	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+	if (pte == NULL)
+		return 0;
+
+	*pte = 0;
+
+	return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+			      unsigned long cr3)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&domain->lock, flags);
+	ret = __set_gcr3(domain, pasid, cr3);
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&domain->lock, flags);
+	ret = __clear_gcr3(domain, pasid);
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+			   int status, int tag)
+{
+	struct iommu_dev_data *dev_data;
+	struct amd_iommu *iommu;
+	struct iommu_cmd cmd;
+
+	INC_STATS_COUNTER(complete_ppr);
+
+	dev_data = get_dev_data(&pdev->dev);
+	iommu    = amd_iommu_rlookup_table[dev_data->devid];
+
+	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+			   tag, dev_data->pri_tlp);
+
+	return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
+{
+	struct protection_domain *domain;
+
+	domain = get_domain(&pdev->dev);
+	if (IS_ERR(domain))
+		return NULL;
+
+	/* Only return IOMMUv2 domains */
+	if (!(domain->flags & PD_IOMMUV2_MASK))
+		return NULL;
+
+	return domain->iommu_domain;
+}
+EXPORT_SYMBOL(amd_iommu_get_v2_domain);
+
+void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
+{
+	struct iommu_dev_data *dev_data;
+
+	if (!amd_iommu_v2_supported())
+		return;
+
+	dev_data = get_dev_data(&pdev->dev);
+	dev_data->errata |= (1 << erratum);
+}
+EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
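
Taken together, the exported functions above form the interface the new amd_iommu_v2.c module builds on: a domain is put into PAGE_MODE_NONE, given a GCR3 table for per-PASID translation, and then flushed and completed per PASID. A rough, hypothetical consumer sketch (names invented, error paths trimmed; the real user is amd_iommu_v2.c, whose flow this only approximates):

	static struct iommu_domain *example_v2_domain(struct pci_dev *pdev,
						      int pasids)
	{
		struct iommu_domain *dom;

		if (!amd_iommu_v2_supported())
			return NULL;

		dom = iommu_domain_alloc(&pci_bus_type);
		if (dom == NULL)
			return NULL;

		/* Drop the host page-table: DMA runs untranslated and the
		 * per-PASID GCR3 tables take over translation. */
		amd_iommu_domain_direct_map(dom);

		/* Size the GCR3 table for the wanted number of PASIDs. */
		if (amd_iommu_domain_enable_v2(dom, pasids))
			goto out_free;

		if (iommu_attach_device(dom, &pdev->dev))
			goto out_free;

		return dom;

	out_free:
		iommu_domain_free(dom);
		return NULL;
	}

A process page-table would then be bound with amd_iommu_domain_set_gcr3(dom, pasid, cr3), and PPR faults delivered through the registered notifier get answered via amd_iommu_complete_ppr(pdev, pasid, PPR_SUCCESS, tag).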
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 82d2410f4205..c7a5d7e14547 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
+#include <linux/export.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
 /*
  * The ACPI table parsing functions set this variable on an error
  */
@@ -581,21 +588,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
 	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
+/* allocates the memory where the IOMMU will write the PPR log entries */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+						get_order(PPR_LOG_SIZE));
+
+	if (iommu->ppr_log == NULL)
+		return NULL;
+
+	return iommu->ppr_log;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	if (iommu->ppr_log == NULL)
+		return;
+
+	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+		    &entry, sizeof(entry));
+
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+	iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+	if (iommu->ppr_log == NULL)
+		return;
+
+	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+	if (!iommu_feature(iommu, FEATURE_GT))
+		return;
+
+	iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
 /* sets a specific bit in the device table entry. */
 static void set_dev_entry_bit(u16 devid, u8 bit)
 {
-	int i = (bit >> 5) & 0x07;
-	int _bit = bit & 0x1f;
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;
 
-	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
 }
 
 static int get_dev_entry_bit(u16 devid, u8 bit)
 {
-	int i = (bit >> 5) & 0x07;
-	int _bit = bit & 0x1f;
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;
 
-	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
 }
 
 
@@ -699,6 +754,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 
 	iommu->features = ((u64)high << 32) | low;
 
+	if (iommu_feature(iommu, FEATURE_GT)) {
+		int glxval;
+		u32 pasids;
+		u64 shift;
+
+		shift   = iommu->features & FEATURE_PASID_MASK;
+		shift >>= FEATURE_PASID_SHIFT;
+		pasids  = (1 << shift);
+
+		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval >>= FEATURE_GLXVAL_SHIFT;
+
+		if (amd_iommu_max_glx_val == -1)
+			amd_iommu_max_glx_val = glxval;
+		else
+			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+	}
+
+	if (iommu_feature(iommu, FEATURE_GT) &&
+	    iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->is_iommu_v2   = true;
+		amd_iommu_v2_present = true;
+	}
+
 	if (!is_rd890_iommu(iommu->dev))
 		return;
 
@@ -901,6 +982,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
 {
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
+	free_ppr_log(iommu);
 	iommu_unmap_mmio_space(iommu);
 }
 
@@ -964,6 +1046,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
 
+	if (iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->ppr_log = alloc_ppr_log(iommu);
+		if (!iommu->ppr_log)
+			return -ENOMEM;
+	}
+
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
 		amd_iommu_np_cache = true;
 
@@ -1050,6 +1138,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 	iommu->int_enabled = true;
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
+	if (iommu->ppr_log != NULL)
+		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
 	return 0;
 }
 
@@ -1274,6 +1365,8 @@ static void enable_iommus(void)
 		iommu_set_device_table(iommu);
 		iommu_enable_command_buffer(iommu);
 		iommu_enable_event_buffer(iommu);
+		iommu_enable_ppr_log(iommu);
+		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable(iommu);
@@ -1560,6 +1653,8 @@ static int __init parse_amd_iommu_options(char *str)
 			amd_iommu_unmap_flush = true;
 		if (strncmp(str, "off", 3) == 0)
 			amd_iommu_disabled = true;
+		if (strncmp(str, "force_isolation", 15) == 0)
+			amd_iommu_force_isolation = true;
 	}
 
 	return 1;
@@ -1572,3 +1667,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
 		  gart_iommu_hole_init,
 		  0,
 		  0);
+
+bool amd_iommu_v2_supported(void)
+{
+	return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
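
A note on the set_dev_entry_bit()/get_dev_entry_bit() hunk above: it tracks the device-table entry's switch from eight u32 words to four u64 words (the same layout change behind the amd_iommu.c hunks that now write data[0] and data[1] as whole 64-bit values). Worked through for one bit position (illustrative snippet, not part of the patch):

	/* A DTE bit b now lives in u64 word b >> 6, at position b & 0x3f:
	 * bit 0 -> data[0] bit 0; bit 96 -> data[1] bit 32. Under the old
	 * u32 layout, bit 96 was data[96 >> 5] = data[3], bit 0. */
	static inline void dte_set_bit(u64 *data, u8 bit)
	{
		data[(bit >> 6) & 0x03] |= 1UL << (bit & 0x3f);
	}

The switch from 1 to 1UL also matters now: with a plain int constant, bit positions of 32 and above within a word would shift past the width of the type.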
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 7ffaa64410b0..1a7f41c6cc66 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
 extern void amd_iommu_init_notifier(void);
 extern void amd_iommu_init_api(void);
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+				u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+				     unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#define PPR_SUCCESS	0x0
+#define PPR_INVALID	0x1
+#define PPR_FAILURE	0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+				  int status, int tag);
+
 #ifndef CONFIG_AMD_IOMMU_STATS
 
 static inline void amd_iommu_stats_init(void) { }
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 5b9c5075e81a..6ad8b10b3130 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -69,11 +69,14 @@
 #define MMIO_EXCL_BASE_OFFSET	0x0020
 #define MMIO_EXCL_LIMIT_OFFSET	0x0028
 #define MMIO_EXT_FEATURES	0x0030
+#define MMIO_PPR_LOG_OFFSET	0x0038
 #define MMIO_CMD_HEAD_OFFSET	0x2000
 #define MMIO_CMD_TAIL_OFFSET	0x2008
 #define MMIO_EVT_HEAD_OFFSET	0x2010
 #define MMIO_EVT_TAIL_OFFSET	0x2018
 #define MMIO_STATUS_OFFSET	0x2020
+#define MMIO_PPR_HEAD_OFFSET	0x2030
+#define MMIO_PPR_TAIL_OFFSET	0x2038
 
 
 /* Extended Feature Bits */
@@ -87,8 +90,17 @@
 #define FEATURE_HE		(1ULL<<8)
 #define FEATURE_PC		(1ULL<<9)
 
+#define FEATURE_PASID_SHIFT	32
+#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT	14
+#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+#define PASID_MASK		0x000fffff
+
 /* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
+#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
 
 /* event logging constants */
 #define EVENT_ENTRY_SIZE	0x10
@@ -122,18 +134,25 @@
 #define CONTROL_CMDBUF_EN	0x0cULL
 #define CONTROL_PPFLOG_EN	0x0dULL
 #define CONTROL_PPFINT_EN	0x0eULL
+#define CONTROL_PPR_EN		0x0fULL
+#define CONTROL_GT_EN		0x10ULL
 
 /* command specific defines */
 #define CMD_COMPL_WAIT		0x01
 #define CMD_INV_DEV_ENTRY	0x02
 #define CMD_INV_IOMMU_PAGES	0x03
 #define CMD_INV_IOTLB_PAGES	0x04
+#define CMD_COMPLETE_PPR	0x07
 #define CMD_INV_ALL		0x08
 
 #define CMD_COMPL_WAIT_STORE_MASK	0x01
 #define CMD_COMPL_WAIT_INT_MASK		0x02
 #define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
 #define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04
+
+#define PPR_STATUS_MASK			0xf
+#define PPR_STATUS_SHIFT		12
 
 #define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
 
@@ -165,6 +184,23 @@
 #define EVT_BUFFER_SIZE		8192 /* 512 entries */
 #define EVT_LEN_MASK		(0x9ULL << 56)
 
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES		512
+#define PPR_LOG_SIZE_SHIFT	56
+#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE		16
+#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x)		((x) & 0xffffULL)
+#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
+#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))
+
+#define PPR_REQ_FAULT		0x01
+
 #define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
@@ -230,7 +266,24 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
-#define DTE_FLAG_IOTLB	0x01
+#define DTE_FLAG_IOTLB	(0x01UL << 32)
+#define DTE_FLAG_GV	(0x01ULL << 55)
+#define DTE_GLX_SHIFT	(56)
+#define DTE_GLX_MASK	(3)
+
+#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)
+
+#define DTE_GCR3_INDEX_A	0
+#define DTE_GCR3_INDEX_B	1
+#define DTE_GCR3_INDEX_C	1
+
+#define DTE_GCR3_SHIFT_A	58
+#define DTE_GCR3_SHIFT_B	16
+#define DTE_GCR3_SHIFT_C	43
+
+#define GCR3_VALID		0x01ULL
 
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
@@ -257,6 +310,7 @@
 					      domain for an IOMMU */
 #define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
 					      translation */
+#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */
 
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)					\
@@ -285,6 +339,29 @@ extern bool amd_iommu_iotlb_sup;
 #define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
 #define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)
 
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+	u64 address;    /* IO virtual address of the fault */
+	u32 pasid;      /* Address space identifier */
+	u16 device_id;  /* Originating PCI device id */
+	u16 tag;        /* PPR tag */
+	u16 flags;      /* Fault flags */
+
+};
+
+#define PPR_FAULT_EXEC	(1 << 1)
+#define PPR_FAULT_READ	(1 << 2)
+#define PPR_FAULT_WRITE	(1 << 5)
+#define PPR_FAULT_USER	(1 << 6)
+#define PPR_FAULT_RSVD	(1 << 7)
+#define PPR_FAULT_GN	(1 << 8)
+
+struct iommu_domain;
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
@@ -297,11 +374,15 @@ struct protection_domain {
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
+	int glx;		/* Number of levels for GCR3 table */
+	u64 *gcr3_tbl;		/* Guest CR3 table */
 	unsigned long flags;	/* flags to find out type of domain */
 	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 	void *priv;		/* private data */
+	struct iommu_domain *iommu_domain; /* Pointer to generic
+					      domain structure */
 
 };
 
@@ -315,10 +396,15 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reference count */
 	u16 devid;			  /* PCI Device ID */
+	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
+	bool passthrough;		  /* Default for device is pt_domain */
 	struct {
 		bool enabled;
 		int qdep;
 	} ats;				  /* ATS state */
+	bool pri_tlp;			  /* PASID TLB required for
+					     PPR completions */
+	u32 errata;			  /* Bitmap for errata to apply */
 };
 
 /*
@@ -399,6 +485,9 @@ struct amd_iommu {
 	/* Extended features */
 	u64 features;
 
+	/* IOMMUv2 */
+	bool is_iommu_v2;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -431,6 +520,9 @@ struct amd_iommu {
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;
 
+	/* Base of the PPR log, if present */
+	u8 *ppr_log;
+
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;
 
@@ -484,7 +576,7 @@ extern struct list_head amd_iommu_pd_list;
  * Structure defining one entry in the device table
  */
 struct dev_table_entry {
-	u32 data[8];
+	u64 data[4];
 };
 
 /*
@@ -549,6 +641,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  */
 extern bool amd_iommu_unmap_flush;
 
+/* Smallest number of PASIDs supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasids;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
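The PPR_PASID1/PPR_PASID2 pair above is easy to misread: the 20-bit PASID is stored in two disjoint bit-fields of the first 64-bit word of a PPR log entry (bits 16-31 and bits 42-45). A minimal decoding sketch using only the macros from this header; decode_ppr_fault and raw are illustrative names, and the faulting address lives in the entry's second 64-bit word, which is not shown here:

static void decode_ppr_fault(u64 raw, struct amd_iommu_fault *fault)
{
	if (PPR_REQ_TYPE(raw) != PPR_REQ_FAULT)
		return;				/* not a page fault request */

	fault->device_id = PPR_DEVID(raw);	/* bits 0-15 */
	fault->tag	 = PPR_TAG(raw);	/* bits 32-41 */
	fault->flags	 = PPR_FLAGS(raw);	/* bits 48-59 */
	fault->pasid	 = PPR_PASID(raw);	/* bits 16-31 | bits 42-45 */
}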
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
new file mode 100644
index 000000000000..fe812e2a0474
--- /dev/null
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -0,0 +1,959 @@
1/*
2 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/mmu_notifier.h>
20#include <linux/amd-iommu.h>
21#include <linux/mm_types.h>
22#include <linux/profile.h>
23#include <linux/module.h>
24#include <linux/sched.h>
25#include <linux/iommu.h>
26#include <linux/wait.h>
27#include <linux/pci.h>
28#include <linux/gfp.h>
29
30#include "amd_iommu_types.h"
31#include "amd_iommu_proto.h"
32
33MODULE_LICENSE("GPL v2");
34MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
35
36#define MAX_DEVICES 0x10000
37#define PRI_QUEUE_SIZE 512
38
39struct pri_queue {
40 atomic_t inflight;
41 bool finish;
42 int status;
43};
44
45struct pasid_state {
46 struct list_head list; /* For global state-list */
47 atomic_t count; /* Reference count */
48 struct task_struct *task; /* Task bound to this PASID */
49 struct mm_struct *mm; /* mm_struct for the faults */
50 struct mmu_notifier mn; /* mmu_notifier handle */
51 struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
52 struct device_state *device_state; /* Link to our device_state */
53 int pasid; /* PASID index */
54 spinlock_t lock; /* Protect pri_queues */
55 wait_queue_head_t wq; /* To wait for count == 0 */
56};
57
58struct device_state {
59 atomic_t count;
60 struct pci_dev *pdev;
61 struct pasid_state **states;
62 struct iommu_domain *domain;
63 int pasid_levels;
64 int max_pasids;
65 amd_iommu_invalid_ppr_cb inv_ppr_cb;
66 spinlock_t lock;
67 wait_queue_head_t wq;
68};
69
70struct fault {
71 struct work_struct work;
72 struct device_state *dev_state;
73 struct pasid_state *state;
74 struct mm_struct *mm;
75 u64 address;
76 u16 devid;
77 u16 pasid;
78 u16 tag;
79 u16 finish;
80 u16 flags;
81};
82
83struct device_state **state_table;
84static spinlock_t state_lock;
85
86/* List and lock for all pasid_states */
87static LIST_HEAD(pasid_state_list);
88static DEFINE_SPINLOCK(ps_lock);
89
90static struct workqueue_struct *iommu_wq;
91
92/*
93 * Empty page table - Used between
94 * mmu_notifier_invalidate_range_start and
95 * mmu_notifier_invalidate_range_end
96 */
97static u64 *empty_page_table;
98
99static void free_pasid_states(struct device_state *dev_state);
100static void unbind_pasid(struct device_state *dev_state, int pasid);
101static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
102
103static u16 device_id(struct pci_dev *pdev)
104{
105 u16 devid;
106
107 devid = pdev->bus->number;
108 devid = (devid << 8) | pdev->devfn;
109
110 return devid;
111}
112
113static struct device_state *get_device_state(u16 devid)
114{
115 struct device_state *dev_state;
116 unsigned long flags;
117
118 spin_lock_irqsave(&state_lock, flags);
119 dev_state = state_table[devid];
120 if (dev_state != NULL)
121 atomic_inc(&dev_state->count);
122 spin_unlock_irqrestore(&state_lock, flags);
123
124 return dev_state;
125}
126
127static void free_device_state(struct device_state *dev_state)
128{
129 /*
130 * First detach device from domain - No more PRI requests will arrive
131 * from that device after it is unbound from the IOMMUv2 domain.
132 */
133 iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
134
135 /* Everything is down now, free the IOMMUv2 domain */
136 iommu_domain_free(dev_state->domain);
137
138 /* Finally get rid of the device-state */
139 kfree(dev_state);
140}
141
142static void put_device_state(struct device_state *dev_state)
143{
144 if (atomic_dec_and_test(&dev_state->count))
145 wake_up(&dev_state->wq);
146}
147
148static void put_device_state_wait(struct device_state *dev_state)
149{
150 DEFINE_WAIT(wait);
151
152 prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
153 if (!atomic_dec_and_test(&dev_state->count))
154 schedule();
155 finish_wait(&dev_state->wq, &wait);
156
157 free_device_state(dev_state);
158}
159
160static struct notifier_block profile_nb = {
161 .notifier_call = task_exit,
162};
163
164static void link_pasid_state(struct pasid_state *pasid_state)
165{
166 spin_lock(&ps_lock);
167 list_add_tail(&pasid_state->list, &pasid_state_list);
168 spin_unlock(&ps_lock);
169}
170
171static void __unlink_pasid_state(struct pasid_state *pasid_state)
172{
173 list_del(&pasid_state->list);
174}
175
176static void unlink_pasid_state(struct pasid_state *pasid_state)
177{
178 spin_lock(&ps_lock);
179 __unlink_pasid_state(pasid_state);
180 spin_unlock(&ps_lock);
181}
182
183/* Must be called under dev_state->lock */
184static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
185 int pasid, bool alloc)
186{
187 struct pasid_state **root, **ptr;
188 int level, index;
189
190 level = dev_state->pasid_levels;
191 root = dev_state->states;
192
193 while (true) {
194
195 index = (pasid >> (9 * level)) & 0x1ff;
196 ptr = &root[index];
197
198 if (level == 0)
199 break;
200
201 if (*ptr == NULL) {
202 if (!alloc)
203 return NULL;
204
205 *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
206 if (*ptr == NULL)
207 return NULL;
208 }
209
210 root = (struct pasid_state **)*ptr;
211 level -= 1;
212 }
213
214 return ptr;
215}
216
217static int set_pasid_state(struct device_state *dev_state,
218 struct pasid_state *pasid_state,
219 int pasid)
220{
221 struct pasid_state **ptr;
222 unsigned long flags;
223 int ret;
224
225 spin_lock_irqsave(&dev_state->lock, flags);
226 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
227
228 ret = -ENOMEM;
229 if (ptr == NULL)
230 goto out_unlock;
231
232 ret = -ENOMEM;
233 if (*ptr != NULL)
234 goto out_unlock;
235
236 *ptr = pasid_state;
237
238 ret = 0;
239
240out_unlock:
241 spin_unlock_irqrestore(&dev_state->lock, flags);
242
243 return ret;
244}
245
246static void clear_pasid_state(struct device_state *dev_state, int pasid)
247{
248 struct pasid_state **ptr;
249 unsigned long flags;
250
251 spin_lock_irqsave(&dev_state->lock, flags);
252 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
253
254 if (ptr == NULL)
255 goto out_unlock;
256
257 *ptr = NULL;
258
259out_unlock:
260 spin_unlock_irqrestore(&dev_state->lock, flags);
261}
262
263static struct pasid_state *get_pasid_state(struct device_state *dev_state,
264 int pasid)
265{
266 struct pasid_state **ptr, *ret = NULL;
267 unsigned long flags;
268
269 spin_lock_irqsave(&dev_state->lock, flags);
270 ptr = __get_pasid_state_ptr(dev_state, pasid, false);
271
272 if (ptr == NULL)
273 goto out_unlock;
274
275 ret = *ptr;
276 if (ret)
277 atomic_inc(&ret->count);
278
279out_unlock:
280 spin_unlock_irqrestore(&dev_state->lock, flags);
281
282 return ret;
283}
284
285static void free_pasid_state(struct pasid_state *pasid_state)
286{
287 kfree(pasid_state);
288}
289
290static void put_pasid_state(struct pasid_state *pasid_state)
291{
292 if (atomic_dec_and_test(&pasid_state->count)) {
293 put_device_state(pasid_state->device_state);
294 wake_up(&pasid_state->wq);
295 }
296}
297
298static void put_pasid_state_wait(struct pasid_state *pasid_state)
299{
300 DEFINE_WAIT(wait);
301
302 prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
303
304 if (atomic_dec_and_test(&pasid_state->count))
305 put_device_state(pasid_state->device_state);
306 else
307 schedule();
308
309 finish_wait(&pasid_state->wq, &wait);
310 mmput(pasid_state->mm);
311 free_pasid_state(pasid_state);
312}
313
314static void __unbind_pasid(struct pasid_state *pasid_state)
315{
316 struct iommu_domain *domain;
317
318 domain = pasid_state->device_state->domain;
319
320 amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
321 clear_pasid_state(pasid_state->device_state, pasid_state->pasid);
322
323 /* Make sure no more pending faults are in the queue */
324 flush_workqueue(iommu_wq);
325
326 mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
327
328 put_pasid_state(pasid_state); /* Reference taken in bind() function */
329}
330
331static void unbind_pasid(struct device_state *dev_state, int pasid)
332{
333 struct pasid_state *pasid_state;
334
335 pasid_state = get_pasid_state(dev_state, pasid);
336 if (pasid_state == NULL)
337 return;
338
339 unlink_pasid_state(pasid_state);
340 __unbind_pasid(pasid_state);
341 put_pasid_state_wait(pasid_state); /* Reference taken in this function */
342}
343
344static void free_pasid_states_level1(struct pasid_state **tbl)
345{
346 int i;
347
348 for (i = 0; i < 512; ++i) {
349 if (tbl[i] == NULL)
350 continue;
351
352 free_page((unsigned long)tbl[i]);
353 }
354}
355
356static void free_pasid_states_level2(struct pasid_state **tbl)
357{
358 struct pasid_state **ptr;
359 int i;
360
361 for (i = 0; i < 512; ++i) {
362 if (tbl[i] == NULL)
363 continue;
364
365 ptr = (struct pasid_state **)tbl[i];
366 free_pasid_states_level1(ptr);
367 }
368}
369
370static void free_pasid_states(struct device_state *dev_state)
371{
372 struct pasid_state *pasid_state;
373 int i;
374
375 for (i = 0; i < dev_state->max_pasids; ++i) {
376 pasid_state = get_pasid_state(dev_state, i);
377 if (pasid_state == NULL)
378 continue;
379
380 put_pasid_state(pasid_state);
381 unbind_pasid(dev_state, i);
382 }
383
384 if (dev_state->pasid_levels == 2)
385 free_pasid_states_level2(dev_state->states);
386 else if (dev_state->pasid_levels == 1)
387 free_pasid_states_level1(dev_state->states);
388 else if (dev_state->pasid_levels != 0)
389 BUG();
390
391 free_page((unsigned long)dev_state->states);
392}
393
394static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
395{
396 return container_of(mn, struct pasid_state, mn);
397}
398
399static void __mn_flush_page(struct mmu_notifier *mn,
400 unsigned long address)
401{
402 struct pasid_state *pasid_state;
403 struct device_state *dev_state;
404
405 pasid_state = mn_to_state(mn);
406 dev_state = pasid_state->device_state;
407
408 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
409}
410
411static int mn_clear_flush_young(struct mmu_notifier *mn,
412 struct mm_struct *mm,
413 unsigned long address)
414{
415 __mn_flush_page(mn, address);
416
417 return 0;
418}
419
420static void mn_change_pte(struct mmu_notifier *mn,
421 struct mm_struct *mm,
422 unsigned long address,
423 pte_t pte)
424{
425 __mn_flush_page(mn, address);
426}
427
428static void mn_invalidate_page(struct mmu_notifier *mn,
429 struct mm_struct *mm,
430 unsigned long address)
431{
432 __mn_flush_page(mn, address);
433}
434
435static void mn_invalidate_range_start(struct mmu_notifier *mn,
436 struct mm_struct *mm,
437 unsigned long start, unsigned long end)
438{
439 struct pasid_state *pasid_state;
440 struct device_state *dev_state;
441
442 pasid_state = mn_to_state(mn);
443 dev_state = pasid_state->device_state;
444
445 amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
446 __pa(empty_page_table));
447}
448
449static void mn_invalidate_range_end(struct mmu_notifier *mn,
450 struct mm_struct *mm,
451 unsigned long start, unsigned long end)
452{
453 struct pasid_state *pasid_state;
454 struct device_state *dev_state;
455
456 pasid_state = mn_to_state(mn);
457 dev_state = pasid_state->device_state;
458
459 amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
460 __pa(pasid_state->mm->pgd));
461}
462
463static struct mmu_notifier_ops iommu_mn = {
464 .clear_flush_young = mn_clear_flush_young,
465 .change_pte = mn_change_pte,
466 .invalidate_page = mn_invalidate_page,
467 .invalidate_range_start = mn_invalidate_range_start,
468 .invalidate_range_end = mn_invalidate_range_end,
469};
470
471static void set_pri_tag_status(struct pasid_state *pasid_state,
472 u16 tag, int status)
473{
474 unsigned long flags;
475
476 spin_lock_irqsave(&pasid_state->lock, flags);
477 pasid_state->pri[tag].status = status;
478 spin_unlock_irqrestore(&pasid_state->lock, flags);
479}
480
481static void finish_pri_tag(struct device_state *dev_state,
482 struct pasid_state *pasid_state,
483 u16 tag)
484{
485 unsigned long flags;
486
487 spin_lock_irqsave(&pasid_state->lock, flags);
488 if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
489 pasid_state->pri[tag].finish) {
490 amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
491 pasid_state->pri[tag].status, tag);
492 pasid_state->pri[tag].finish = false;
493 pasid_state->pri[tag].status = PPR_SUCCESS;
494 }
495 spin_unlock_irqrestore(&pasid_state->lock, flags);
496}
497
498static void do_fault(struct work_struct *work)
499{
500 struct fault *fault = container_of(work, struct fault, work);
501 int npages, write;
502 struct page *page;
503
504 write = !!(fault->flags & PPR_FAULT_WRITE);
505
506 npages = get_user_pages(fault->state->task, fault->state->mm,
507 fault->address, 1, write, 0, &page, NULL);
508
509 if (npages == 1) {
510 put_page(page);
511 } else if (fault->dev_state->inv_ppr_cb) {
512 int status;
513
514 status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
515 fault->pasid,
516 fault->address,
517 fault->flags);
518 switch (status) {
519 case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
520 set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
521 break;
522 case AMD_IOMMU_INV_PRI_RSP_INVALID:
523 set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
524 break;
525 case AMD_IOMMU_INV_PRI_RSP_FAIL:
526 set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
527 break;
528 default:
529 BUG();
530 }
531 } else {
532 set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
533 }
534
535 finish_pri_tag(fault->dev_state, fault->state, fault->tag);
536
537 put_pasid_state(fault->state);
538
539 kfree(fault);
540}
541
542static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
543{
544 struct amd_iommu_fault *iommu_fault;
545 struct pasid_state *pasid_state;
546 struct device_state *dev_state;
547 unsigned long flags;
548 struct fault *fault;
549 bool finish;
550 u16 tag;
551 int ret;
552
553 iommu_fault = data;
554 tag = iommu_fault->tag & 0x1ff;
555 finish = (iommu_fault->tag >> 9) & 1;
556
557 ret = NOTIFY_DONE;
558 dev_state = get_device_state(iommu_fault->device_id);
559 if (dev_state == NULL)
560 goto out;
561
562 pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
563 if (pasid_state == NULL) {
564 /* We know the device but not the PASID -> send INVALID */
565 amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
566 PPR_INVALID, tag);
567 goto out_drop_state;
568 }
569
570 spin_lock_irqsave(&pasid_state->lock, flags);
571 atomic_inc(&pasid_state->pri[tag].inflight);
572 if (finish)
573 pasid_state->pri[tag].finish = true;
574 spin_unlock_irqrestore(&pasid_state->lock, flags);
575
576 fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
577 if (fault == NULL) {
578 /* We are OOM - send success and let the device re-fault */
579 finish_pri_tag(dev_state, pasid_state, tag);
580 goto out_drop_state;
581 }
582
583 fault->dev_state = dev_state;
584 fault->address = iommu_fault->address;
585 fault->state = pasid_state;
586 fault->tag = tag;
587 fault->finish = finish;
588 fault->flags = iommu_fault->flags;
589 INIT_WORK(&fault->work, do_fault);
590
591 queue_work(iommu_wq, &fault->work);
592
593 ret = NOTIFY_OK;
594
595out_drop_state:
596 put_device_state(dev_state);
597
598out:
599 return ret;
600}
601
602static struct notifier_block ppr_nb = {
603 .notifier_call = ppr_notifier,
604};
605
606static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
607{
608 struct pasid_state *pasid_state;
609 struct task_struct *task;
610
611 task = data;
612
613 /*
614 * Using this notifier is a hack - but there is no other choice
615 * at the moment. What I really want is a sleeping notifier that
616 * is called when an MM goes down. But such a notifier doesn't
617 * exist yet. The notifier needs to sleep because it has to make
618 * sure that the device does not use the PASID and the address
619 * space anymore before it is destroyed. This includes waiting
620 * for pending PRI requests to pass the workqueue. The
621 * MMU-Notifiers would be a good fit, but they use RCU and so
622 * they are not allowed to sleep. Let's see how we can solve this
623 * in a more intelligent way in the future.
624 */
625again:
626 spin_lock(&ps_lock);
627 list_for_each_entry(pasid_state, &pasid_state_list, list) {
628 struct device_state *dev_state;
629 int pasid;
630
631 if (pasid_state->task != task)
632 continue;
633
634 /* Drop Lock and unbind */
635 spin_unlock(&ps_lock);
636
637 dev_state = pasid_state->device_state;
638 pasid = pasid_state->pasid;
639
640 unbind_pasid(dev_state, pasid);
641
642 /* Task may be in the list multiple times */
643 goto again;
644 }
645 spin_unlock(&ps_lock);
646
647 return NOTIFY_OK;
648}
649
650int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
651 struct task_struct *task)
652{
653 struct pasid_state *pasid_state;
654 struct device_state *dev_state;
655 u16 devid;
656 int ret;
657
658 might_sleep();
659
660 if (!amd_iommu_v2_supported())
661 return -ENODEV;
662
663 devid = device_id(pdev);
664 dev_state = get_device_state(devid);
665
666 if (dev_state == NULL)
667 return -EINVAL;
668
669 ret = -EINVAL;
670 if (pasid < 0 || pasid >= dev_state->max_pasids)
671 goto out;
672
673 ret = -ENOMEM;
674 pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
675 if (pasid_state == NULL)
676 goto out;
677
678 atomic_set(&pasid_state->count, 1);
679 init_waitqueue_head(&pasid_state->wq);
680 pasid_state->task = task;
681 pasid_state->mm = get_task_mm(task);
682 pasid_state->device_state = dev_state;
683 pasid_state->pasid = pasid;
684 pasid_state->mn.ops = &iommu_mn;
685
686 if (pasid_state->mm == NULL)
687 goto out_free;
688
689 mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
690
691 ret = set_pasid_state(dev_state, pasid_state, pasid);
692 if (ret)
693 goto out_unregister;
694
695 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
696 __pa(pasid_state->mm->pgd));
697 if (ret)
698 goto out_clear_state;
699
700 link_pasid_state(pasid_state);
701
702 return 0;
703
704out_clear_state:
705 clear_pasid_state(dev_state, pasid);
706
707out_unregister:
708 mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
709
710out_free:
711 free_pasid_state(pasid_state);
712
713out:
714 put_device_state(dev_state);
715
716 return ret;
717}
718EXPORT_SYMBOL(amd_iommu_bind_pasid);
719
720void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
721{
722 struct device_state *dev_state;
723 u16 devid;
724
725 might_sleep();
726
727 if (!amd_iommu_v2_supported())
728 return;
729
730 devid = device_id(pdev);
731 dev_state = get_device_state(devid);
732 if (dev_state == NULL)
733 return;
734
735 if (pasid < 0 || pasid >= dev_state->max_pasids)
736 goto out;
737
738 unbind_pasid(dev_state, pasid);
739
740out:
741 put_device_state(dev_state);
742}
743EXPORT_SYMBOL(amd_iommu_unbind_pasid);
744
745int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
746{
747 struct device_state *dev_state;
748 unsigned long flags;
749 int ret, tmp;
750 u16 devid;
751
752 might_sleep();
753
754 if (!amd_iommu_v2_supported())
755 return -ENODEV;
756
757 if (pasids <= 0 || pasids > (PASID_MASK + 1))
758 return -EINVAL;
759
760 devid = device_id(pdev);
761
762 dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
763 if (dev_state == NULL)
764 return -ENOMEM;
765
766 spin_lock_init(&dev_state->lock);
767 init_waitqueue_head(&dev_state->wq);
768 dev_state->pdev = pdev;
769
770 tmp = pasids;
771 for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
772 dev_state->pasid_levels += 1;
773
774 atomic_set(&dev_state->count, 1);
775 dev_state->max_pasids = pasids;
776
777 ret = -ENOMEM;
778 dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
779 if (dev_state->states == NULL)
780 goto out_free_dev_state;
781
782 dev_state->domain = iommu_domain_alloc(&pci_bus_type);
783 if (dev_state->domain == NULL)
784 goto out_free_states;
785
786 amd_iommu_domain_direct_map(dev_state->domain);
787
788 ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
789 if (ret)
790 goto out_free_domain;
791
792 ret = iommu_attach_device(dev_state->domain, &pdev->dev);
793 if (ret != 0)
794 goto out_free_domain;
795
796 spin_lock_irqsave(&state_lock, flags);
797
798 if (state_table[devid] != NULL) {
799 spin_unlock_irqrestore(&state_lock, flags);
800 ret = -EBUSY;
801 goto out_free_domain;
802 }
803
804 state_table[devid] = dev_state;
805
806 spin_unlock_irqrestore(&state_lock, flags);
807
808 return 0;
809
810out_free_domain:
811 iommu_domain_free(dev_state->domain);
812
813out_free_states:
814 free_page((unsigned long)dev_state->states);
815
816out_free_dev_state:
817 kfree(dev_state);
818
819 return ret;
820}
821EXPORT_SYMBOL(amd_iommu_init_device);
822
823void amd_iommu_free_device(struct pci_dev *pdev)
824{
825 struct device_state *dev_state;
826 unsigned long flags;
827 u16 devid;
828
829 if (!amd_iommu_v2_supported())
830 return;
831
832 devid = device_id(pdev);
833
834 spin_lock_irqsave(&state_lock, flags);
835
836 dev_state = state_table[devid];
837 if (dev_state == NULL) {
838 spin_unlock_irqrestore(&state_lock, flags);
839 return;
840 }
841
842 state_table[devid] = NULL;
843
844 spin_unlock_irqrestore(&state_lock, flags);
845
846 /* Get rid of any remaining pasid states */
847 free_pasid_states(dev_state);
848
849 put_device_state_wait(dev_state);
850}
851EXPORT_SYMBOL(amd_iommu_free_device);
852
853int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
854 amd_iommu_invalid_ppr_cb cb)
855{
856 struct device_state *dev_state;
857 unsigned long flags;
858 u16 devid;
859 int ret;
860
861 if (!amd_iommu_v2_supported())
862 return -ENODEV;
863
864 devid = device_id(pdev);
865
866 spin_lock_irqsave(&state_lock, flags);
867
868 ret = -EINVAL;
869 dev_state = state_table[devid];
870 if (dev_state == NULL)
871 goto out_unlock;
872
873 dev_state->inv_ppr_cb = cb;
874
875 ret = 0;
876
877out_unlock:
878 spin_unlock_irqrestore(&state_lock, flags);
879
880 return ret;
881}
882EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
883
884static int __init amd_iommu_v2_init(void)
885{
886 size_t state_table_size;
887 int ret;
888
889 pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
890
891 spin_lock_init(&state_lock);
892
893 state_table_size = MAX_DEVICES * sizeof(struct device_state *);
894 state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
895 get_order(state_table_size));
896 if (state_table == NULL)
897 return -ENOMEM;
898
899 ret = -ENOMEM;
900 iommu_wq = create_workqueue("amd_iommu_v2");
901 if (iommu_wq == NULL)
902 goto out_free;
903
904 ret = -ENOMEM;
905 empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
906 if (empty_page_table == NULL)
907 goto out_destroy_wq;
908
909 amd_iommu_register_ppr_notifier(&ppr_nb);
910 profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
911
912 return 0;
913
914out_destroy_wq:
915 destroy_workqueue(iommu_wq);
916
917out_free:
918 free_pages((unsigned long)state_table, get_order(state_table_size));
919
920 return ret;
921}
922
923static void __exit amd_iommu_v2_exit(void)
924{
925 struct device_state *dev_state;
926 size_t state_table_size;
927 int i;
928
929 profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
930 amd_iommu_unregister_ppr_notifier(&ppr_nb);
931
932 flush_workqueue(iommu_wq);
933
934 /*
935 * The loop below might call flush_workqueue(), so call
936 * destroy_workqueue() after it
937 */
938 for (i = 0; i < MAX_DEVICES; ++i) {
939 dev_state = get_device_state(i);
940
941 if (dev_state == NULL)
942 continue;
943
944 WARN_ON_ONCE(1);
945
946 put_device_state(dev_state);
947 amd_iommu_free_device(dev_state->pdev);
948 }
949
950 destroy_workqueue(iommu_wq);
951
952 state_table_size = MAX_DEVICES * sizeof(struct device_state *);
953 free_pages((unsigned long)state_table, get_order(state_table_size));
954
955 free_page((unsigned long)empty_page_table);
956}
957
958module_init(amd_iommu_v2_init);
959module_exit(amd_iommu_v2_exit);
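
Taken together, the symbols exported from this file form a small lifecycle for a PRI/PASID-capable device driver: probe for support, register the device with a PASID budget, bind address spaces, and tear everything down again. A hedged usage sketch; the PASID count, the use of current, and the function names are illustrative rather than taken from an in-tree user:

static int example_enable_demand_paging(struct pci_dev *pdev)
{
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	ret = amd_iommu_init_device(pdev, 16);	/* budget of 16 PASIDs */
	if (ret)
		return ret;

	/* bind PASID 0 to the calling task's address space */
	ret = amd_iommu_bind_pasid(pdev, 0, current);
	if (ret)
		amd_iommu_free_device(pdev);

	return ret;
}

static void example_disable_demand_paging(struct pci_dev *pdev)
{
	amd_iommu_unbind_pasid(pdev, 0);
	amd_iommu_free_device(pdev);	/* also cleans up stale PASID state */
}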
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c46..c181883c2f9a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3524,7 +3542,7 @@ found:
 	return 0;
 }
 
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
 {
 	struct dmar_rmrr_unit *rmrr, *rmrr_n;
 	struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+				unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap	= intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
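Since ~0xFFFUL has every bit from 12 upward set, the core derives a 4 KiB minimum page size from this bitmap and accepts any 4 KiB-order chunk, which is exactly the old behavior. A sketch of the arithmetic, not part of the patch:

static unsigned int intel_min_pagesz(void)
{
	/* lowest set bit of ~0xFFFUL is bit 12, so this yields 4096 */
	return 1 << __ffs(INTEL_IOMMU_PGSIZES);
}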
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f314..6777ca049471 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
 	return ir_supported;
 }
 
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
 {
 	if (!intr_remapping_enabled)
 		return 0;
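This and the dmar_parse_rmrr_atsr_dev() hunk above are the same fix: both functions run only during boot-time setup, so marking them __init lets the kernel discard their text once initialization finishes. Illustration only, with a made-up function name:

static int __init example_boot_only_setup(void)
{
	/* placed in .init.text and freed after boot, so it must never
	 * be called from a later code path */
	return 0;
}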
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df553..84cdd8ac81f1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -157,32 +159,125 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, int gfp_order, int prot)
+	      phys_addr_t paddr, size_t size, int prot)
 {
-	size_t size;
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL))
 		return -ENODEV;
 
-	size = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+				(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
 
-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
 
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+					(unsigned long)paddr, pgsize);
+
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t size;
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
 
 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;
 
-	size = PAGE_SIZE << gfp_order;
-
-	BUG_ON(!IS_ALIGNED(iova, size));
-
-	return domain->ops->unmap(domain, iova, gfp_order);
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+			iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+							(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+					(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
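The size-picking logic inside the iommu_map() loop is a pure function of the current iova, paddr, remaining size, and the driver's bitmap, which makes it easy to check by hand. A standalone restatement with example values; it mirrors the loop body above and assumes the caller already enforced min_pagesz alignment, so the mask cannot end up empty:

/* With iova = 0x10000, paddr = 0x50000, size = 0x30000 and a bitmap of
 * SZ_4K | SZ_64K this returns SZ_64K: alignment allows 64K pages and
 * the bitmap disallows anything larger. */
static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
				 size_t size, unsigned long pgsize_bitmap)
{
	unsigned long addr_merge = iova | paddr;
	unsigned int pgsize_idx = __fls(size);	/* biggest size that fits */
	unsigned long pgsize;

	if (addr_merge)		/* alignment of both addresses constrains us */
		pgsize_idx = min_t(unsigned int, pgsize_idx,
				   __ffs(addr_merge));

	/* all sizes up to and including pgsize_idx, filtered by hardware */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

	return 1UL << __fls(pgsize);		/* biggest surviving size */
}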
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2e28f9..08a90b88e40d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
 #define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
 
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];
 
 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
 	return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	ret = __flush_iotlb(domain);
 
-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
 
 static int __init get_tex_class(int icp, int ocp, int mt, int nos)
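msm is the first driver converted to the reworked ops contract: .map() now receives exactly one page size per call (always one the driver advertised), while .unmap() receives however many bytes the core still wants removed and must report how many bytes it actually unmapped, with 0 meaning failure and stopping the core's loop. A driver-side sketch; size_of_mapping_at and clear_mapping_at are hypothetical helpers:

static size_t example_unmap(struct iommu_domain *domain, unsigned long va,
			    size_t len)
{
	/* unmap whatever single mapping lives at va, up to len bytes */
	size_t pgsize = size_of_mapping_at(domain, va);

	if (!pgsize || pgsize > len || clear_mapping_at(domain, va, pgsize))
		return 0;	/* nothing unmapped; the core gives up */

	return pgsize;		/* bytes unmapped, not a page order */
}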
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2bf7587..08cf7ec5b4a5 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -33,6 +33,9 @@
 			(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
 			__i++)
 
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:	the page table
@@ -1019,12 +1022,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1051,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			       size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;
-
-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
 
-	unmap_size = iopgtable_clear_entry(oiommu, da);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
@@ -1211,6 +1210,7 @@ static struct iommu_ops omap_iommu_ops = {
 	.unmap		= omap_iommu_unmap,
 	.iova_to_phys	= omap_iommu_iova_to_phys,
 	.domain_has_cap	= omap_iommu_domain_has_cap,
+	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 };
 
 static int __init omap_iommu_init(void)
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 46be456fcc00..6edc4ceba197 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -410,7 +410,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
-	int order;
 
 	if (!domain || !sgt)
 		return -EINVAL;
@@ -429,12 +428,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
 
-		order = get_order(bytes);
-
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		err = iommu_map(domain, da, pa, order, flags);
+		err = iommu_map(domain, da, pa, bytes, flags);
 		if (err)
 			goto err_out;
 
@@ -449,10 +446,9 @@ err_out:
 		size_t bytes;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
-		iommu_unmap(domain, da, order);
+		iommu_unmap(domain, da, bytes);
 
 		da += bytes;
 	}
@@ -467,7 +463,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	size_t total = area->da_end - area->da_start;
 	const struct sg_table *sgt = area->sgt;
 	struct scatterlist *sg;
-	int i, err;
+	int i;
+	size_t unmapped;
 
 	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +472,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	start = area->da_start;
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
-		int order;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
-		err = iommu_unmap(domain, start, order);
-		if (err < 0)
+		unmapped = iommu_unmap(domain, start, bytes);
+		if (unmapped < bytes)
 			break;
 
 		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721bf..b6907118283a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
 	 */
 	int i;
 
+	spin_lock_irq(&bitmap->lock);
 	for (i = 0; i < bitmap->file_pages; i++)
 		set_page_attr(bitmap, bitmap->filemap[i],
 			      BITMAP_PAGE_NEEDWRITE);
 	bitmap->allclean = 0;
+	spin_unlock_irq(&bitmap->lock);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
 	for (chunk = s; chunk <= e; chunk++) {
 		sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
 		bitmap_set_memory_bits(bitmap, sec, 1);
+		spin_lock_irq(&bitmap->lock);
 		bitmap_file_set_bit(bitmap, sec);
+		spin_unlock_irq(&bitmap->lock);
 		if (sec < bitmap->mddev->recovery_cp)
 			/* We are asserting that the array is dirty,
 			 * so move the recovery_cp address back so
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e4..ee981737edfc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
 	    mddev->ctime == 0 && !mddev->hold_active) {
 		/* Array is not configured at all, and not held active,
 		 * so destroy it */
-		list_del(&mddev->all_mddevs);
+		list_del_init(&mddev->all_mddevs);
 		bs = mddev->bio_set;
 		mddev->bio_set = NULL;
 		if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
 		sep = ",";
 	}
 	if (test_bit(Blocked, &rdev->flags) ||
-	    rdev->badblocks.unacked_exist) {
+	    (rdev->badblocks.unacked_exist
+	     && !test_bit(Faulty, &rdev->flags))) {
 		len += sprintf(page+len, "%sblocked", sep);
 		sep = ",";
 	}
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 	if (err)
 		return err;
 	else {
+		if (mddev->hold_active == UNTIL_IOCTL)
+			mddev->hold_active = 0;
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 		return len;
 	}
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4487 4490
4488 if (!entry->show) 4491 if (!entry->show)
4489 return -EIO; 4492 return -EIO;
4493 spin_lock(&all_mddevs_lock);
4494 if (list_empty(&mddev->all_mddevs)) {
4495 spin_unlock(&all_mddevs_lock);
4496 return -EBUSY;
4497 }
4498 mddev_get(mddev);
4499 spin_unlock(&all_mddevs_lock);
4500
4490 rv = mddev_lock(mddev); 4501 rv = mddev_lock(mddev);
4491 if (!rv) { 4502 if (!rv) {
4492 rv = entry->show(mddev, page); 4503 rv = entry->show(mddev, page);
4493 mddev_unlock(mddev); 4504 mddev_unlock(mddev);
4494 } 4505 }
4506 mddev_put(mddev);
4495 return rv; 4507 return rv;
4496} 4508}
4497 4509
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
4507 return -EIO; 4519 return -EIO;
4508 if (!capable(CAP_SYS_ADMIN)) 4520 if (!capable(CAP_SYS_ADMIN))
4509 return -EACCES; 4521 return -EACCES;
4522 spin_lock(&all_mddevs_lock);
4523 if (list_empty(&mddev->all_mddevs)) {
4524 spin_unlock(&all_mddevs_lock);
4525 return -EBUSY;
4526 }
4527 mddev_get(mddev);
4528 spin_unlock(&all_mddevs_lock);
4510 rv = mddev_lock(mddev); 4529 rv = mddev_lock(mddev);
4511 if (mddev->hold_active == UNTIL_IOCTL)
4512 mddev->hold_active = 0;
4513 if (!rv) { 4530 if (!rv) {
4514 rv = entry->store(mddev, page, length); 4531 rv = entry->store(mddev, page, length);
4515 mddev_unlock(mddev); 4532 mddev_unlock(mddev);
4516 } 4533 }
4534 mddev_put(mddev);
4517 return rv; 4535 return rv;
4518} 4536}
4519 4537
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
7840 s + rdev->data_offset, sectors, acknowledged); 7858 s + rdev->data_offset, sectors, acknowledged);
7841 if (rv) { 7859 if (rv) {
7842 /* Make sure they get written out promptly */ 7860 /* Make sure they get written out promptly */
7861 sysfs_notify_dirent_safe(rdev->sysfs_state);
7843 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); 7862 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
7844 md_wakeup_thread(rdev->mddev->thread); 7863 md_wakeup_thread(rdev->mddev->thread);
7845 } 7864 }
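The md_attr_show()/md_attr_store() hunks pin the mddev before sleeping in mddev_lock(), because the sysfs file can still be open while the array is torn down; the list_del_init() change above makes an empty all_mddevs entry the tombstone to test for. A sketch of the pattern, using names from the diff:

    static ssize_t sketch_attr_access(struct mddev *mddev,
                                      struct md_sysfs_entry *entry, char *page)
    {
            ssize_t rv;

            spin_lock(&all_mddevs_lock);
            if (list_empty(&mddev->all_mddevs)) {
                    spin_unlock(&all_mddevs_lock);
                    return -EBUSY;          /* device is going away */
            }
            mddev_get(mddev);               /* pin across the sleep */
            spin_unlock(&all_mddevs_lock);

            rv = mddev_lock(mddev);
            if (!rv) {
                    rv = entry->show(mddev, page);
                    mddev_unlock(mddev);
            }
            mddev_put(mddev);               /* may free the mddev */
            return rv;
    }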
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e26092178..31670f8d6b65 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3036 if (dev->written) 3036 if (dev->written)
3037 s->written++; 3037 s->written++;
3038 rdev = rcu_dereference(conf->disks[i].rdev); 3038 rdev = rcu_dereference(conf->disks[i].rdev);
3039 if (rdev && test_bit(Faulty, &rdev->flags))
3040 rdev = NULL;
3039 if (rdev) { 3041 if (rdev) {
3040 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3042 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3041 &first_bad, &bad_sectors); 3043 &first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3063 } 3065 }
3064 } else if (test_bit(In_sync, &rdev->flags)) 3066 } else if (test_bit(In_sync, &rdev->flags))
3065 set_bit(R5_Insync, &dev->flags); 3067 set_bit(R5_Insync, &dev->flags);
3066 else if (!test_bit(Faulty, &rdev->flags)) { 3068 else {
3067 /* in sync if before recovery_offset */ 3069 /* in sync if before recovery_offset */
3068 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3070 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3069 set_bit(R5_Insync, &dev->flags); 3071 set_bit(R5_Insync, &dev->flags);
3070 } 3072 }
3071 if (test_bit(R5_WriteError, &dev->flags)) { 3073 if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3072 clear_bit(R5_Insync, &dev->flags); 3074 clear_bit(R5_Insync, &dev->flags);
3073 if (!test_bit(Faulty, &rdev->flags)) { 3075 if (!test_bit(Faulty, &rdev->flags)) {
3074 s->handle_bad_blocks = 1; 3076 s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3076 } else 3078 } else
3077 clear_bit(R5_WriteError, &dev->flags); 3079 clear_bit(R5_WriteError, &dev->flags);
3078 } 3080 }
3079 if (test_bit(R5_MadeGood, &dev->flags)) { 3081 if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3080 if (!test_bit(Faulty, &rdev->flags)) { 3082 if (!test_bit(Faulty, &rdev->flags)) {
3081 s->handle_bad_blocks = 1; 3083 s->handle_bad_blocks = 1;
3082 atomic_inc(&rdev->nr_pending); 3084 atomic_inc(&rdev->nr_pending);
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434bafc..90497ffb1ac3 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
 2# Makefile for the PA Semi network device drivers. 2# Makefile for the PA Semi network device drivers.
3# 3#
4 4
5obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o 5obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
6pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f06aa10f0d7..67bf07819992 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1183 return value; 1183 return value;
1184} 1184}
1185 1185
1186static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) 1186static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1187{ 1187{
1188 RTL_W16(IntrMask, 0x0000); 1188 void __iomem *ioaddr = tp->mmio_addr;
1189 1189
1190 RTL_W16(IntrStatus, 0xffff); 1190 RTL_W16(IntrMask, 0x0000);
1191 RTL_W16(IntrStatus, tp->intr_event);
1192 RTL_R8(ChipCmd);
1191} 1193}
1192 1194
1193static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) 1195static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
3933 break; 3935 break;
3934 udelay(100); 3936 udelay(100);
3935 } 3937 }
3936
3937 rtl8169_init_ring_indexes(tp);
3938} 3938}
3939 3939
3940static int __devinit 3940static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4339 void __iomem *ioaddr = tp->mmio_addr; 4339 void __iomem *ioaddr = tp->mmio_addr;
4340 4340
4341 /* Disable interrupts */ 4341 /* Disable interrupts */
4342 rtl8169_irq_mask_and_ack(ioaddr); 4342 rtl8169_irq_mask_and_ack(tp);
4343 4343
4344 rtl_rx_close(tp); 4344 rtl_rx_close(tp);
4345 4345
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
4885 RTL_W16(IntrMitigate, 0x5151); 4885 RTL_W16(IntrMitigate, 0x5151);
4886 4886
4887 /* Work around for RxFIFO overflow. */ 4887 /* Work around for RxFIFO overflow. */
4888 if (tp->mac_version == RTL_GIGA_MAC_VER_11 || 4888 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
4889 tp->mac_version == RTL_GIGA_MAC_VER_22) {
4890 tp->intr_event |= RxFIFOOver | PCSTimeout; 4889 tp->intr_event |= RxFIFOOver | PCSTimeout;
4891 tp->intr_event &= ~RxOverflow; 4890 tp->intr_event &= ~RxOverflow;
4892 } 4891 }
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
5076 void __iomem *ioaddr = tp->mmio_addr; 5075 void __iomem *ioaddr = tp->mmio_addr;
5077 struct pci_dev *pdev = tp->pci_dev; 5076 struct pci_dev *pdev = tp->pci_dev;
5078 5077
5078 if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
5079 tp->intr_event &= ~RxFIFOOver;
5080 tp->napi_event &= ~RxFIFOOver;
5081 }
5082
5079 if (tp->mac_version == RTL_GIGA_MAC_VER_13 || 5083 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5080 tp->mac_version == RTL_GIGA_MAC_VER_16) { 5084 tp->mac_version == RTL_GIGA_MAC_VER_16) {
5081 int cap = pci_pcie_cap(pdev); 5085 int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
5342 /* Wait for any pending NAPI task to complete */ 5346 /* Wait for any pending NAPI task to complete */
5343 napi_disable(&tp->napi); 5347 napi_disable(&tp->napi);
5344 5348
5345 rtl8169_irq_mask_and_ack(ioaddr); 5349 rtl8169_irq_mask_and_ack(tp);
5346 5350
5347 tp->intr_mask = 0xffff; 5351 tp->intr_mask = 0xffff;
5348 RTL_W16(IntrMask, tp->intr_event); 5352 RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
5389 if (!netif_running(dev)) 5393 if (!netif_running(dev))
5390 goto out_unlock; 5394 goto out_unlock;
5391 5395
5396 rtl8169_hw_reset(tp);
5397
5392 rtl8169_wait_for_quiescence(dev); 5398 rtl8169_wait_for_quiescence(dev);
5393 5399
5394 for (i = 0; i < NUM_RX_DESC; i++) 5400 for (i = 0; i < NUM_RX_DESC; i++)
5395 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); 5401 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5396 5402
5397 rtl8169_tx_clear(tp); 5403 rtl8169_tx_clear(tp);
5404 rtl8169_init_ring_indexes(tp);
5398 5405
5399 rtl8169_hw_reset(tp);
5400 rtl_hw_start(dev); 5406 rtl_hw_start(dev);
5401 netif_wake_queue(dev); 5407 netif_wake_queue(dev);
5402 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 5408 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ out_unlock:
5407 5413
5408static void rtl8169_tx_timeout(struct net_device *dev) 5414static void rtl8169_tx_timeout(struct net_device *dev)
5409{ 5415{
5410 struct rtl8169_private *tp = netdev_priv(dev);
5411
5412 rtl8169_hw_reset(tp);
5413
5414 /* Let's wait a bit while any (async) irq lands on */
5415 rtl8169_schedule_work(dev, rtl8169_reset_task); 5416 rtl8169_schedule_work(dev, rtl8169_reset_task);
5416} 5417}
5417 5418
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5804 */ 5805 */
5805 status = RTL_R16(IntrStatus); 5806 status = RTL_R16(IntrStatus);
5806 while (status && status != 0xffff) { 5807 while (status && status != 0xffff) {
5808 status &= tp->intr_event;
5809 if (!status)
5810 break;
5811
5807 handled = 1; 5812 handled = 1;
5808 5813
5809 /* Handle all of the error cases first. These will reset 5814 /* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5818 switch (tp->mac_version) { 5823 switch (tp->mac_version) {
5819 /* Work around for rx fifo overflow */ 5824 /* Work around for rx fifo overflow */
5820 case RTL_GIGA_MAC_VER_11: 5825 case RTL_GIGA_MAC_VER_11:
5821 case RTL_GIGA_MAC_VER_22:
5822 case RTL_GIGA_MAC_VER_26:
5823 netif_stop_queue(dev); 5826 netif_stop_queue(dev);
5824 rtl8169_tx_timeout(dev); 5827 rtl8169_tx_timeout(dev);
5825 goto done; 5828 goto done;
5826 /* Testers needed. */
5827 case RTL_GIGA_MAC_VER_17:
5828 case RTL_GIGA_MAC_VER_19:
5829 case RTL_GIGA_MAC_VER_20:
5830 case RTL_GIGA_MAC_VER_21:
5831 case RTL_GIGA_MAC_VER_23:
5832 case RTL_GIGA_MAC_VER_24:
5833 case RTL_GIGA_MAC_VER_27:
5834 case RTL_GIGA_MAC_VER_28:
5835 case RTL_GIGA_MAC_VER_31:
5836 /* Experimental science. Pktgen proof. */
5837 case RTL_GIGA_MAC_VER_12:
5838 case RTL_GIGA_MAC_VER_25:
5839 if (status == RxFIFOOver)
5840 goto done;
5841 break;
5842 default: 5829 default:
5843 break; 5830 break;
5844 } 5831 }
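The r8169 hunks route mask-and-ack through the private struct so the helper can ack exactly the events the driver manages and then flush the posted PCI writes with a register read. A sketch of that pattern; RTL_W16()/RTL_R8() are the driver's own MMIO macros and expect a local ioaddr:

    static void sketch_irq_mask_and_ack(struct rtl8169_private *tp)
    {
            void __iomem *ioaddr = tp->mmio_addr;

            RTL_W16(IntrMask, 0x0000);              /* mask everything */
            RTL_W16(IntrStatus, tp->intr_event);    /* ack only our events */
            RTL_R8(ChipCmd);                        /* flush posted writes */
    }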
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2d..1187a1169eb2 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
926 goto done; 926 goto done;
927 927
928 /* Re-enable the ingress interrupt. */ 928 /* Re-enable the ingress interrupt. */
929 enable_percpu_irq(priv->intr_id); 929 enable_percpu_irq(priv->intr_id, 0);
930 930
931 /* HACK: Avoid the "rotting packet" problem (see above). */ 931 /* HACK: Avoid the "rotting packet" problem (see above). */
932 if (qup->__packet_receive_read != 932 if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
1296 info->napi_enabled = true; 1296 info->napi_enabled = true;
1297 1297
1298 /* Enable the ingress interrupt. */ 1298 /* Enable the ingress interrupt. */
1299 enable_percpu_irq(priv->intr_id); 1299 enable_percpu_irq(priv->intr_id, 0);
1300} 1300}
1301 1301
1302 1302
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1697 for (i = 0; i < sh->nr_frags; i++) { 1697 for (i = 0; i < sh->nr_frags; i++) {
1698 1698
1699 skb_frag_t *f = &sh->frags[i]; 1699 skb_frag_t *f = &sh->frags[i];
1700 unsigned long pfn = page_to_pfn(f->page); 1700 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1701 1701
1702 /* FIXME: Compute "hash_for_home" properly. */ 1702 /* FIXME: Compute "hash_for_home" properly. */
1703 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ 1703 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1706 /* FIXME: Hmmm. */ 1706 /* FIXME: Hmmm. */
1707 if (!hash_default) { 1707 if (!hash_default) {
1708 void *va = pfn_to_kaddr(pfn) + f->page_offset; 1708 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1709 BUG_ON(PageHighMem(f->page)); 1709 BUG_ON(PageHighMem(skb_frag_page(f)));
1710 finv_buffer_remote(va, f->size, 0); 1710 finv_buffer_remote(va, f->size, 0);
1711 } 1711 }
1712 1712
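Two mechanical API updates drive the tilepro hunks: enable_percpu_irq() grew an IRQ-flags argument (0 leaves the trigger type unchanged), and fragment pages are now reached through the skb_frag_page() accessor. A small sketch of the accessor-based walk; the function name is illustrative:

    #include <linux/mm.h>
    #include <linux/skbuff.h>

    static void sketch_walk_frags(struct skb_shared_info *sh)
    {
            int i;

            for (i = 0; i < sh->nr_frags; i++) {
                    skb_frag_t *f = &sh->frags[i];
                    unsigned long pfn = page_to_pfn(skb_frag_page(f));

                    (void)pfn;      /* use the pfn here */
            }
    }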
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff6..dd008b0e6417 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
191 .chain_noise_scale = 1000, 191 .chain_noise_scale = 1000,
192 .wd_timeout = IWL_DEF_WD_TIMEOUT, 192 .wd_timeout = IWL_DEF_WD_TIMEOUT,
193 .max_event_log_size = 128, 193 .max_event_log_size = 128,
194 .wd_disable = true,
194}; 195};
195static struct iwl_ht_params iwl1000_ht_params = { 196static struct iwl_ht_params iwl1000_ht_params = {
196 .ht_greenfield_support = true, 197 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a8..f55fb2d1af52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
364 .wd_timeout = IWL_LONG_WD_TIMEOUT, 364 .wd_timeout = IWL_LONG_WD_TIMEOUT,
365 .max_event_log_size = 512, 365 .max_event_log_size = 512,
366 .no_idle_support = true, 366 .no_idle_support = true,
367 .wd_disable = true,
367}; 368};
368static struct iwl_ht_params iwl5000_ht_params = { 369static struct iwl_ht_params iwl5000_ht_params = {
369 .ht_greenfield_support = true, 370 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c89..a7a6def40d05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
528 return 0; 528 return 0;
529} 529}
530 530
531void iwlagn_config_ht40(struct ieee80211_conf *conf,
532 struct iwl_rxon_context *ctx)
533{
534 if (conf_is_ht40_minus(conf)) {
535 ctx->ht.extension_chan_offset =
536 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
537 ctx->ht.is_40mhz = true;
538 } else if (conf_is_ht40_plus(conf)) {
539 ctx->ht.extension_chan_offset =
540 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
541 ctx->ht.is_40mhz = true;
542 } else {
543 ctx->ht.extension_chan_offset =
544 IEEE80211_HT_PARAM_CHA_SEC_NONE;
545 ctx->ht.is_40mhz = false;
546 }
547}
548
531int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) 549int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
532{ 550{
533 struct iwl_priv *priv = hw->priv; 551 struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
586 ctx->ht.enabled = conf_is_ht(conf); 604 ctx->ht.enabled = conf_is_ht(conf);
587 605
588 if (ctx->ht.enabled) { 606 if (ctx->ht.enabled) {
589 if (conf_is_ht40_minus(conf)) { 607 /* if HT40 is used, it should not change
590 ctx->ht.extension_chan_offset = 608 * after associated except channel switch */
591 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 609 if (iwl_is_associated_ctx(ctx) &&
592 ctx->ht.is_40mhz = true; 610 !ctx->ht.is_40mhz)
593 } else if (conf_is_ht40_plus(conf)) { 611 iwlagn_config_ht40(conf, ctx);
594 ctx->ht.extension_chan_offset =
595 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
596 ctx->ht.is_40mhz = true;
597 } else {
598 ctx->ht.extension_chan_offset =
599 IEEE80211_HT_PARAM_CHA_SEC_NONE;
600 ctx->ht.is_40mhz = false;
601 }
602 } else 612 } else
603 ctx->ht.is_40mhz = false; 613 ctx->ht.is_40mhz = false;
604 614
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed6283623932..4b2aa1da0953 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1268 1268
1269 switch (keyconf->cipher) { 1269 switch (keyconf->cipher) {
1270 case WLAN_CIPHER_SUITE_TKIP: 1270 case WLAN_CIPHER_SUITE_TKIP:
1271 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1272 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1273
1274 if (sta) 1271 if (sta)
1275 addr = sta->addr; 1272 addr = sta->addr;
1276 else /* station mode case only */ 1273 else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1283 seq.tkip.iv32, p1k, CMD_SYNC); 1280 seq.tkip.iv32, p1k, CMD_SYNC);
1284 break; 1281 break;
1285 case WLAN_CIPHER_SUITE_CCMP: 1282 case WLAN_CIPHER_SUITE_CCMP:
1286 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1287 /* fall through */
1288 case WLAN_CIPHER_SUITE_WEP40: 1283 case WLAN_CIPHER_SUITE_WEP40:
1289 case WLAN_CIPHER_SUITE_WEP104: 1284 case WLAN_CIPHER_SUITE_WEP104:
1290 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1285 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a7..bacc06c95e7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2316 return -EOPNOTSUPP; 2316 return -EOPNOTSUPP;
2317 } 2317 }
2318 2318
2319 switch (key->cipher) {
2320 case WLAN_CIPHER_SUITE_TKIP:
2321 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2322 /* fall through */
2323 case WLAN_CIPHER_SUITE_CCMP:
2324 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2325 break;
2326 default:
2327 break;
2328 }
2329
2319 /* 2330 /*
2320 * We could program these keys into the hardware as well, but we 2331 * We could program these keys into the hardware as well, but we
2321 * don't expect much multicast traffic in IBSS and having keys 2332 * don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
2599 2610
2600 /* Configure HT40 channels */ 2611 /* Configure HT40 channels */
2601 ctx->ht.enabled = conf_is_ht(conf); 2612 ctx->ht.enabled = conf_is_ht(conf);
2602 if (ctx->ht.enabled) { 2613 if (ctx->ht.enabled)
2603 if (conf_is_ht40_minus(conf)) { 2614 iwlagn_config_ht40(conf, ctx);
2604 ctx->ht.extension_chan_offset = 2615 else
2605 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2606 ctx->ht.is_40mhz = true;
2607 } else if (conf_is_ht40_plus(conf)) {
2608 ctx->ht.extension_chan_offset =
2609 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2610 ctx->ht.is_40mhz = true;
2611 } else {
2612 ctx->ht.extension_chan_offset =
2613 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2614 ctx->ht.is_40mhz = false;
2615 }
2616 } else
2617 ctx->ht.is_40mhz = false; 2616 ctx->ht.is_40mhz = false;
2618 2617
2619 if ((le16_to_cpu(ctx->staging.channel) != ch)) 2618 if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
3499module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); 3498module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
3500MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); 3499MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
3501 3500
3502module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); 3501module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
3503MODULE_PARM_DESC(wd_disable, 3502MODULE_PARM_DESC(wd_disable,
3504 "Disable stuck queue watchdog timer (default: 0 [enabled])"); 3503 "Disable stuck queue watchdog timer 0=system default, "
3504 "1=disable, 2=enable (default: 0)");
3505 3505
3506/* 3506/*
3507 * set bt_coex_active to true, uCode will do kill/defer 3507 * set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a541..3856abaea507 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
86 struct ieee80211_vif *vif, 86 struct ieee80211_vif *vif,
87 struct ieee80211_bss_conf *bss_conf, 87 struct ieee80211_bss_conf *bss_conf,
88 u32 changes); 88 u32 changes);
89void iwlagn_config_ht40(struct ieee80211_conf *conf,
90 struct iwl_rxon_context *ctx);
89 91
90/* uCode */ 92/* uCode */
91int iwlagn_rx_calib_result(struct iwl_priv *priv, 93int iwlagn_rx_calib_result(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140abb..fcf54160e4ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
1810{ 1810{
1811 unsigned int timeout = priv->cfg->base_params->wd_timeout; 1811 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1812 1812
1813 if (timeout && !iwlagn_mod_params.wd_disable) 1813 if (!iwlagn_mod_params.wd_disable) {
1814 mod_timer(&priv->watchdog, 1814 /* use system default */
1815 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); 1815 if (timeout && !priv->cfg->base_params->wd_disable)
1816 else 1816 mod_timer(&priv->watchdog,
1817 del_timer(&priv->watchdog); 1817 jiffies +
1818 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1819 else
1820 del_timer(&priv->watchdog);
1821 } else {
 1822 /* module parameter overrides the default configuration */
1823 if (timeout && iwlagn_mod_params.wd_disable == 2)
1824 mod_timer(&priv->watchdog,
1825 jiffies +
1826 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1827 else
1828 del_timer(&priv->watchdog);
1829 }
1818} 1830}
1819 1831
1820/** 1832/**
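The iwl_setup_watchdog() rewrite implements the tri-state module parameter: 0 follows the per-device default added in iwl-core.h below, 1 forces the watchdog off, 2 forces it on. The decision condenses to something like this sketch; the helper name is hypothetical:

    static bool sketch_watchdog_enabled(struct iwl_priv *priv)
    {
            unsigned int timeout = priv->cfg->base_params->wd_timeout;

            if (!timeout)
                    return false;
            if (iwlagn_mod_params.wd_disable == 0)  /* system default */
                    return !priv->cfg->base_params->wd_disable;
            return iwlagn_mod_params.wd_disable == 2; /* forced on */
    }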
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da3380704..f2fc288f3dd3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
 113 * @shadow_reg_enable: HW shadow register bit 113 * @shadow_reg_enable: HW shadow register bit
114 * @no_idle_support: do not support idle mode 114 * @no_idle_support: do not support idle mode
115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
 116 * @wd_disable: disable watchdog timer
116 */ 117 */
117struct iwl_base_params { 118struct iwl_base_params {
118 int eeprom_size; 119 int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
134 const bool shadow_reg_enable; 135 const bool shadow_reg_enable;
135 const bool no_idle_support; 136 const bool no_idle_support;
136 const bool hd_v2; 137 const bool hd_v2;
138 const bool wd_disable;
137}; 139};
138/* 140/*
139 * @advanced_bt_coexist: support advanced bt coexist 141 * @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c45..14eaf37ce3b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
120 * @restart_fw: restart firmware, default = 1 120 * @restart_fw: restart firmware, default = 1
121 * @plcp_check: enable plcp health check, default = true 121 * @plcp_check: enable plcp health check, default = true
122 * @ack_check: disable ack health check, default = false 122 * @ack_check: disable ack health check, default = false
123 * @wd_disable: enable stuck queue check, default = false 123 * @wd_disable: enable stuck queue check, default = 0
124 * @bt_coex_active: enable bt coex, default = true 124 * @bt_coex_active: enable bt coex, default = true
125 * @led_mode: system default, default = 0 125 * @led_mode: system default, default = 0
126 * @no_sleep_autoadjust: disable autoadjust, default = true 126 * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
141 int restart_fw; 141 int restart_fw;
142 bool plcp_check; 142 bool plcp_check;
143 bool ack_check; 143 bool ack_check;
144 bool wd_disable; 144 int wd_disable;
145 bool bt_coex_active; 145 bool bt_coex_active;
146 int led_mode; 146 int led_mode;
147 bool no_sleep_autoadjust; 147 bool no_sleep_autoadjust;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1ae270eed51a..15e332d08c8d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
1668 "netback/%u", group); 1668 "netback/%u", group);
1669 1669
1670 if (IS_ERR(netbk->task)) { 1670 if (IS_ERR(netbk->task)) {
1671 printk(KERN_ALERT "kthread_run() fails at netback\n"); 1671 printk(KERN_ALERT "kthread_create() fails at netback\n");
1672 del_timer(&netbk->net_timer); 1672 del_timer(&netbk->net_timer);
1673 rc = PTR_ERR(netbk->task); 1673 rc = PTR_ERR(netbk->task);
1674 goto failed_init; 1674 goto failed_init;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 19c0115092dd..0f0cfa3bca30 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
30#ifndef NO_IRQ
31#define NO_IRQ 0
32#endif
33
34/** 29/**
35 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space 30 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
36 * @device: Device node of the device whose interrupt is to be mapped 31 * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
44 struct of_irq oirq; 39 struct of_irq oirq;
45 40
46 if (of_irq_map_one(dev, index, &oirq)) 41 if (of_irq_map_one(dev, index, &oirq))
47 return NO_IRQ; 42 return 0;
48 43
49 return irq_create_of_mapping(oirq.controller, oirq.specifier, 44 return irq_create_of_mapping(oirq.controller, oirq.specifier,
50 oirq.size); 45 oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
345 340
346 /* Only dereference the resource if both the 341 /* Only dereference the resource if both the
347 * resource and the irq are valid. */ 342 * resource and the irq are valid. */
348 if (r && irq != NO_IRQ) { 343 if (r && irq) {
349 r->start = r->end = irq; 344 r->start = r->end = irq;
350 r->flags = IORESOURCE_IRQ; 345 r->flags = IORESOURCE_IRQ;
351 r->name = dev->full_name; 346 r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
363{ 358{
364 int nr = 0; 359 int nr = 0;
365 360
366 while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ) 361 while (of_irq_to_resource(dev, nr, NULL))
367 nr++; 362 nr++;
368 363
369 return nr; 364 return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
383 int i; 378 int i;
384 379
385 for (i = 0; i < nr_irqs; i++, res++) 380 for (i = 0; i < nr_irqs; i++, res++)
386 if (of_irq_to_resource(dev, i, res) == NO_IRQ) 381 if (!of_irq_to_resource(dev, i, res))
387 break; 382 break;
388 383
389 return i; 384 return i;
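With the NO_IRQ fallback removed from of/irq.c, zero is the single "no interrupt" value across architectures, so callers can test the mapping directly. A sketch of the caller-side idiom; the function name is illustrative:

    #include <linux/of_irq.h>

    static int sketch_get_irq(struct device_node *np)
    {
            int virq = irq_of_parse_and_map(np, 0);

            if (!virq)              /* 0 now means "no interrupt" */
                    return -ENODEV;
            return virq;
    }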
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095c..f8c752e408a6 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
239 return err; 239 return err;
240} 240}
241 241
242static int timer_mode;
243
242static int __init oprofile_init(void) 244static int __init oprofile_init(void)
243{ 245{
244 int err; 246 int err;
245 247
 248 /* always init architecture to set up backtrace support */
246 err = oprofile_arch_init(&oprofile_ops); 249 err = oprofile_arch_init(&oprofile_ops);
247 if (err < 0 || timer) { 250
248 printk(KERN_INFO "oprofile: using timer interrupt.\n"); 251 timer_mode = err || timer; /* fall back to timer mode on errors */
252 if (timer_mode) {
253 if (!err)
254 oprofile_arch_exit();
249 err = oprofile_timer_init(&oprofile_ops); 255 err = oprofile_timer_init(&oprofile_ops);
250 if (err) 256 if (err)
251 return err; 257 return err;
252 } 258 }
253 return oprofilefs_register(); 259
260 err = oprofilefs_register();
261 if (!err)
262 return 0;
263
264 /* failed */
265 if (timer_mode)
266 oprofile_timer_exit();
267 else
268 oprofile_arch_exit();
269
270 return err;
254} 271}
255 272
256 273
257static void __exit oprofile_exit(void) 274static void __exit oprofile_exit(void)
258{ 275{
259 oprofile_timer_exit();
260 oprofilefs_unregister(); 276 oprofilefs_unregister();
261 oprofile_arch_exit(); 277 if (timer_mode)
278 oprofile_timer_exit();
279 else
280 oprofile_arch_exit();
262} 281}
263 282
264 283
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f510..878fba126582 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
110 ops->start = oprofile_hrtimer_start; 110 ops->start = oprofile_hrtimer_start;
111 ops->stop = oprofile_hrtimer_stop; 111 ops->stop = oprofile_hrtimer_stop;
112 ops->cpu_type = "timer"; 112 ops->cpu_type = "timer";
113 printk(KERN_INFO "oprofile: using timer interrupt.\n");
113 return 0; 114 return 0;
114} 115}
115 116
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471d..dcdc1f4a4624 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
121 int illumination_supported:1; 121 int illumination_supported:1;
122 int video_supported:1; 122 int video_supported:1;
123 int fan_supported:1; 123 int fan_supported:1;
124 int system_event_supported:1;
124 125
125 struct mutex mutex; 126 struct mutex mutex;
126}; 127};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
724 u32 hci_result; 725 u32 hci_result;
725 u32 value; 726 u32 value;
726 727
727 if (!dev->key_event_valid) { 728 if (!dev->key_event_valid && dev->system_event_supported) {
728 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); 729 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
729 if (hci_result == HCI_SUCCESS) { 730 if (hci_result == HCI_SUCCESS) {
730 dev->key_event_valid = 1; 731 dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
964 965
965 /* enable event fifo */ 966 /* enable event fifo */
966 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); 967 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
968 if (hci_result == HCI_SUCCESS)
969 dev->system_event_supported = 1;
967 970
968 props.type = BACKLIGHT_PLATFORM; 971 props.type = BACKLIGHT_PLATFORM;
969 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 972 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1032{ 1035{
1033 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); 1036 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
1034 u32 hci_result, value; 1037 u32 hci_result, value;
1038 int retries = 3;
1035 1039
1036 if (event != 0x80) 1040 if (!dev->system_event_supported || event != 0x80)
1037 return; 1041 return;
1042
1038 do { 1043 do {
1039 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); 1044 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
1040 if (hci_result == HCI_SUCCESS) { 1045 switch (hci_result) {
1046 case HCI_SUCCESS:
1041 if (value == 0x100) 1047 if (value == 0x100)
1042 continue; 1048 continue;
1043 /* act on key press; ignore key release */ 1049 /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1049 pr_info("Unknown key %x\n", 1055 pr_info("Unknown key %x\n",
1050 value); 1056 value);
1051 } 1057 }
1052 } else if (hci_result == HCI_NOT_SUPPORTED) { 1058 break;
1059 case HCI_NOT_SUPPORTED:
1053 /* This is a workaround for an unresolved issue on 1060 /* This is a workaround for an unresolved issue on
1054 * some machines where system events sporadically 1061 * some machines where system events sporadically
1055 * become disabled. */ 1062 * become disabled. */
1056 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); 1063 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
1057 pr_notice("Re-enabled hotkeys\n"); 1064 pr_notice("Re-enabled hotkeys\n");
1065 /* fall through */
1066 default:
1067 retries--;
1068 break;
1058 } 1069 }
1059 } while (hci_result != HCI_EMPTY); 1070 } while (retries && hci_result != HCI_EMPTY);
1060} 1071}
1061 1072
1062 1073
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b00..01fa671ec97f 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
61#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5) 61#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
62#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6) 62#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
63#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7) 63#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
64#define PMIC_BATT_CHR_EXCPT_MASK 0xC6 64#define PMIC_BATT_CHR_EXCPT_MASK 0x86
65
65#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31) 66#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
66#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF 67#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
67 68
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
304 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; 305 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
305 pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT); 306 pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
306 batt_exception = 1; 307 batt_exception = 1;
307 } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
308 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
309 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
310 pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
311 batt_exception = 1;
312 } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) { 308 } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
313 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; 309 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
314 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; 310 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
316 batt_exception = 1; 312 batt_exception = 1;
317 } else { 313 } else {
318 pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD; 314 pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
315 if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
316 /* PMIC will change charging current automatically */
317 pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
318 }
319 } 319 }
320 } 320 }
321 321
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546d..10451a15e828 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
101 101
102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) 102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
103{ 103{
104 return 1; /* always round timer functions to one nanosecond */ 104 tp->tv_sec = 0;
105 tp->tv_nsec = 1;
106 return 0;
105} 107}
106 108
107static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) 109static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
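The ptp hunk brings ptp_clock_getres() in line with the posix_clock contract: resolution is reported through the timespec argument and the return value is a status code, not the resolution itself. A sketch of what a userspace caller now sees, assuming a PTP character device at the hypothetical path /dev/ptp0; FD_TO_CLOCKID follows the kernel's testptp.c example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Dynamic posix-clock id from an open chardev fd. */
    #define CLOCKFD 3
    #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

    int main(void)
    {
            struct timespec res;
            int fd = open("/dev/ptp0", O_RDWR);     /* hypothetical node */

            if (fd < 0)
                    return 1;
            /* With the fix, this returns 0 and res = {0, 1}. */
            if (!clock_getres(FD_TO_CLOCKID(fd), &res))
                    printf("resolution: %ld ns\n", res.tv_nsec);
            close(fd);
            return 0;
    }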
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10cd..691b1ab1a3d0 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
851 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 851 INIT_WORK(&priv->idb_work, tsi721_db_dpc);
852 852
853 /* Allocate buffer for inbound doorbells queue */ 853 /* Allocate buffer for inbound doorbells queue */
854 priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, 854 priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
856 &priv->idb_dma, GFP_KERNEL); 856 &priv->idb_dma, GFP_KERNEL);
857 if (!priv->idb_base) 857 if (!priv->idb_base)
858 return -ENOMEM; 858 return -ENOMEM;
859 859
860 memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
861
862 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", 860 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
863 priv->idb_base, (unsigned long long)priv->idb_dma); 861 priv->idb_base, (unsigned long long)priv->idb_dma);
864 862
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
904 */ 902 */
905 903
906 /* Allocate space for DMA descriptors */ 904 /* Allocate space for DMA descriptors */
907 bd_ptr = dma_alloc_coherent(&priv->pdev->dev, 905 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
908 bd_num * sizeof(struct tsi721_dma_desc), 906 bd_num * sizeof(struct tsi721_dma_desc),
909 &bd_phys, GFP_KERNEL); 907 &bd_phys, GFP_KERNEL);
910 if (!bd_ptr) 908 if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
913 priv->bdma[chnum].bd_phys = bd_phys; 911 priv->bdma[chnum].bd_phys = bd_phys;
914 priv->bdma[chnum].bd_base = bd_ptr; 912 priv->bdma[chnum].bd_base = bd_ptr;
915 913
916 memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
917
918 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", 914 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
919 bd_ptr, (unsigned long long)bd_phys); 915 bd_ptr, (unsigned long long)bd_phys);
920 916
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
922 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 918 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
923 bd_num : TSI721_DMA_MINSTSSZ; 919 bd_num : TSI721_DMA_MINSTSSZ;
924 sts_size = roundup_pow_of_two(sts_size); 920 sts_size = roundup_pow_of_two(sts_size);
925 sts_ptr = dma_alloc_coherent(&priv->pdev->dev, 921 sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
926 sts_size * sizeof(struct tsi721_dma_sts), 922 sts_size * sizeof(struct tsi721_dma_sts),
927 &sts_phys, GFP_KERNEL); 923 &sts_phys, GFP_KERNEL);
928 if (!sts_ptr) { 924 if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
938 priv->bdma[chnum].sts_base = sts_ptr; 934 priv->bdma[chnum].sts_base = sts_ptr;
939 priv->bdma[chnum].sts_size = sts_size; 935 priv->bdma[chnum].sts_size = sts_size;
940 936
941 memset(sts_ptr, 0, sts_size);
942
943 dev_dbg(&priv->pdev->dev, 937 dev_dbg(&priv->pdev->dev,
944 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", 938 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
945 sts_ptr, (unsigned long long)sts_phys, sts_size); 939 sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1400 1394
1401 /* Outbound message descriptor status FIFO allocation */ 1395 /* Outbound message descriptor status FIFO allocation */
1402 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1396 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1403 priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, 1397 priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
1404 priv->omsg_ring[mbox].sts_size * 1398 priv->omsg_ring[mbox].sts_size *
1405 sizeof(struct tsi721_dma_sts), 1399 sizeof(struct tsi721_dma_sts),
1406 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1400 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1412 goto out_desc; 1406 goto out_desc;
1413 } 1407 }
1414 1408
1415 memset(priv->omsg_ring[mbox].sts_base, 0,
1416 entries * sizeof(struct tsi721_dma_sts));
1417
1418 /* 1409 /*
1419 * Configure Outbound Messaging Engine 1410 * Configure Outbound Messaging Engine
1420 */ 1411 */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2116 INIT_LIST_HEAD(&mport->dbells); 2107 INIT_LIST_HEAD(&mport->dbells);
2117 2108
2118 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 2109 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2119 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 2110 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
2120 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); 2111 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
2121 strcpy(mport->name, "Tsi721 mport"); 2112 strcpy(mport->name, "Tsi721 mport");
2122 2113
2123 /* Hook up interrupt handler */ 2114 /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2163 const struct pci_device_id *id) 2154 const struct pci_device_id *id)
2164{ 2155{
2165 struct tsi721_device *priv; 2156 struct tsi721_device *priv;
2166 int i; 2157 int i, cap;
2167 int err; 2158 int err;
2168 u32 regval; 2159 u32 regval;
2169 2160
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2271 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); 2262 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
2272 } 2263 }
2273 2264
2274 /* Clear "no snoop" and "relaxed ordering" bits. */ 2265 cap = pci_pcie_cap(pdev);
2275 pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval); 2266 BUG_ON(cap == 0);
2276 regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); 2267
2277 pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval); 2268 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
2269 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
2270 regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2271 PCI_EXP_DEVCTL_NOSNOOP_EN);
2272 regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
2273 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
2274
2275 /* Adjust PCIe completion timeout. */
2276 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
2277 regval &= ~(0x0f);
2278 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
2278 2279
2279 /* 2280 /*
2280 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block 2281 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
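The tsi721 hunks fold each dma_alloc_coherent() + memset() pair into dma_zalloc_coherent(). Besides being shorter, this removes the length mismatches visible above, where one memset cleared sts_size bytes against an allocation of sts_size * sizeof(struct tsi721_dma_sts). A sketch of the conversion; the wrapper is illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *sketch_alloc_ring(struct pci_dev *pdev, size_t size,
                                   dma_addr_t *phys)
    {
            /* One call allocates and zeroes the whole buffer, so the
             * cleared length can never drift from the allocated one. */
            return dma_zalloc_coherent(&pdev->dev, size, phys, GFP_KERNEL);
    }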
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb1402..822e54c394d5 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
72#define TSI721_MSIXPBA_OFFSET 0x2a000 72#define TSI721_MSIXPBA_OFFSET 0x2a000
73#define TSI721_PCIECFG_EPCTL 0x400 73#define TSI721_PCIECFG_EPCTL 0x400
74 74
75#define MAX_READ_REQUEST_SZ_SHIFT 12
76
75/* 77/*
76 * Event Management Registers 78 * Event Management Registers
77 */ 79 */
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2f..dc4c2748bbc3 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
63 */ 63 */
64 delta = timespec_sub(old_system, old_rtc); 64 delta = timespec_sub(old_system, old_rtc);
65 delta_delta = timespec_sub(delta, old_delta); 65 delta_delta = timespec_sub(delta, old_delta);
66 if (abs(delta_delta.tv_sec) >= 2) { 66 if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
67 /* 67 /*
68 * if delta_delta is too large, assume time correction 68 * if delta_delta is too large, assume time correction
 69 * has occurred and set old_delta to the current delta. 69 * has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
97 rtc_tm_to_time(&tm, &new_rtc.tv_sec); 97 rtc_tm_to_time(&tm, &new_rtc.tv_sec);
98 new_rtc.tv_nsec = 0; 98 new_rtc.tv_nsec = 0;
99 99
100 if (new_rtc.tv_sec <= old_rtc.tv_sec) { 100 if (new_rtc.tv_sec < old_rtc.tv_sec) {
101 if (new_rtc.tv_sec < old_rtc.tv_sec) 101 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
102 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
103 return 0; 102 return 0;
104 } 103 }
105 104
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
116 sleep_time = timespec_sub(sleep_time, 115 sleep_time = timespec_sub(sleep_time,
117 timespec_sub(new_system, old_system)); 116 timespec_sub(new_system, old_system));
118 117
119 timekeeping_inject_sleeptime(&sleep_time); 118 if (sleep_time.tv_sec >= 0)
119 timekeeping_inject_sleeptime(&sleep_time);
120 return 0; 120 return 0;
121} 121}
122 122
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a007..fa4d9f324189 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
319} 319}
320EXPORT_SYMBOL_GPL(rtc_read_alarm); 320EXPORT_SYMBOL_GPL(rtc_read_alarm);
321 321
322static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
323{
324 int err;
325
326 if (!rtc->ops)
327 err = -ENODEV;
328 else if (!rtc->ops->set_alarm)
329 err = -EINVAL;
330 else
331 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
332
333 return err;
334}
335
322static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 336static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
323{ 337{
324 struct rtc_time tm; 338 struct rtc_time tm;
@@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
342 * over right here, before we set the alarm. 356 * over right here, before we set the alarm.
343 */ 357 */
344 358
345 if (!rtc->ops) 359 return ___rtc_set_alarm(rtc, alarm);
346 err = -ENODEV;
347 else if (!rtc->ops->set_alarm)
348 err = -EINVAL;
349 else
350 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
351
352 return err;
353} 360}
354 361
355int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 362int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
763 return 0; 770 return 0;
764} 771}
765 772
773static void rtc_alarm_disable(struct rtc_device *rtc)
774{
775 struct rtc_wkalrm alarm;
776 struct rtc_time tm;
777
778 __rtc_read_time(rtc, &tm);
779
780 alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
781 ktime_set(300, 0)));
782 alarm.enabled = 0;
783
784 ___rtc_set_alarm(rtc, &alarm);
785}
786
766/** 787/**
767 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue 788 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
768 * @rtc rtc device 789 * @rtc rtc device
@@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
784 struct rtc_wkalrm alarm; 805 struct rtc_wkalrm alarm;
785 int err; 806 int err;
786 next = timerqueue_getnext(&rtc->timerqueue); 807 next = timerqueue_getnext(&rtc->timerqueue);
787 if (!next) 808 if (!next) {
809 rtc_alarm_disable(rtc);
788 return; 810 return;
811 }
789 alarm.time = rtc_ktime_to_tm(next->expires); 812 alarm.time = rtc_ktime_to_tm(next->expires);
790 alarm.enabled = 1; 813 alarm.enabled = 1;
791 err = __rtc_set_alarm(rtc, &alarm); 814 err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +870,8 @@ again:
847 err = __rtc_set_alarm(rtc, &alarm); 870 err = __rtc_set_alarm(rtc, &alarm);
848 if (err == -ETIME) 871 if (err == -ETIME)
849 goto again; 872 goto again;
850 } 873 } else
874 rtc_alarm_disable(rtc);
851 875
852 mutex_unlock(&rtc->ops_lock); 876 mutex_unlock(&rtc->ops_lock);
853} 877}
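The interface.c hunks add rtc_alarm_disable() and call it whenever the timer queue drains, so a stale hardware alarm cannot fire after its rtc_timer has been removed. A condensed sketch of the drain-side logic, with names from the diff:

    static void sketch_reprogram_or_park(struct rtc_device *rtc)
    {
            struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
            struct rtc_wkalrm alarm;

            if (!next) {
                    rtc_alarm_disable(rtc); /* nothing pending: park alarm */
                    return;
            }
            alarm.time = rtc_ktime_to_tm(next->expires);
            alarm.enabled = 1;
            __rtc_set_alarm(rtc, &alarm);
    }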
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f02..5b979d9cc332 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
202 void __iomem *base = s3c_rtc_base; 202 void __iomem *base = s3c_rtc_base;
203 int year = tm->tm_year - 100; 203 int year = tm->tm_year - 100;
204 204
205 clk_enable(rtc_clk);
206 pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n", 205 pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
207 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 206 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
208 tm->tm_hour, tm->tm_min, tm->tm_sec); 207 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
214 return -EINVAL; 213 return -EINVAL;
215 } 214 }
216 215
216 clk_enable(rtc_clk);
217 writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC); 217 writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
218 writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN); 218 writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
219 writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR); 219 writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd43..a84631a7391d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
529int chsc_chp_vary(struct chp_id chpid, int on) 529int chsc_chp_vary(struct chp_id chpid, int on)
530{ 530{
531 struct channel_path *chp = chpid_to_chp(chpid); 531 struct channel_path *chp = chpid_to_chp(chpid);
532 struct chp_link link;
533 532
534 memset(&link, 0, sizeof(struct chp_link));
535 link.chpid = chpid;
536 /* Wait until previous actions have settled. */ 533 /* Wait until previous actions have settled. */
537 css_wait_for_slow_path(); 534 css_wait_for_slow_path();
538 /* 535 /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 542 /* Try to update the channel path descriptor. */ 539 /* Try to update the channel path descriptor. */
543 chsc_determine_base_channel_path_desc(chpid, &chp->desc); 540 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
544 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 541 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
545 __s390_vary_chpid_on, &link); 542 __s390_vary_chpid_on, &chpid);
546 } else 543 } else
547 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 544 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
548 NULL, &link); 545 NULL, &chpid);
549 546
550 return 0; 547 return 0;
551} 548}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e5..4a1ff5c2eb88 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71/*
72 * When rescheduled, todo's with higher values will overwrite those
73 * with lower values.
74 */
71enum sch_todo { 75enum sch_todo {
72 SCH_TODO_NOTHING, 76 SCH_TODO_NOTHING,
77 SCH_TODO_EVAL,
73 SCH_TODO_UNREG, 78 SCH_TODO_UNREG,
74}; 79};
75 80
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1c..21908e67bf67 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
195} 195}
196EXPORT_SYMBOL_GPL(css_sch_device_unregister); 196EXPORT_SYMBOL_GPL(css_sch_device_unregister);
197 197
198static void css_sch_todo(struct work_struct *work)
199{
200 struct subchannel *sch;
201 enum sch_todo todo;
202
203 sch = container_of(work, struct subchannel, todo_work);
204 /* Find out todo. */
205 spin_lock_irq(sch->lock);
206 todo = sch->todo;
207 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
208 sch->schid.sch_no, todo);
209 sch->todo = SCH_TODO_NOTHING;
210 spin_unlock_irq(sch->lock);
211 /* Perform todo. */
212 if (todo == SCH_TODO_UNREG)
213 css_sch_device_unregister(sch);
214 /* Release workqueue ref. */
215 put_device(&sch->dev);
216}
217
218/**
219 * css_sched_sch_todo - schedule a subchannel operation
220 * @sch: subchannel
221 * @todo: todo
222 *
223 * Schedule the operation identified by @todo to be performed on the slow path
224 * workqueue. Do nothing if another operation with higher priority is already
225 * scheduled. Needs to be called with subchannel lock held.
226 */
227void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
228{
229 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
230 sch->schid.ssid, sch->schid.sch_no, todo);
231 if (sch->todo >= todo)
232 return;
233 /* Get workqueue ref. */
234 if (!get_device(&sch->dev))
235 return;
236 sch->todo = todo;
237 if (!queue_work(cio_work_q, &sch->todo_work)) {
238 /* Already queued, release workqueue ref. */
239 put_device(&sch->dev);
240 }
241}
242
243static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 198static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
244{ 199{
245 int i; 200 int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
466 css_schedule_eval(schid); 421 css_schedule_eval(schid);
467} 422}
468 423
424/**
425 * css_sched_sch_todo - schedule a subchannel operation
426 * @sch: subchannel
427 * @todo: todo
428 *
429 * Schedule the operation identified by @todo to be performed on the slow path
430 * workqueue. Do nothing if another operation with higher priority is already
431 * scheduled. Needs to be called with subchannel lock held.
432 */
433void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
434{
435 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
436 sch->schid.ssid, sch->schid.sch_no, todo);
437 if (sch->todo >= todo)
438 return;
439 /* Get workqueue ref. */
440 if (!get_device(&sch->dev))
441 return;
442 sch->todo = todo;
443 if (!queue_work(cio_work_q, &sch->todo_work)) {
444 /* Already queued, release workqueue ref. */
445 put_device(&sch->dev);
446 }
447}
448
449static void css_sch_todo(struct work_struct *work)
450{
451 struct subchannel *sch;
452 enum sch_todo todo;
453 int ret;
454
455 sch = container_of(work, struct subchannel, todo_work);
456 /* Find out todo. */
457 spin_lock_irq(sch->lock);
458 todo = sch->todo;
459 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
460 sch->schid.sch_no, todo);
461 sch->todo = SCH_TODO_NOTHING;
462 spin_unlock_irq(sch->lock);
463 /* Perform todo. */
464 switch (todo) {
465 case SCH_TODO_NOTHING:
466 break;
467 case SCH_TODO_EVAL:
468 ret = css_evaluate_known_subchannel(sch, 1);
469 if (ret == -EAGAIN) {
470 spin_lock_irq(sch->lock);
471 css_sched_sch_todo(sch, todo);
472 spin_unlock_irq(sch->lock);
473 }
474 break;
475 case SCH_TODO_UNREG:
476 css_sch_device_unregister(sch);
477 break;
478 }
479 /* Release workqueue ref. */
480 put_device(&sch->dev);
481}
482
469static struct idset *slow_subchannel_set; 483static struct idset *slow_subchannel_set;
470static spinlock_t slow_subchannel_lock; 484static spinlock_t slow_subchannel_lock;
471static wait_queue_head_t css_eval_wq; 485static wait_queue_head_t css_eval_wq;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0ecac..47269858ecb6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
 	 */
 	cdev->private->flags.resuming = 1;
 	cdev->private->path_new_mask = LPM_ANYPATH;
-	css_schedule_eval(sch->schid);
+	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 	spin_unlock_irq(sch->lock);
-	css_complete_work();
+	css_wait_for_slow_path();
 
 	/* cdev may have been moved to a different subchannel. */
 	sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b12..1b853513c891 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
 	cdev->private->pgid_reset_mask = 0;
 }
 
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+	memset(irb, 0, sizeof(*irb));
+	if (type == FAKE_CMD_IRB) {
+		struct cmd_scsw *scsw = &irb->scsw.cmd;
+		scsw->cc = 1;
+		scsw->fctl = SCSW_FCTL_START_FUNC;
+		scsw->actl = SCSW_ACTL_START_PEND;
+		scsw->stctl = SCSW_STCTL_STATUS_PEND;
+	} else if (type == FAKE_TM_IRB) {
+		struct tm_scsw *scsw = &irb->scsw.tm;
+		scsw->x = 1;
+		scsw->cc = 1;
+		scsw->fctl = SCSW_FCTL_START_FUNC;
+		scsw->actl = SCSW_ACTL_START_PEND;
+		scsw->stctl = SCSW_STCTL_STATUS_PEND;
+	}
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
 	struct subchannel *sch;
 
@@ -520,12 +538,8 @@ callback:
 	ccw_device_done(cdev, DEV_STATE_ONLINE);
 	/* Deliver fake irb to device driver, if needed. */
 	if (cdev->private->flags.fake_irb) {
-		memset(&cdev->private->irb, 0, sizeof(struct irb));
-		cdev->private->irb.scsw.cmd.cc = 1;
-		cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
-		cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
-		cdev->private->irb.scsw.cmd.stctl =
-			SCSW_STCTL_STATUS_PEND;
+		create_fake_irb(&cdev->private->irb,
+				cdev->private->flags.fake_irb);
 		cdev->private->flags.fake_irb = 0;
 		if (cdev->handler)
 			cdev->handler(cdev, cdev->private->intparm,
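create_fake_irb() centralizes what used to be open-coded for command-mode IRBs only, and now also builds a transport-mode (FAKE_TM_IRB) variant. A userspace sketch of the same type-dispatch shape, with invented struct layouts that do not match the real struct irb:

#include <stdio.h>
#include <string.h>

#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB  2

struct fake_irb {
	int mode;	/* records which union member was filled in */
	union {
		struct { int cc, fctl; } cmd;	/* command mode */
		struct { int x, cc, fctl; } tm;	/* transport mode */
	} u;
};

static void fill_fake_irb(struct fake_irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	irb->mode = type;
	if (type == FAKE_CMD_IRB) {
		irb->u.cmd.cc = 1;
		irb->u.cmd.fctl = 1;
	} else if (type == FAKE_TM_IRB) {
		irb->u.tm.x = 1;	/* extra bit only transport mode sets */
		irb->u.tm.cc = 1;
		irb->u.tm.fctl = 1;
	}
}

int main(void)
{
	struct fake_irb irb;

	fill_fake_irb(&irb, FAKE_TM_IRB);
	printf("mode=%d x=%d\n", irb.mode, irb.u.tm.x);
	return 0;
}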
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735e..ec7fb6d3b479 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 	if (cdev->private->state == DEV_STATE_VERIFY) {
 		/* Remember to fake irb when finished. */
 		if (!cdev->private->flags.fake_irb) {
-			cdev->private->flags.fake_irb = 1;
+			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
 			cdev->private->intparm = intparm;
 			return 0;
 		} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 	ret = cio_set_options (sch, flags);
 	if (ret)
 		return ret;
-	/* Adjust requested path mask to excluded varied off paths. */
+	/* Adjust requested path mask to exclude unusable paths. */
 	if (lpm) {
-		lpm &= sch->opm;
+		lpm &= sch->lpm;
 		if (lpm == 0)
 			return -EACCES;
 	}
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->schib.pmcw.ena)
 		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_VERIFY) {
+		/* Remember to fake irb when finished. */
+		if (!cdev->private->flags.fake_irb) {
+			cdev->private->flags.fake_irb = FAKE_TM_IRB;
+			cdev->private->intparm = intparm;
+			return 0;
+		} else
+			/* There's already a fake I/O around. */
+			return -EBUSY;
+	}
 	if (cdev->private->state != DEV_STATE_ONLINE)
 		return -EIO;
-	/* Adjust requested path mask to excluded varied off paths. */
+	/* Adjust requested path mask to exclude unusable paths. */
 	if (lpm) {
-		lpm &= sch->opm;
+		lpm &= sch->lpm;
 		if (lpm == 0)
 			return -EACCES;
 	}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c17..76253dfcc1be 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
 	CDEV_TODO_UNREG_EVAL,
 };
 
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
 struct ccw_device_private {
 	struct ccw_device *cdev;
 	struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
 	unsigned int doverify:1;	/* delayed path verification */
 	unsigned int donotify:1;	/* call notify function */
 	unsigned int recog_done:1;	/* dev. recog. complete */
-	unsigned int fake_irb:1;	/* deliver faked irb */
+	unsigned int fake_irb:2;	/* deliver faked irb */
 	unsigned int resuming:1;	/* recognition while resume */
 	unsigned int pgroup:1;		/* pathgroup is set up */
 	unsigned int mpath:1;		/* multipathing is set up */
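The widening of fake_irb from :1 to :2 matters because the field now carries the IRB type rather than a boolean: FAKE_CMD_IRB (1) still fits in one bit, FAKE_TM_IRB (2) does not. A runnable illustration of why the old width would silently lose the value:

#include <stdio.h>

struct flags_old { unsigned int fake_irb:1; };
struct flags_new { unsigned int fake_irb:2; };

int main(void)
{
	struct flags_old o = { 0 };
	struct flags_new n = { 0 };

	o.fake_irb = 2;	/* reduced modulo 2: stored as 0 in a 1-bit field */
	n.fake_irb = 2;	/* fits: 2 bits hold values 0..3 */
	printf("old=%u new=%u\n", o.fake_irb, n.fake_irb);
	return 0;
}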
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec94f049e995..96bbe9d12a79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
 	rc = ap_init_queue(ap_dev->qid);
 	if (rc == -ENODEV)
 		ap_dev->unregistered = 1;
+	else
+		__ap_schedule_poll_timer();
 }
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df5416..8ba4510a9519 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
 	depends on FSL_SOC
 
 config SPI_FSL_SPI
-	tristate "Freescale SPI controller"
+	bool "Freescale SPI controller"
 	depends on FSL_SOC
 	select SPI_FSL_LIB
 	help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
 	  MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-	tristate "Freescale eSPI controller"
+	bool "Freescale eSPI controller"
 	depends on FSL_SOC
 	select SPI_FSL_LIB
 	help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5ca..acc88b4d2869 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
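The added module.h include here (and in spi-nuc900.c below) reflects the treewide header cleanup in this release: module.h is no longer pulled in implicitly via other headers, so any file using the module macros must include it itself. Illustrative skeleton of an affected driver, not this driver's actual code:

#include <linux/module.h>	/* now required explicitly */
#include <linux/init.h>

static int __init demo_init(void)
{
	return 0;	/* nothing to set up in this sketch */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");	/* one of the macros that needs module.h */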
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41ba..0094c645ff0d 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
 	spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
 	int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 	return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
 		 u16 *res_flags)
 {
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 21c70b2b8311..182e9c873822 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271d..8599545cdf9e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
 	hdr	= (struct iscsi_reject *) cmd->pdu;
 	hdr->reason = reason;
 
-	cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
 		iscsit_release_cmd(cmd);
 		return -1;
 	}
-	memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
 	spin_lock_bh(&conn->cmd_lock);
 	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
 	hdr	= (struct iscsi_reject *) cmd->pdu;
 	hdr->reason = reason;
 
-	cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
 		iscsit_release_cmd(cmd);
 		return -1;
 	}
-	memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
 	if (add_to_conn) {
 		spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
 			" non-existent or non-exported iSCSI LUN:"
 			" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
 		}
-		if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, 1, buf, cmd);
-
 		send_check_condition = 1;
 		goto attach_cmd;
 	}
@@ -1044,6 +1037,8 @@ done:
 		 */
 		send_check_condition = 1;
 	} else {
+		cmd->data_length = cmd->se_cmd.data_length;
+
 		if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
 	 * the backend memory allocation.
 	 */
 	ret = transport_generic_new_cmd(&cmd->se_cmd);
-	if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+	if (ret < 0) {
 		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
 		dump_immediate_data = 1;
 		goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
 	if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-	    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+	    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
 		dump_unsolicited_data = 1;
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
 	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
 		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
 			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-			hdr->residual_count = cpu_to_be32(cmd->residual_count);
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
 			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-			hdr->residual_count = cpu_to_be32(cmd->residual_count);
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 		}
 	}
 	hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
 		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-		hdr->residual_count = cpu_to_be32(cmd->residual_count);
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
 		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-		hdr->residual_count = cpu_to_be32(cmd->residual_count);
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 	}
 	hdr->response = cmd->iscsi_response;
 	hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
 	hdr	= (struct iscsi_tm_rsp *) cmd->pdu;
 	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+	hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
 	hdr->itt = cpu_to_be32(cmd->init_task_tag);
 	cmd->stat_sn = conn->stat_sn++;
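Both reject paths above (and iscsi_target_nego.c below) swap a kzalloc()+memcpy() pair for kmemdup(), which allocates and copies in one call. A userspace equivalent of what kmemdup() does, for reference; memdup() here is a hypothetical helper, not a libc function:

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;	/* caller checks for NULL, as the iSCSI code does */
}

int main(void)
{
	const char pdu[48] = "login request header";
	char *copy = memdup(pdu, sizeof(pdu));
	int ok = copy != NULL;

	free(copy);
	return ok ? 0 : 1;
}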
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f1..1cd6ce373b83 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-	int j = DIV_ROUND_UP(len, 2);
+	int j = DIV_ROUND_UP(len, 2), rc;
 
-	hex2bin(dst, src, j);
+	rc = hex2bin(dst, src, j);
+	if (rc < 0)
+		pr_debug("CHAP string contains non hex digit symbols\n");
 
 	dst[j] = '\0';
 	return j;
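The new rc check relies on hex2bin() returning a negative value for non-hex input rather than silently producing garbage. A userspace analog of that validation; hexval() and hex2bin_model() are illustrative names, not kernel API:

#include <stdio.h>

static int hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

static int hex2bin_model(unsigned char *dst, const char *src, int count)
{
	while (count--) {
		int hi = hexval(*src++);
		int lo = hexval(*src++);

		if (hi < 0 || lo < 0)
			return -1;	/* what the new rc < 0 check reports */
		*dst++ = (unsigned char)((hi << 4) | lo);
	}
	return 0;
}

int main(void)
{
	unsigned char out[2];

	printf("ok=%d bad=%d\n", hex2bin_model(out, "beef", 2),
	       hex2bin_model(out, "bezf", 2));
	return 0;
}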
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae5..f1a02dad05a0 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
 	u32			pdu_send_order;
 	/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
 	u32			pdu_start;
-	u32			residual_count;
 	/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
 	u32			seq_send_order;
 	/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
 	atomic_t		connection_exit;
 	atomic_t		connection_recovery;
 	atomic_t		connection_reinstatement;
-	atomic_t		connection_wait;
 	atomic_t		connection_wait_rcfr;
 	atomic_t		sleep_on_conn_wait_comp;
 	atomic_t		transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
 	atomic_t		session_reinstatement;
 	atomic_t		session_stop_active;
 	atomic_t		sleep_on_sess_wait_comp;
-	atomic_t		transport_wait_cmds;
 	/* connection list */
 	struct list_head	sess_conn_list;
 	struct list_head	cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e500..101b1beb3bca 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 	 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
 	 */
 	if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-		if (se_cmd->se_cmd_flags &
-				SCF_SCSI_RESERVATION_CONFLICT) {
+		if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
 			cmd->i_state = ISTATE_SEND_STATUS;
 			spin_unlock_bh(&cmd->istate_lock);
 			iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93ce..d734bdec24f9 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		pr_err("Could not allocate memory for session\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
 		pr_err("idr_pre_get() for sess_idr failed\n");
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		return -1;
+		kfree(sess);
+		return -ENOMEM;
 	}
 	spin_lock(&sess_idr_lock);
 	idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_sess_ops.\n");
-		return -1;
+		kfree(sess);
+		return -ENOMEM;
 	}
 
 	sess->se_sess = transport_init_session();
-	if (!sess->se_sess) {
+	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		return -1;
+		kfree(sess);
+		return -ENOMEM;
 	}
 
 	return 0;
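Each added kfree(sess) follows the standard error-path rule: an error exit undoes every allocation the function has made so far, so the session struct no longer leaks when a later step fails. A compact userspace sketch of the same shape; types and sizes here are invented:

#include <stdlib.h>

struct session { void *ops; };

static int setup_session(struct session **out)
{
	struct session *sess = calloc(1, sizeof(*sess));

	if (!sess)
		return -1;
	sess->ops = calloc(1, 64);	/* second allocation may fail */
	if (!sess->ops) {
		free(sess);		/* undo the earlier allocation */
		return -1;
	}
	*out = sess;
	return 0;
}

int main(void)
{
	struct session *s;

	if (setup_session(&s))
		return 1;
	free(s->ops);
	free(s);
	return 0;
}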
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9a..98936cb7c294 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
 		return NULL;
 	}
 
-	login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!login->req) {
 		pr_err("Unable to allocate memory for Login Request.\n");
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		goto out;
 	}
-	memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
 	login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
 	if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6b..81d5832fbbd5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
 			&tl_cmd->tl_sense_buf[0]);
 
-	/*
-	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_tasks_bidi = 1;
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
 	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-	if (ret == -ENOMEM) {
-		/* Out of Resources */
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -EINVAL) {
-		/*
-		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
-		 */
-		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-			return PYX_TRANSPORT_RESERVATION_CONFLICT;
-		/*
-		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-		 * sense data.
-		 */
-		return PYX_TRANSPORT_USE_SENSE_REASON;
-	}
-
+	if (ret != 0)
+		return ret;
 	/*
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (se_cmd->t_tasks_bidi) {
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	}
 
 	/* Tell the core about our preallocated memory */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+	return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
 			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-	if (ret < 0)
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-	return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
 	struct tcm_loop_hba *tl_hba = container_of(wwn,
 				struct tcm_loop_hba, tl_hba_wwn);
-	int host_no = tl_hba->sh->host_no;
+
+	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
+		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
 	/*
 	 * Call device_unregister() on the original tl_hba->dev.
 	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
 	 * release *tl_hba;
 	 */
 	device_unregister(&tl_hba->dev);
-
-	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
-		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8b..1dcbef499d6a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 	int alua_access_state, primary = 0, rc;
 	u16 tg_pt_id, rtpi;
 
-	if (!l_port)
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+	if (!l_port) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
 	buf = transport_kmap_first_data_page(cmd);
 
 	/*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
 	if (!l_tg_pt_gp_mem) {
 		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		rc = -EINVAL;
 		goto out;
 	}
 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 	if (!l_tg_pt_gp) {
 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		rc = -EINVAL;
 		goto out;
 	}
 	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 	if (!rc) {
 		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 				" while TPGS_EXPLICT_ALUA is disabled\n");
-		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		rc = -EINVAL;
 		goto out;
 	}
 
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 		 * REQUEST, and the additional sense code set to INVALID
 		 * FIELD IN PARAMETER LIST.
 		 */
-		rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		rc = -EINVAL;
 		goto out;
 	}
 	rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
 			 */
 			if (rc != 0) {
-				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+				rc = -EINVAL;
 				goto out;
 			}
 		} else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 			 * INVALID_PARAMETER_LIST
 			 */
 			if (rc != 0) {
-				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+				rc = -EINVAL;
 				goto out;
 			}
 		}
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 	 * struct t10_alua_lu_gp.
 	 */
 	spin_lock(&lu_gps_lock);
-	atomic_set(&lu_gp->lu_gp_shutdown, 1);
 	list_del(&lu_gp->lu_gp_node);
 	alua_lu_gps_count--;
 	spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
 	tg_pt_gp_mem->tg_pt = port;
 	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-	atomic_set(&port->sep_tg_pt_gp_active, 1);
 
 	return tg_pt_gp_mem;
 }
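Every PYX_TRANSPORT_* conversion in this file (and in target_core_cdb.c, target_core_pr.c and the backends below) follows one convention: the emulation records why the command failed in cmd->scsi_sense_reason and returns a plain negative errno, leaving the transport core to build the check condition. A userspace model of that split; the enum values here are illustrative stand-ins for the real TCM_* codes:

#include <stdio.h>

enum sense_reason {
	TCM_NO_SENSE,
	TCM_UNSUPPORTED_SCSI_OPCODE,
	TCM_INVALID_PARAMETER_LIST,
};

struct cmd_model {
	enum sense_reason scsi_sense_reason;
};

static int emulate_op(struct cmd_model *cmd, int supported)
{
	if (!supported) {
		/* record *why*; the return value only says "failed" */
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		return -1;	/* the kernel code returns -EINVAL/-ENOSYS */
	}
	return 0;
}

int main(void)
{
	struct cmd_model cmd = { TCM_NO_SENSE };

	if (emulate_op(&cmd, 0) < 0)
		printf("failed, sense reason %d\n", cmd.scsi_sense_reason);
	return 0;
}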
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b8247..831468b3163d 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	if (cmd->data_length < 60)
 		return 0;
 
-	buf[2] = 0x3c;
+	buf[3] = 0x3c;
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
 
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
 	if (cmd->data_length < 4) {
 		pr_err("SCSI Inquiry payload length: %u"
 			" too small for EVPD=1\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 		return -EINVAL;
 	}
 
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
 	}
 
 	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 	ret = -EINVAL;
 
 out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
 	default:
 		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
 		       cdb[2] & 0x3f, cdb[3]);
-		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+		return -EINVAL;
 	}
 	offset += length;
 
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
 	if (cdb[1] & 0x01) {
 		pr_err("REQUEST_SENSE description emulation not"
 			" supported\n");
-		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -ENOSYS;
 	}
 
 	buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
 	if (!dev->transport->do_discard) {
 		pr_err("UNMAP emulation not supported for: %s\n",
 				dev->transport->name);
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		return -ENOSYS;
 	}
 
 	/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
 	if (!dev->transport->do_discard) {
 		pr_err("WRITE_SAME emulation not supported"
 				" for: %s\n", dev->transport->name);
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		return -ENOSYS;
 	}
 
 	if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
 int target_emulate_synchronize_cache(struct se_task *task)
 {
 	struct se_device *dev = task->task_se_cmd->se_dev;
+	struct se_cmd *cmd = task->task_se_cmd;
 
 	if (!dev->transport->do_sync_cache) {
 		pr_err("SYNCHRONIZE_CACHE emulation not supported"
 			" for: %s\n", dev->transport->name);
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		return -ENOSYS;
 	}
 
 	dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4e..93d4f6a1b798 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
 			" struct se_subsystem_dev\n");
 		goto unlock;
 	}
-	INIT_LIST_HEAD(&se_dev->se_dev_node);
 	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
 	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
 	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
 			" from allocate_virtdevice()\n");
 		goto out;
 	}
-	spin_lock(&se_device_lock);
-	list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-	spin_unlock(&se_device_lock);
 
 	config_group_init_type_name(&se_dev->se_dev_group, name,
 			&target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
 	mutex_lock(&hba->hba_access_mutex);
 	t = hba->transport;
 
-	spin_lock(&se_device_lock);
-	list_del(&se_dev->se_dev_node);
-	spin_unlock(&se_device_lock);
-
 	dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
 	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
 		df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f8..9b8639425472 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_cmd->se_lun = deve->se_lun;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 	}
 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->orig_fe_lun = 0;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 	}
 	/*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_lun = deve->se_lun;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
 	}
 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -708,7 +705,7 @@ done:
 
 	se_task->task_scsi_status = GOOD;
 	transport_complete_task(se_task, 1);
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	return 0;
 }
 
 /* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 		return -EINVAL;
 	}
 
-	pr_err("dpo_emulated not supported\n");
-	return -EINVAL;
+	if (flag) {
+		pr_err("dpo_emulated not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 		return -EINVAL;
 	}
 
-	if (dev->transport->fua_write_emulated == 0) {
+	if (flag && dev->transport->fua_write_emulated == 0) {
 		pr_err("fua_write_emulated not supported\n");
 		return -EINVAL;
 	}
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 		return -EINVAL;
 	}
 
-	pr_err("ua read emulated not supported\n");
-	return -EINVAL;
+	if (flag) {
+		pr_err("ua read emulated not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
-	if (dev->transport->write_cache_emulated == 0) {
+	if (flag && dev->transport->write_cache_emulated == 0) {
 		pr_err("write_cache_emulated not supported\n");
 		return -EINVAL;
 	}
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
 	 * We expect this value to be non-zero when generic Block Layer
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
-	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
 		pr_err("Generic Block Discard not supported\n");
 		return -ENOSYS;
 	}
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
 	 * We expect this value to be non-zero when generic Block Layer
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
-	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
 		pr_err("Generic Block Discard not supported\n");
 		return -ENOSYS;
 	}
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
 		ret = -ENOMEM;
 		goto out;
 	}
-	INIT_LIST_HEAD(&se_dev->se_dev_node);
 	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
 	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
 	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
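The recurring "flag &&" guards in the se_dev_set_emulate_*() changes encode one rule: writing 0 to an attribute the backend cannot emulate is a harmless no-op that must succeed; only enabling it may be rejected. Minimal illustration:

#include <stdio.h>

static int set_feature(int flag, int supported)
{
	if (flag != 0 && flag != 1)
		return -1;		/* illegal value */
	if (flag && !supported)
		return -1;		/* cannot enable what isn't there */
	return 0;			/* disabling always succeeds */
}

int main(void)
{
	printf("disable on unsupported dev: %d\n", set_feature(0, 0));
	printf("enable on unsupported dev:  %d\n", set_feature(1, 0));
	return 0;
}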
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bfa..b4864fba4ef0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < task->task_sg_nents; i++) {
-		iov[i].iov_len = sg[i].length;
-		iov[i].iov_base = sg_virt(&sg[i]);
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		iov[i].iov_len = sg->length;
+		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < task->task_sg_nents; i++) {
-		iov[i].iov_len = sg[i].length;
-		iov[i].iov_base = sg_virt(&sg[i]);
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		iov[i].iov_len = sg->length;
+		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
 	if (ret > 0 &&
 	    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 	    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-	    cmd->t_tasks_fua) {
+	    (cmd->se_cmd_flags & SCF_FUA)) {
 		/*
 		 * We might need to be a bit smarter here
 		 * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
 
 	}
 
-	if (ret < 0)
+	if (ret < 0) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return ret;
+	}
 	if (ret) {
 		task->task_scsi_status = GOOD;
 		transport_complete_task(task, 1);
 	}
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	return 0;
 }
 
 /* fd_free_task(): (Part of se_subsystem_api_t template)
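fd_do_readv()/fd_do_writev() switch from sg[i] array indexing to for_each_sg() because a scatterlist may be chained: indexing assumes one contiguous array and can walk past a chunk boundary, while the iterator follows the chain. A userspace model with an explicit next pointer; sg_model and the macro are invented for illustration:

#include <stdio.h>

struct sg_model {
	unsigned int length;
	struct sg_model *next;	/* stands in for scatterlist chaining */
};

#define for_each_sg_model(head, sg) \
	for ((sg) = (head); (sg); (sg) = (sg)->next)

int main(void)
{
	struct sg_model c = { 512, NULL };
	struct sg_model b = { 256, &c };
	struct sg_model a = { 128, &b };
	struct sg_model *sg;
	unsigned int total = 0;

	/* safe even when entries are not contiguous in memory */
	for_each_sg_model(&a, sg)
		total += sg->length;
	printf("total=%u\n", total);
	return 0;
}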
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe29262..4aa992204438 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
 	 */
 	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
 	    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-	     task->task_se_cmd->t_tasks_fua))
+	     (cmd->se_cmd_flags & SCF_FUA)))
 		rw = WRITE_FUA;
 	else
 		rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
 	else {
 		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
 				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOSYS;
 	}
 
 	bio = iblock_get_bio(task, block_lba, sg_num);
-	if (!bio)
-		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	if (!bio) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOMEM;
+	}
 
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
 	submit_bio(rw, bio);
 	blk_finish_plug(&plug);
 
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	return 0;
 
 fail:
 	while ((bio = bio_list_pop(&list)))
 		bio_put(bio);
-	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return -ENOMEM;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54f..95dee7074aeb 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
 		pr_err("Received legacy SPC-2 RESERVE/RELEASE"
 			" while active SPC-3 registrations exist,"
 			" returning RESERVATION_CONFLICT\n");
-		*ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
 		return true;
 	}
 
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
 	    (cmd->t_task_cdb[1] & 0x02)) {
 		pr_err("LongIO and Obselete Bits set, returning"
 				" ILLEGAL_REQUEST\n");
-		ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		ret = -EINVAL;
 		goto out;
 	}
 	/*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
 			" from %s \n", cmd->se_lun->unpacked_lun,
 			cmd->se_deve->mapped_lun,
 			sess->se_node_acl->initiatorname);
-		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
 		goto out_unlock;
 	}
 
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
 	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
 	if (!tidh_new) {
 		pr_err("Unable to allocate tidh_new\n");
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
 	}
 	INIT_LIST_HEAD(&tidh_new->dest_list);
 	tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
 				sa_res_key, all_tg_pt, aptpl);
 	if (!local_pr_reg) {
 		kfree(tidh_new);
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOMEM;
 	}
 	tidh_new->dest_pr_reg = local_pr_reg;
 	/*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
 		pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
 			" does not equal CDB data_length: %u\n", tpdl,
 			cmd->data_length);
-		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
 		goto out;
 	}
 	/*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
 					" for tmp_tpg\n");
 				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
 				smp_mb__after_atomic_dec();
-				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				ret = -EINVAL;
 				goto out;
 			}
 			/*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
 				atomic_dec(&dest_node_acl->acl_pr_ref_count);
 				smp_mb__after_atomic_dec();
 				core_scsi3_tpg_undepend_item(tmp_tpg);
-				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				ret = -EINVAL;
 				goto out;
 			}
 
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
 		if (!dest_tpg) {
 			pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
 					" dest_tpg\n");
-			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
 			goto out;
 		}
 #if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
 				" %u for Transport ID: %s\n", tid_len, ptr);
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
-			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
 			goto out;
 		}
 		/*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
 
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
-			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
 			goto out;
 		}
 
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
 			smp_mb__after_atomic_dec();
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
-			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			ret = -EINVAL;
 			goto out;
 		}
 #if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
 			core_scsi3_lunacl_undepend_item(dest_se_deve);
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
-			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			ret = -ENOMEM;
 			goto out;
 		}
 		INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
 			kfree(tidh_new);
-			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
 			goto out;
 		}
 		tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
 
 	if (!se_sess || !se_lun) {
 		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
 	}
 	se_tpg = se_sess->se_tpg;
 	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
 		if (res_key) {
 			pr_warn("SPC-3 PR: Reservation Key non-zero"
 				" for SA REGISTER, returning CONFLICT\n");
-			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EINVAL;
 		}
 		/*
 		 * Do nothing but return GOOD status.
 		 */
 		if (!sa_res_key)
-			return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+			return 0;
 
 		if (!spec_i_pt) {
 			/*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
 			if (ret != 0) {
 				pr_err("Unable to allocate"
 					" struct t10_pr_registration\n");
-				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+				return -EINVAL;
 			}
 		} else {
 			/*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
 					" 0x%016Lx\n", res_key,
 					pr_reg->pr_res_key);
 				core_scsi3_put_pr_reg(pr_reg);
-				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+				cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+				return -EINVAL;
 			}
 		}
 		if (spec_i_pt) {
 			pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
 				" set while sa_res_key=0\n");
 			core_scsi3_put_pr_reg(pr_reg);
-			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			return -EINVAL;
 		}
 		/*
 		 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
 				" registration exists, but ALL_TG_PT=1 bit not"
 				" present in received PROUT\n");
 			core_scsi3_put_pr_reg(pr_reg);
-			return PYX_TRANSPORT_INVALID_CDB_FIELD;
+			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+			return -EINVAL;
 		}
 		/*
 		 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
 				pr_err("Unable to allocate"
 					" pr_aptpl_buf\n");
 				core_scsi3_put_pr_reg(pr_reg);
-				return PYX_TRANSPORT_LU_COMM_FAILURE;
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				return -EINVAL;
 			}
 		}
 		/*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
 		if (pr_holder < 0) {
 			kfree(pr_aptpl_buf);
 			core_scsi3_put_pr_reg(pr_reg);
-			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EINVAL;
 		}
 
 		spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
 
 	if (!se_sess || !se_lun) {
 		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2435 return -EINVAL;
2409 } 2436 }
2410 se_tpg = se_sess->se_tpg; 2437 se_tpg = se_sess->se_tpg;
2411 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2438 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
2417 if (!pr_reg) { 2444 if (!pr_reg) {
2418 pr_err("SPC-3 PR: Unable to locate" 2445 pr_err("SPC-3 PR: Unable to locate"
2419 " PR_REGISTERED *pr_reg for RESERVE\n"); 2446 " PR_REGISTERED *pr_reg for RESERVE\n");
2420 return PYX_TRANSPORT_LU_COMM_FAILURE; 2447 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2448 return -EINVAL;
2421 } 2449 }
2422 /* 2450 /*
2423 * From spc4r17 Section 5.7.9: Reserving: 2451 * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
2433 " does not match existing SA REGISTER res_key:" 2461 " does not match existing SA REGISTER res_key:"
2434 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2462 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2435 core_scsi3_put_pr_reg(pr_reg); 2463 core_scsi3_put_pr_reg(pr_reg);
2436 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2464 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2465 return -EINVAL;
2437 } 2466 }
2438 /* 2467 /*
2439 * From spc4r17 Section 5.7.9: Reserving: 2468 * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
2448 if (scope != PR_SCOPE_LU_SCOPE) { 2477 if (scope != PR_SCOPE_LU_SCOPE) {
2449 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2478 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2450 core_scsi3_put_pr_reg(pr_reg); 2479 core_scsi3_put_pr_reg(pr_reg);
2451 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2480 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2481 return -EINVAL;
2452 } 2482 }
2453 /* 2483 /*
2454 * See if we have an existing PR reservation holder pointer at 2484 * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
2480 2510
2481 spin_unlock(&dev->dev_reservation_lock); 2511 spin_unlock(&dev->dev_reservation_lock);
2482 core_scsi3_put_pr_reg(pr_reg); 2512 core_scsi3_put_pr_reg(pr_reg);
2483 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2513 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2514 return -EINVAL;
2484 } 2515 }
2485 /* 2516 /*
2486 * From spc4r17 Section 5.7.9: Reserving: 2517 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
2503 2534
2504 spin_unlock(&dev->dev_reservation_lock); 2535 spin_unlock(&dev->dev_reservation_lock);
2505 core_scsi3_put_pr_reg(pr_reg); 2536 core_scsi3_put_pr_reg(pr_reg);
2506 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2537 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2538 return -EINVAL;
2507 } 2539 }
2508 /* 2540 /*
2509 * From spc4r17 Section 5.7.9: Reserving: 2541 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
2517 */ 2549 */
2518 spin_unlock(&dev->dev_reservation_lock); 2550 spin_unlock(&dev->dev_reservation_lock);
2519 core_scsi3_put_pr_reg(pr_reg); 2551 core_scsi3_put_pr_reg(pr_reg);
2520 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2552 return 0;
2521 } 2553 }
2522 /* 2554 /*
2523 * Otherwise, our *pr_reg becomes the PR reservation holder for said 2555 * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
2574 default: 2606 default:
2575 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" 2607 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
2576 " 0x%02x\n", type); 2608 " 0x%02x\n", type);
2577 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2609 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2610 return -EINVAL;
2578 } 2611 }
2579 2612
2580 return ret; 2613 return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
2630 2663
2631 if (!se_sess || !se_lun) { 2664 if (!se_sess || !se_lun) {
2632 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2665 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2633 return PYX_TRANSPORT_LU_COMM_FAILURE; 2666 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2667 return -EINVAL;
2634 } 2668 }
2635 /* 2669 /*
2636 * Locate the existing *pr_reg via struct se_node_acl pointers 2670 * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
2639 if (!pr_reg) { 2673 if (!pr_reg) {
2640 pr_err("SPC-3 PR: Unable to locate" 2674 pr_err("SPC-3 PR: Unable to locate"
2641 " PR_REGISTERED *pr_reg for RELEASE\n"); 2675 " PR_REGISTERED *pr_reg for RELEASE\n");
2642 return PYX_TRANSPORT_LU_COMM_FAILURE; 2676 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2677 return -EINVAL;
2643 } 2678 }
2644 /* 2679 /*
2645 * From spc4r17 Section 5.7.11.2 Releasing: 2680 * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
2661 */ 2696 */
2662 spin_unlock(&dev->dev_reservation_lock); 2697 spin_unlock(&dev->dev_reservation_lock);
2663 core_scsi3_put_pr_reg(pr_reg); 2698 core_scsi3_put_pr_reg(pr_reg);
2664 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2699 return 0;
2665 } 2700 }
2666 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 2701 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2667 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) 2702 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
2675 */ 2710 */
2676 spin_unlock(&dev->dev_reservation_lock); 2711 spin_unlock(&dev->dev_reservation_lock);
2677 core_scsi3_put_pr_reg(pr_reg); 2712 core_scsi3_put_pr_reg(pr_reg);
2678 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2713 return 0;
2679 } 2714 }
2680 /* 2715 /*
2681 * From spc4r17 Section 5.7.11.2 Releasing: 2716 * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
2697 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2732 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2698 spin_unlock(&dev->dev_reservation_lock); 2733 spin_unlock(&dev->dev_reservation_lock);
2699 core_scsi3_put_pr_reg(pr_reg); 2734 core_scsi3_put_pr_reg(pr_reg);
2700 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2735 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2736 return -EINVAL;
2701 } 2737 }
2702 /* 2738 /*
2703 * From spc4r17 Section 5.7.11.2 Releasing and above: 2739 * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
2719 2755
2720 spin_unlock(&dev->dev_reservation_lock); 2756 spin_unlock(&dev->dev_reservation_lock);
2721 core_scsi3_put_pr_reg(pr_reg); 2757 core_scsi3_put_pr_reg(pr_reg);
2722 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2758 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2759 return -EINVAL;
2723 } 2760 }
2724 /* 2761 /*
2725 * In response to a persistent reservation release request from the 2762 * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
2802 if (!pr_reg_n) { 2839 if (!pr_reg_n) {
2803 pr_err("SPC-3 PR: Unable to locate" 2840 pr_err("SPC-3 PR: Unable to locate"
2804 " PR_REGISTERED *pr_reg for CLEAR\n"); 2841 " PR_REGISTERED *pr_reg for CLEAR\n");
2805 return PYX_TRANSPORT_LU_COMM_FAILURE; 2842 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2843 return -EINVAL;
2806 } 2844 }
2807 /* 2845 /*
2808 * From spc4r17 section 5.7.11.6, Clearing: 2846 * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
2821 " existing SA REGISTER res_key:" 2859 " existing SA REGISTER res_key:"
2822 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); 2860 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
2823 core_scsi3_put_pr_reg(pr_reg_n); 2861 core_scsi3_put_pr_reg(pr_reg_n);
2824 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2862 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2863 return -EINVAL;
2825 } 2864 }
2826 /* 2865 /*
2827 * a) Release the persistent reservation, if any; 2866 * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
2979 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 3018 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
2980 int prh_type = 0, prh_scope = 0, ret; 3019 int prh_type = 0, prh_scope = 0, ret;
2981 3020
2982 if (!se_sess) 3021 if (!se_sess) {
2983 return PYX_TRANSPORT_LU_COMM_FAILURE; 3022 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3023 return -EINVAL;
3024 }
2984 3025
2985 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3026 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2986 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3027 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
2989 pr_err("SPC-3 PR: Unable to locate" 3030 pr_err("SPC-3 PR: Unable to locate"
2990 " PR_REGISTERED *pr_reg for PREEMPT%s\n", 3031 " PR_REGISTERED *pr_reg for PREEMPT%s\n",
2991 (abort) ? "_AND_ABORT" : ""); 3032 (abort) ? "_AND_ABORT" : "");
2992 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3033 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3034 return -EINVAL;
2993 } 3035 }
2994 if (pr_reg_n->pr_res_key != res_key) { 3036 if (pr_reg_n->pr_res_key != res_key) {
2995 core_scsi3_put_pr_reg(pr_reg_n); 3037 core_scsi3_put_pr_reg(pr_reg_n);
2996 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3038 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3039 return -EINVAL;
2997 } 3040 }
2998 if (scope != PR_SCOPE_LU_SCOPE) { 3041 if (scope != PR_SCOPE_LU_SCOPE) {
2999 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 3042 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
3000 core_scsi3_put_pr_reg(pr_reg_n); 3043 core_scsi3_put_pr_reg(pr_reg_n);
3001 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3044 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3045 return -EINVAL;
3002 } 3046 }
3003 INIT_LIST_HEAD(&preempt_and_abort_list); 3047 INIT_LIST_HEAD(&preempt_and_abort_list);
3004 3048
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
3012 if (!all_reg && !sa_res_key) { 3056 if (!all_reg && !sa_res_key) {
3013 spin_unlock(&dev->dev_reservation_lock); 3057 spin_unlock(&dev->dev_reservation_lock);
3014 core_scsi3_put_pr_reg(pr_reg_n); 3058 core_scsi3_put_pr_reg(pr_reg_n);
3015 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3059 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3060 return -EINVAL;
3016 } 3061 }
3017 /* 3062 /*
3018 * From spc4r17, section 5.7.11.4.4 Removing Registrations: 3063 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
3106 if (!released_regs) { 3151 if (!released_regs) {
3107 spin_unlock(&dev->dev_reservation_lock); 3152 spin_unlock(&dev->dev_reservation_lock);
3108 core_scsi3_put_pr_reg(pr_reg_n); 3153 core_scsi3_put_pr_reg(pr_reg_n);
3109 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3154 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3155 return -EINVAL;
3110 } 3156 }
3111 /* 3157 /*
3112 * For an existing all registrants type reservation 3158 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
3297 default: 3343 default:
3298 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 3344 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
3299 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); 3345 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3300 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3346 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3347 return -EINVAL;
3301 } 3348 }
3302 3349
3303 return ret; 3350 return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3331 3378
3332 if (!se_sess || !se_lun) { 3379 if (!se_sess || !se_lun) {
3333 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3380 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3334 return PYX_TRANSPORT_LU_COMM_FAILURE; 3381 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3382 return -EINVAL;
3335 } 3383 }
3336 memset(dest_iport, 0, 64); 3384 memset(dest_iport, 0, 64);
3337 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3385 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3349 if (!pr_reg) { 3397 if (!pr_reg) {
3350 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" 3398 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
3351 " *pr_reg for REGISTER_AND_MOVE\n"); 3399 " *pr_reg for REGISTER_AND_MOVE\n");
3352 return PYX_TRANSPORT_LU_COMM_FAILURE; 3400 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3401 return -EINVAL;
3353 } 3402 }
3354 /* 3403 /*
3355 * The provided reservation key must match the existing reservation key 3404 * The provided reservation key must match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3360 " res_key: 0x%016Lx does not match existing SA REGISTER" 3409 " res_key: 0x%016Lx does not match existing SA REGISTER"
3361 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); 3410 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3362 core_scsi3_put_pr_reg(pr_reg); 3411 core_scsi3_put_pr_reg(pr_reg);
3363 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3412 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3413 return -EINVAL;
3364 } 3414 }
3365 /* 3415 /*
3366 * The service action reservation key needs to be non-zero 3416 * The service action reservation key needs to be non-zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3369 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" 3419 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
3370 " sa_res_key\n"); 3420 " sa_res_key\n");
3371 core_scsi3_put_pr_reg(pr_reg); 3421 core_scsi3_put_pr_reg(pr_reg);
3372 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3422 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3423 return -EINVAL;
3373 } 3424 }
3374 3425
3375 /* 3426 /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3392 " does not equal CDB data_length: %u\n", tid_len, 3443 " does not equal CDB data_length: %u\n", tid_len,
3393 cmd->data_length); 3444 cmd->data_length);
3394 core_scsi3_put_pr_reg(pr_reg); 3445 core_scsi3_put_pr_reg(pr_reg);
3395 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3446 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3447 return -EINVAL;
3396 } 3448 }
3397 3449
3398 spin_lock(&dev->se_port_lock); 3450 spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3417 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3469 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3418 smp_mb__after_atomic_dec(); 3470 smp_mb__after_atomic_dec();
3419 core_scsi3_put_pr_reg(pr_reg); 3471 core_scsi3_put_pr_reg(pr_reg);
3420 return PYX_TRANSPORT_LU_COMM_FAILURE; 3472 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3473 return -EINVAL;
3421 } 3474 }
3422 3475
3423 spin_lock(&dev->se_port_lock); 3476 spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3430 " fabric ops from Relative Target Port Identifier:" 3483 " fabric ops from Relative Target Port Identifier:"
3431 " %hu\n", rtpi); 3484 " %hu\n", rtpi);
3432 core_scsi3_put_pr_reg(pr_reg); 3485 core_scsi3_put_pr_reg(pr_reg);
3433 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3486 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3487 return -EINVAL;
3434 } 3488 }
3435 3489
3436 buf = transport_kmap_first_data_page(cmd); 3490 buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
3445 " from fabric: %s\n", proto_ident, 3499 " from fabric: %s\n", proto_ident,
3446 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), 3500 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
3447 dest_tf_ops->get_fabric_name()); 3501 dest_tf_ops->get_fabric_name());
3448 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3502 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3503 ret = -EINVAL;
3449 goto out; 3504 goto out;
3450 } 3505 }
3451 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { 3506 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3452 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3507 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3453 " containg a valid tpg_parse_pr_out_transport_id" 3508 " containg a valid tpg_parse_pr_out_transport_id"
3454 " function pointer\n"); 3509 " function pointer\n");
3455 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3510 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3511 ret = -EINVAL;
3456 goto out; 3512 goto out;
3457 } 3513 }
3458 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, 3514 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3460 if (!initiator_str) { 3516 if (!initiator_str) {
3461 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3517 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3462 " initiator_str from Transport ID\n"); 3518 " initiator_str from Transport ID\n");
3463 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3519 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3520 ret = -EINVAL;
3464 goto out; 3521 goto out;
3465 } 3522 }
3466 3523
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3489 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3546 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3490 " matches: %s on received I_T Nexus\n", initiator_str, 3547 " matches: %s on received I_T Nexus\n", initiator_str,
3491 pr_reg_nacl->initiatorname); 3548 pr_reg_nacl->initiatorname);
3492 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3549 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3550 ret = -EINVAL;
3493 goto out; 3551 goto out;
3494 } 3552 }
3495 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { 3553 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3497 " matches: %s %s on received I_T Nexus\n", 3555 " matches: %s %s on received I_T Nexus\n",
3498 initiator_str, iport_ptr, pr_reg_nacl->initiatorname, 3556 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3499 pr_reg->pr_reg_isid); 3557 pr_reg->pr_reg_isid);
3500 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3558 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3559 ret = -EINVAL;
3501 goto out; 3560 goto out;
3502 } 3561 }
3503after_iport_check: 3562after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
3517 pr_err("Unable to locate %s dest_node_acl for" 3576 pr_err("Unable to locate %s dest_node_acl for"
3518 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3577 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3519 initiator_str); 3578 initiator_str);
3520 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3579 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3580 ret = -EINVAL;
3521 goto out; 3581 goto out;
3522 } 3582 }
3523 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 3583 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
3527 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3587 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3528 smp_mb__after_atomic_dec(); 3588 smp_mb__after_atomic_dec();
3529 dest_node_acl = NULL; 3589 dest_node_acl = NULL;
3530 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3590 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3591 ret = -EINVAL;
3531 goto out; 3592 goto out;
3532 } 3593 }
3533#if 0 3594#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
3543 if (!dest_se_deve) { 3604 if (!dest_se_deve) {
3544 pr_err("Unable to locate %s dest_se_deve from RTPI:" 3605 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3545 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3606 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3546 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3607 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3608 ret = -EINVAL;
3547 goto out; 3609 goto out;
3548 } 3610 }
3549 3611
@@ -3553,7 +3615,8 @@ after_iport_check:
3553 atomic_dec(&dest_se_deve->pr_ref_count); 3615 atomic_dec(&dest_se_deve->pr_ref_count);
3554 smp_mb__after_atomic_dec(); 3616 smp_mb__after_atomic_dec();
3555 dest_se_deve = NULL; 3617 dest_se_deve = NULL;
3556 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3618 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3619 ret = -EINVAL;
3557 goto out; 3620 goto out;
3558 } 3621 }
3559#if 0 3622#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
3572 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" 3635 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
3573 " currently held\n"); 3636 " currently held\n");
3574 spin_unlock(&dev->dev_reservation_lock); 3637 spin_unlock(&dev->dev_reservation_lock);
3575 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3638 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3639 ret = -EINVAL;
3576 goto out; 3640 goto out;
3577 } 3641 }
3578 /* 3642 /*
@@ -3585,7 +3649,8 @@ after_iport_check:
3585 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3649 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3586 " Nexus is not reservation holder\n"); 3650 " Nexus is not reservation holder\n");
3587 spin_unlock(&dev->dev_reservation_lock); 3651 spin_unlock(&dev->dev_reservation_lock);
3588 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3652 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3653 ret = -EINVAL;
3589 goto out; 3654 goto out;
3590 } 3655 }
3591 /* 3656 /*
@@ -3603,7 +3668,8 @@ after_iport_check:
3603 " reservation for type: %s\n", 3668 " reservation for type: %s\n",
3604 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); 3669 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3605 spin_unlock(&dev->dev_reservation_lock); 3670 spin_unlock(&dev->dev_reservation_lock);
3606 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3671 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3672 ret = -EINVAL;
3607 goto out; 3673 goto out;
3608 } 3674 }
3609 pr_res_nacl = pr_res_holder->pr_reg_nacl; 3675 pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
3640 sa_res_key, 0, aptpl, 2, 1); 3706 sa_res_key, 0, aptpl, 2, 1);
3641 if (ret != 0) { 3707 if (ret != 0) {
3642 spin_unlock(&dev->dev_reservation_lock); 3708 spin_unlock(&dev->dev_reservation_lock);
3643 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3709 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3710 ret = -EINVAL;
3644 goto out; 3711 goto out;
3645 } 3712 }
3646 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3713 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3771 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 3838 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
3772 " SPC-2 reservation is held, returning" 3839 " SPC-2 reservation is held, returning"
3773 " RESERVATION_CONFLICT\n"); 3840 " RESERVATION_CONFLICT\n");
3774 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3841 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3842 ret = -EINVAL;
3775 goto out; 3843 goto out;
3776 } 3844 }
3777 3845
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3779 * FIXME: A NULL struct se_session pointer means that this is not coming from 3847 * FIXME: A NULL struct se_session pointer means that this is not coming from
3780 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 3848 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3781 */ 3849 */
3782 if (!cmd->se_sess) 3850 if (!cmd->se_sess) {
3783 return PYX_TRANSPORT_LU_COMM_FAILURE; 3851 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3852 return -EINVAL;
3853 }
3784 3854
3785 if (cmd->data_length < 24) { 3855 if (cmd->data_length < 24) {
3786 pr_warn("SPC-PR: Received PR OUT parameter list" 3856 pr_warn("SPC-PR: Received PR OUT parameter list"
3787 " length too small: %u\n", cmd->data_length); 3857 " length too small: %u\n", cmd->data_length);
3788 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3858 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3859 ret = -EINVAL;
3789 goto out; 3860 goto out;
3790 } 3861 }
3791 /* 3862 /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3820 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3891 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3821 */ 3892 */
3822 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { 3893 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
3823 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3894 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3895 ret = -EINVAL;
3824 goto out; 3896 goto out;
3825 } 3897 }
3826 3898
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3837 (cmd->data_length != 24)) { 3909 (cmd->data_length != 24)) {
3838 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3910 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3839 " list length: %u\n", cmd->data_length); 3911 " list length: %u\n", cmd->data_length);
3840 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3912 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3913 ret = -EINVAL;
3841 goto out; 3914 goto out;
3842 } 3915 }
3843 /* 3916 /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3878 default: 3951 default:
3879 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3952 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3880 " action: 0x%02x\n", cdb[1] & 0x1f); 3953 " action: 0x%02x\n", cdb[1] & 0x1f);
3881 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3954 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3955 ret = -EINVAL;
3882 break; 3956 break;
3883 } 3957 }
3884 3958
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3906 if (cmd->data_length < 8) { 3980 if (cmd->data_length < 8) {
3907 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" 3981 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
3908 " too small\n", cmd->data_length); 3982 " too small\n", cmd->data_length);
3909 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3983 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3984 return -EINVAL;
3910 } 3985 }
3911 3986
3912 buf = transport_kmap_first_data_page(cmd); 3987 buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3965 if (cmd->data_length < 8) { 4040 if (cmd->data_length < 8) {
3966 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 4041 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
3967 " too small\n", cmd->data_length); 4042 " too small\n", cmd->data_length);
3968 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4043 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4044 return -EINVAL;
3969 } 4045 }
3970 4046
3971 buf = transport_kmap_first_data_page(cmd); 4047 buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4047 if (cmd->data_length < 6) { 4123 if (cmd->data_length < 6) {
4048 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4124 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
4049 " %u too small\n", cmd->data_length); 4125 " %u too small\n", cmd->data_length);
4050 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4126 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4127 return -EINVAL;
4051 } 4128 }
4052 4129
4053 buf = transport_kmap_first_data_page(cmd); 4130 buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4108 if (cmd->data_length < 8) { 4185 if (cmd->data_length < 8) {
4109 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4186 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
4110 " too small\n", cmd->data_length); 4187 " too small\n", cmd->data_length);
4111 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4188 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4189 return -EINVAL;
4112 } 4190 }
4113 4191
4114 buf = transport_kmap_first_data_page(cmd); 4192 buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4255 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4333 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4256 " SPC-2 reservation is held, returning" 4334 " SPC-2 reservation is held, returning"
4257 " RESERVATION_CONFLICT\n"); 4335 " RESERVATION_CONFLICT\n");
4258 return PYX_TRANSPORT_RESERVATION_CONFLICT; 4336 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
4337 return -EINVAL;
4259 } 4338 }
4260 4339
4261 switch (cmd->t_task_cdb[1] & 0x1f) { 4340 switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4274 default: 4353 default:
4275 pr_err("Unknown PERSISTENT_RESERVE_IN service" 4354 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4276 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); 4355 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
4277 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 4356 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4357 ret = -EINVAL;
4278 break; 4358 break;
4279 } 4359 }
4280 4360
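All of the target_core_pr.c hunks above apply one conversion: the legacy positive PYX_TRANSPORT_* return value is split into a TCM_* sense reason stored in the command descriptor plus an ordinary negative errno, and the success paths return plain 0 instead of PYX_TRANSPORT_SENT_TO_TRANSPORT. A minimal, userspace-compilable sketch of that convention; the enum and struct are trimmed stand-ins, not the real definitions from include/target/target_core_base.h:

#include <errno.h>

enum tcm_sense_reason {				/* illustrative subset only */
	TCM_INVALID_PARAMETER_LIST,
	TCM_RESERVATION_CONFLICT,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
};

struct se_cmd_sketch {				/* stand-in for struct se_cmd */
	enum tcm_sense_reason scsi_sense_reason;
};

/*
 * Old shape:  return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 *             one positive magic number carried both "failed" and "why".
 * New shape:  "why" lives in the descriptor, "failed" is a negative
 *             errno that every kernel caller already knows how to test.
 */
static int check_scope(struct se_cmd_sketch *cmd, int scope, int lu_scope)
{
	if (scope != lu_scope) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}
	return 0;
}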
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe429..8b15e56b0384 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
963static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, 963static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
964 struct bio **hbio) 964 struct bio **hbio)
965{ 965{
966 struct se_cmd *cmd = task->task_se_cmd;
966 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 967 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
967 u32 task_sg_num = task->task_sg_nents; 968 u32 task_sg_num = task->task_sg_nents;
968 struct bio *bio = NULL, *tbio = NULL; 969 struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
971 u32 data_len = task->task_size, i, len, bytes, off; 972 u32 data_len = task->task_size, i, len, bytes, off;
972 int nr_pages = (task->task_size + task_sg[0].offset + 973 int nr_pages = (task->task_size + task_sg[0].offset +
973 PAGE_SIZE - 1) >> PAGE_SHIFT; 974 PAGE_SIZE - 1) >> PAGE_SHIFT;
974 int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 975 int nr_vecs = 0, rc;
975 int rw = (task->task_data_direction == DMA_TO_DEVICE); 976 int rw = (task->task_data_direction == DMA_TO_DEVICE);
976 977
977 *hbio = NULL; 978 *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
1058 bio->bi_next = NULL; 1059 bio->bi_next = NULL;
1059 bio_endio(bio, 0); /* XXX: should be error */ 1060 bio_endio(bio, 0); /* XXX: should be error */
1060 } 1061 }
1061 return ret; 1062 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1063 return -ENOMEM;
1062} 1064}
1063 1065
1064static int pscsi_do_task(struct se_task *task) 1066static int pscsi_do_task(struct se_task *task)
1065{ 1067{
1068 struct se_cmd *cmd = task->task_se_cmd;
1066 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 1069 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
1067 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1070 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1068 struct request *req; 1071 struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
1078 if (!req || IS_ERR(req)) { 1081 if (!req || IS_ERR(req)) {
1079 pr_err("PSCSI: blk_get_request() failed: %ld\n", 1082 pr_err("PSCSI: blk_get_request() failed: %ld\n",
1080 req ? IS_ERR(req) : -ENOMEM); 1083 req ? IS_ERR(req) : -ENOMEM);
1081 return PYX_TRANSPORT_LU_COMM_FAILURE; 1084 cmd->scsi_sense_reason =
1085 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1086 return -ENODEV;
1082 } 1087 }
1083 } else { 1088 } else {
1084 BUG_ON(!task->task_size); 1089 BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
1087 * Setup the main struct request for the task->task_sg[] payload 1092 * Setup the main struct request for the task->task_sg[] payload
1088 */ 1093 */
1089 ret = pscsi_map_sg(task, task->task_sg, &hbio); 1094 ret = pscsi_map_sg(task, task->task_sg, &hbio);
1090 if (ret < 0) 1095 if (ret < 0) {
1091 return PYX_TRANSPORT_LU_COMM_FAILURE; 1096 cmd->scsi_sense_reason =
1097 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1098 return ret;
1099 }
1092 1100
1093 req = blk_make_request(pdv->pdv_sd->request_queue, hbio, 1101 req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
1094 GFP_KERNEL); 1102 GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
1115 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), 1123 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
1116 pscsi_req_done); 1124 pscsi_req_done);
1117 1125
1118 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1126 return 0;
1119 1127
1120fail: 1128fail:
1121 while (hbio) { 1129 while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
1124 bio->bi_next = NULL; 1132 bio->bi_next = NULL;
1125 bio_endio(bio, 0); /* XXX: should be error */ 1133 bio_endio(bio, 0); /* XXX: should be error */
1126 } 1134 }
1127 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 1135 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1136 return -ENOMEM;
1128} 1137}
1129 1138
1130/* pscsi_get_sense_buffer(): 1139/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
1198 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1207 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1199 pt->pscsi_result); 1208 pt->pscsi_result);
1200 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 1209 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
1201 task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1210 task->task_se_cmd->scsi_sense_reason =
1202 task->task_se_cmd->transport_error_status = 1211 TCM_UNSUPPORTED_SCSI_OPCODE;
1203 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1204 transport_complete_task(task, 0); 1212 transport_complete_task(task, 0);
1205 break; 1213 break;
1206 } 1214 }
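Both failure paths in pscsi_map_sg() and pscsi_do_task() unwind a partially built bio chain with the same loop. A small userspace model of that teardown; struct bio_sketch is a hypothetical stand-in for struct bio, and the completed flag models bio_endio() having run:

#include <stddef.h>

struct bio_sketch {
	struct bio_sketch *bi_next;	/* singly linked chain, as in struct bio */
	int completed;
};

/* Pop the head and sever its link before completing it, so the
 * completion side never follows a stale ->bi_next into a sibling
 * that is about to be freed. */
static void teardown_chain(struct bio_sketch *hbio)
{
	while (hbio) {
		struct bio_sketch *bio = hbio;

		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio->completed = 1;	/* bio_endio(bio, 0) in the driver */
	}
}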
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f19..02e51faa2f4e 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
343 return NULL; 343 return NULL;
344} 344}
345 345
346/* rd_MEMCPY_read(): 346static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
347 *
348 *
349 */
350static int rd_MEMCPY_read(struct rd_request *req)
351{ 347{
352 struct se_task *task = &req->rd_task; 348 struct se_task *task = &req->rd_task;
353 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; 349 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
354 struct rd_dev_sg_table *table; 350 struct rd_dev_sg_table *table;
355 struct scatterlist *sg_d, *sg_s; 351 struct scatterlist *rd_sg;
356 void *dst, *src; 352 struct sg_mapping_iter m;
357 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
358 u32 length, page_end = 0, table_sg_end;
359 u32 rd_offset = req->rd_offset; 353 u32 rd_offset = req->rd_offset;
354 u32 src_len;
360 355
361 table = rd_get_sg_table(dev, req->rd_page); 356 table = rd_get_sg_table(dev, req->rd_page);
362 if (!table) 357 if (!table)
363 return -EINVAL; 358 return -EINVAL;
364 359
365 table_sg_end = (table->page_end_offset - req->rd_page); 360 rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
366 sg_d = task->task_sg;
367 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
368 361
369 pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" 362 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
370 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 363 dev->rd_dev_id, read_rd ? "Read" : "Write",
371 req->rd_page, req->rd_offset); 364 task->task_lba, req->rd_size, req->rd_page,
372 365 rd_offset);
373 src_offset = rd_offset;
374 366
367 src_len = PAGE_SIZE - rd_offset;
368 sg_miter_start(&m, task->task_sg, task->task_sg_nents,
369 read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
375 while (req->rd_size) { 370 while (req->rd_size) {
376 if ((sg_d[i].length - dst_offset) < 371 u32 len;
377 (sg_s[j].length - src_offset)) { 372 void *rd_addr;
378 length = (sg_d[i].length - dst_offset);
379
380 pr_debug("Step 1 - sg_d[%d]: %p length: %d"
381 " offset: %u sg_s[%d].length: %u\n", i,
382 &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
383 sg_s[j].length);
384 pr_debug("Step 1 - length: %u dst_offset: %u"
385 " src_offset: %u\n", length, dst_offset,
386 src_offset);
387
388 if (length > req->rd_size)
389 length = req->rd_size;
390
391 dst = sg_virt(&sg_d[i++]) + dst_offset;
392 BUG_ON(!dst);
393
394 src = sg_virt(&sg_s[j]) + src_offset;
395 BUG_ON(!src);
396
397 dst_offset = 0;
398 src_offset = length;
399 page_end = 0;
400 } else {
401 length = (sg_s[j].length - src_offset);
402
403 pr_debug("Step 2 - sg_d[%d]: %p length: %d"
404 " offset: %u sg_s[%d].length: %u\n", i,
405 &sg_d[i], sg_d[i].length, sg_d[i].offset,
406 j, sg_s[j].length);
407 pr_debug("Step 2 - length: %u dst_offset: %u"
408 " src_offset: %u\n", length, dst_offset,
409 src_offset);
410
411 if (length > req->rd_size)
412 length = req->rd_size;
413
414 dst = sg_virt(&sg_d[i]) + dst_offset;
415 BUG_ON(!dst);
416
417 if (sg_d[i].length == length) {
418 i++;
419 dst_offset = 0;
420 } else
421 dst_offset = length;
422
423 src = sg_virt(&sg_s[j++]) + src_offset;
424 BUG_ON(!src);
425
426 src_offset = 0;
427 page_end = 1;
428 }
429 373
430 memcpy(dst, src, length); 374 sg_miter_next(&m);
375 len = min((u32)m.length, src_len);
376 m.consumed = len;
431 377
432 pr_debug("page: %u, remaining size: %u, length: %u," 378 rd_addr = sg_virt(rd_sg) + rd_offset;
433 " i: %u, j: %u\n", req->rd_page,
434 (req->rd_size - length), length, i, j);
435 379
436 req->rd_size -= length; 380 if (read_rd)
437 if (!req->rd_size) 381 memcpy(m.addr, rd_addr, len);
438 return 0; 382 else
383 memcpy(rd_addr, m.addr, len);
439 384
440 if (!page_end) 385 req->rd_size -= len;
386 if (!req->rd_size)
441 continue; 387 continue;
442 388
443 if (++req->rd_page <= table->page_end_offset) { 389 src_len -= len;
444 pr_debug("page: %u in same page table\n", 390 if (src_len) {
445 req->rd_page); 391 rd_offset += len;
446 continue; 392 continue;
447 } 393 }
448 394
449 pr_debug("getting new page table for page: %u\n", 395 /* rd page completed, next one please */
450 req->rd_page); 396 req->rd_page++;
451 397 rd_offset = 0;
452 table = rd_get_sg_table(dev, req->rd_page); 398 src_len = PAGE_SIZE;
453 if (!table) 399 if (req->rd_page <= table->page_end_offset) {
454 return -EINVAL; 400 rd_sg++;
455
456 sg_s = &table->sg_table[j = 0];
457 }
458
459 return 0;
460}
461
462/* rd_MEMCPY_write():
463 *
464 *
465 */
466static int rd_MEMCPY_write(struct rd_request *req)
467{
468 struct se_task *task = &req->rd_task;
469 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
470 struct rd_dev_sg_table *table;
471 struct scatterlist *sg_d, *sg_s;
472 void *dst, *src;
473 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
474 u32 length, page_end = 0, table_sg_end;
475 u32 rd_offset = req->rd_offset;
476
477 table = rd_get_sg_table(dev, req->rd_page);
478 if (!table)
479 return -EINVAL;
480
481 table_sg_end = (table->page_end_offset - req->rd_page);
482 sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
483 sg_s = task->task_sg;
484
485 pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
486 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
487 req->rd_page, req->rd_offset);
488
489 dst_offset = rd_offset;
490
491 while (req->rd_size) {
492 if ((sg_s[i].length - src_offset) <
493 (sg_d[j].length - dst_offset)) {
494 length = (sg_s[i].length - src_offset);
495
496 pr_debug("Step 1 - sg_s[%d]: %p length: %d"
497 " offset: %d sg_d[%d].length: %u\n", i,
498 &sg_s[i], sg_s[i].length, sg_s[i].offset,
499 j, sg_d[j].length);
500 pr_debug("Step 1 - length: %u src_offset: %u"
501 " dst_offset: %u\n", length, src_offset,
502 dst_offset);
503
504 if (length > req->rd_size)
505 length = req->rd_size;
506
507 src = sg_virt(&sg_s[i++]) + src_offset;
508 BUG_ON(!src);
509
510 dst = sg_virt(&sg_d[j]) + dst_offset;
511 BUG_ON(!dst);
512
513 src_offset = 0;
514 dst_offset = length;
515 page_end = 0;
516 } else {
517 length = (sg_d[j].length - dst_offset);
518
519 pr_debug("Step 2 - sg_s[%d]: %p length: %d"
520 " offset: %d sg_d[%d].length: %u\n", i,
521 &sg_s[i], sg_s[i].length, sg_s[i].offset,
522 j, sg_d[j].length);
523 pr_debug("Step 2 - length: %u src_offset: %u"
524 " dst_offset: %u\n", length, src_offset,
525 dst_offset);
526
527 if (length > req->rd_size)
528 length = req->rd_size;
529
530 src = sg_virt(&sg_s[i]) + src_offset;
531 BUG_ON(!src);
532
533 if (sg_s[i].length == length) {
534 i++;
535 src_offset = 0;
536 } else
537 src_offset = length;
538
539 dst = sg_virt(&sg_d[j++]) + dst_offset;
540 BUG_ON(!dst);
541
542 dst_offset = 0;
543 page_end = 1;
544 }
545
546 memcpy(dst, src, length);
547
548 pr_debug("page: %u, remaining size: %u, length: %u,"
549 " i: %u, j: %u\n", req->rd_page,
550 (req->rd_size - length), length, i, j);
551
552 req->rd_size -= length;
553 if (!req->rd_size)
554 return 0;
555
556 if (!page_end)
557 continue;
558
559 if (++req->rd_page <= table->page_end_offset) {
560 pr_debug("page: %u in same page table\n",
561 req->rd_page);
562 continue; 401 continue;
563 } 402 }
564 403
565 pr_debug("getting new page table for page: %u\n",
566 req->rd_page);
567
568 table = rd_get_sg_table(dev, req->rd_page); 404 table = rd_get_sg_table(dev, req->rd_page);
569 if (!table) 405 if (!table) {
406 sg_miter_stop(&m);
570 return -EINVAL; 407 return -EINVAL;
408 }
571 409
572 sg_d = &table->sg_table[j = 0]; 410 /* since we increment, the first sg entry is correct */
411 rd_sg = table->sg_table;
573 } 412 }
574 413 sg_miter_stop(&m);
575 return 0; 414 return 0;
576} 415}
577 416
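The merged rd_MEMCPY() replaces the old hand-rolled dual scatterlist walk with the kernel's sg mapping iterator, which does the mapping and per-entry bookkeeping itself. A sketch of that iterator's contract in isolation (copy_into_sg() is a hypothetical helper, not part of the patch; it assumes a sleepable context, hence no SG_MITER_ATOMIC):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

static void copy_into_sg(struct scatterlist *sgl, unsigned int nents,
			 const void *src, size_t len)
{
	struct sg_mapping_iter m;
	size_t off = 0;

	/* SG_MITER_TO_SG: the pages will be written, so stop() flushes them */
	sg_miter_start(&m, sgl, nents, SG_MITER_TO_SG);
	while (off < len && sg_miter_next(&m)) {
		size_t chunk = min(m.length, len - off);

		memcpy(m.addr, src + off, chunk);	/* m.addr is a mapped window */
		m.consumed = chunk;	/* ok to consume less than m.length */
		off += chunk;
	}
	sg_miter_stop(&m);	/* unmap and flush the last window */
}

Setting m.consumed is exactly the trick rd_MEMCPY() uses above to stop at a ramdisk page boundary in the middle of a task_sg entry.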
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
583{ 422{
584 struct se_device *dev = task->task_se_cmd->se_dev; 423 struct se_device *dev = task->task_se_cmd->se_dev;
585 struct rd_request *req = RD_REQ(task); 424 struct rd_request *req = RD_REQ(task);
586 unsigned long long lba; 425 u64 tmp;
587 int ret; 426 int ret;
588 427
589 req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; 428 tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
590 lba = task->task_lba; 429 req->rd_offset = do_div(tmp, PAGE_SIZE);
591 req->rd_offset = (do_div(lba, 430 req->rd_page = tmp;
592 (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
593 dev->se_sub_dev->se_dev_attrib.block_size;
594 req->rd_size = task->task_size; 431 req->rd_size = task->task_size;
595 432
596 if (task->task_data_direction == DMA_FROM_DEVICE) 433 ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
597 ret = rd_MEMCPY_read(req);
598 else
599 ret = rd_MEMCPY_write(req);
600
601 if (ret != 0) 434 if (ret != 0)
602 return ret; 435 return ret;
603 436
604 task->task_scsi_status = GOOD; 437 task->task_scsi_status = GOOD;
605 transport_complete_task(task, 1); 438 transport_complete_task(task, 1);
606 439 return 0;
607 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
608} 440}
609 441
610/* rd_free_task(): (Part of se_subsystem_api_t template) 442/* rd_free_task(): (Part of se_subsystem_api_t template)
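The rewritten rd_MEMCPY_do_task() derives the starting ramdisk page and intra-page offset with one 64-bit division of the absolute byte offset, where the removed code divided the LBA by blocks-per-page and scaled the remainder back up. The computation in isolation (rd_split() is a hypothetical helper name):

#include <linux/types.h>
#include <asm/page.h>
#include <asm/div64.h>

static void rd_split(u64 lba, u32 block_size, u32 *page, u32 *offset)
{
	u64 tmp = lba * block_size;	/* absolute byte offset into the ramdisk */

	*offset = do_div(tmp, PAGE_SIZE);	/* do_div() returns the remainder... */
	*page = tmp;				/* ...and leaves the quotient in tmp */
}

For lba 10 with 512-byte blocks and 4 KiB pages: tmp = 5120, so page 1 and offset 1024, the same result the replaced pair of operations produced, without the intermediate blocks-per-page factor.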
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df6297..684522805a1f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
345 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 345 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
346 "Preempt" : "", cmd, cmd->t_state, 346 "Preempt" : "", cmd, cmd->t_state,
347 atomic_read(&cmd->t_fe_count)); 347 atomic_read(&cmd->t_fe_count));
348 /*
349 * Signal that the command has failed via cmd->se_cmd_flags,
350 */
351 transport_new_cmd_failure(cmd);
352 348
353 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, 349 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
354 atomic_read(&cmd->t_fe_count)); 350 atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f8..0257658e2e3e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
61static int sub_api_initialized; 61static int sub_api_initialized;
62 62
63static struct workqueue_struct *target_completion_wq; 63static struct workqueue_struct *target_completion_wq;
64static struct kmem_cache *se_cmd_cache;
65static struct kmem_cache *se_sess_cache; 64static struct kmem_cache *se_sess_cache;
66struct kmem_cache *se_tmr_req_cache; 65struct kmem_cache *se_tmr_req_cache;
67struct kmem_cache *se_ua_cache; 66struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
82static void transport_put_cmd(struct se_cmd *cmd); 81static void transport_put_cmd(struct se_cmd *cmd);
83static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 82static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
84static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 83static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
85static void transport_generic_request_failure(struct se_cmd *, int, int); 84static void transport_generic_request_failure(struct se_cmd *);
86static void target_complete_ok_work(struct work_struct *work); 85static void target_complete_ok_work(struct work_struct *work);
87 86
88int init_se_kmem_caches(void) 87int init_se_kmem_caches(void)
89{ 88{
90 se_cmd_cache = kmem_cache_create("se_cmd_cache",
91 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
92 if (!se_cmd_cache) {
93 pr_err("kmem_cache_create for struct se_cmd failed\n");
94 goto out;
95 }
96 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 89 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
97 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 90 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
98 0, NULL); 91 0, NULL);
99 if (!se_tmr_req_cache) { 92 if (!se_tmr_req_cache) {
100 pr_err("kmem_cache_create() for struct se_tmr_req" 93 pr_err("kmem_cache_create() for struct se_tmr_req"
101 " failed\n"); 94 " failed\n");
102 goto out_free_cmd_cache; 95 goto out;
103 } 96 }
104 se_sess_cache = kmem_cache_create("se_sess_cache", 97 se_sess_cache = kmem_cache_create("se_sess_cache",
105 sizeof(struct se_session), __alignof__(struct se_session), 98 sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
182 kmem_cache_destroy(se_sess_cache); 175 kmem_cache_destroy(se_sess_cache);
183out_free_tmr_req_cache: 176out_free_tmr_req_cache:
184 kmem_cache_destroy(se_tmr_req_cache); 177 kmem_cache_destroy(se_tmr_req_cache);
185out_free_cmd_cache:
186 kmem_cache_destroy(se_cmd_cache);
187out: 178out:
188 return -ENOMEM; 179 return -ENOMEM;
189} 180}
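With se_cmd_cache gone, the first rung of the unwind ladder in init_se_kmem_caches() disappears as well: the earliest allocation failure now jumps straight to out. The pattern reduced to two caches, sketched with invented cache names rather than the real ones:

#include <linux/slab.h>

static struct kmem_cache *a_cache, *b_cache;

static int init_caches(void)
{
	a_cache = kmem_cache_create("a_cache", 64, 0, 0, NULL);
	if (!a_cache)
		goto out;		/* nothing allocated yet: plain exit */

	b_cache = kmem_cache_create("b_cache", 128, 0, 0, NULL);
	if (!b_cache)
		goto out_free_a;	/* undo exactly what succeeded so far */

	return 0;

out_free_a:
	kmem_cache_destroy(a_cache);
out:
	return -ENOMEM;
}

Each label frees only the caches created above it, so removing one allocation removes one rung, which is all this hunk does.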
@@ -191,7 +182,6 @@ out:
191void release_se_kmem_caches(void) 182void release_se_kmem_caches(void)
192{ 183{
193 destroy_workqueue(target_completion_wq); 184 destroy_workqueue(target_completion_wq);
194 kmem_cache_destroy(se_cmd_cache);
195 kmem_cache_destroy(se_tmr_req_cache); 185 kmem_cache_destroy(se_tmr_req_cache);
196 kmem_cache_destroy(se_sess_cache); 186 kmem_cache_destroy(se_sess_cache);
197 kmem_cache_destroy(se_ua_cache); 187 kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
680 task->task_scsi_status = GOOD; 670 task->task_scsi_status = GOOD;
681 } else { 671 } else {
682 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 672 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
683 task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; 673 task->task_se_cmd->scsi_sense_reason =
684 task->task_se_cmd->transport_error_status = 674 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
685 PYX_TRANSPORT_ILLEGAL_REQUEST; 675
686 } 676 }
687 677
688 transport_complete_task(task, good); 678 transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
693{ 683{
694 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 684 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
695 685
696 transport_generic_request_failure(cmd, 1, 1); 686 transport_generic_request_failure(cmd);
697} 687}
698 688
699/* transport_complete_task(): 689/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
755 if (cmd->t_tasks_failed) { 745 if (cmd->t_tasks_failed) {
756 if (!task->task_error_status) { 746 if (!task->task_error_status) {
757 task->task_error_status = 747 task->task_error_status =
758 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
759 cmd->transport_error_status = 749 cmd->scsi_sense_reason =
760 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 750 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
761 } 751 }
752
762 INIT_WORK(&cmd->work, target_complete_failure_work); 753 INIT_WORK(&cmd->work, target_complete_failure_work);
763 } else { 754 } else {
764 atomic_set(&cmd->t_transport_complete, 1); 755 atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
1335 dev->se_hba = hba; 1326 dev->se_hba = hba;
1336 dev->se_sub_dev = se_dev; 1327 dev->se_sub_dev = se_dev;
1337 dev->transport = transport; 1328 dev->transport = transport;
1338 atomic_set(&dev->active_cmds, 0);
1339 INIT_LIST_HEAD(&dev->dev_list); 1329 INIT_LIST_HEAD(&dev->dev_list);
1340 INIT_LIST_HEAD(&dev->dev_sep_list); 1330 INIT_LIST_HEAD(&dev->dev_sep_list);
1341 INIT_LIST_HEAD(&dev->dev_tmr_list); 1331 INIT_LIST_HEAD(&dev->dev_tmr_list);
1342 INIT_LIST_HEAD(&dev->execute_task_list); 1332 INIT_LIST_HEAD(&dev->execute_task_list);
1343 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1333 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1344 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1345 INIT_LIST_HEAD(&dev->state_task_list); 1334 INIT_LIST_HEAD(&dev->state_task_list);
1346 INIT_LIST_HEAD(&dev->qf_cmd_list); 1335 INIT_LIST_HEAD(&dev->qf_cmd_list);
1347 spin_lock_init(&dev->execute_task_lock); 1336 spin_lock_init(&dev->execute_task_lock);
1348 spin_lock_init(&dev->delayed_cmd_lock); 1337 spin_lock_init(&dev->delayed_cmd_lock);
1349 spin_lock_init(&dev->ordered_cmd_lock);
1350 spin_lock_init(&dev->state_task_lock);
1351 spin_lock_init(&dev->dev_alua_lock);
1352 spin_lock_init(&dev->dev_reservation_lock); 1338 spin_lock_init(&dev->dev_reservation_lock);
1353 spin_lock_init(&dev->dev_status_lock); 1339 spin_lock_init(&dev->dev_status_lock);
1354 spin_lock_init(&dev->dev_status_thr_lock);
1355 spin_lock_init(&dev->se_port_lock); 1340 spin_lock_init(&dev->se_port_lock);
1356 spin_lock_init(&dev->se_tmr_lock); 1341 spin_lock_init(&dev->se_tmr_lock);
1357 spin_lock_init(&dev->qf_cmd_lock); 1342 spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
1507{ 1492{
1508 INIT_LIST_HEAD(&cmd->se_lun_node); 1493 INIT_LIST_HEAD(&cmd->se_lun_node);
1509 INIT_LIST_HEAD(&cmd->se_delayed_node); 1494 INIT_LIST_HEAD(&cmd->se_delayed_node);
1510 INIT_LIST_HEAD(&cmd->se_ordered_node);
1511 INIT_LIST_HEAD(&cmd->se_qf_node); 1495 INIT_LIST_HEAD(&cmd->se_qf_node);
1512 INIT_LIST_HEAD(&cmd->se_queue_node); 1496 INIT_LIST_HEAD(&cmd->se_queue_node);
1513 INIT_LIST_HEAD(&cmd->se_cmd_list); 1497 INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
1573 pr_err("Received SCSI CDB with command_size: %d that" 1557 pr_err("Received SCSI CDB with command_size: %d that"
1574 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1558 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1575 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1559 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1560 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1561 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1576 return -EINVAL; 1562 return -EINVAL;
1577 } 1563 }
1578 /* 1564 /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
1588 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1574 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1589 scsi_command_size(cdb), 1575 scsi_command_size(cdb),
1590 (unsigned long)sizeof(cmd->__t_task_cdb)); 1576 (unsigned long)sizeof(cmd->__t_task_cdb));
1577 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1578 cmd->scsi_sense_reason =
1579 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1591 return -ENOMEM; 1580 return -ENOMEM;
1592 } 1581 }
1593 } else 1582 } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
 	 * and call transport_generic_request_failure() if necessary.
 	 */
 	ret = transport_generic_new_cmd(cmd);
-	if (ret < 0) {
-		cmd->transport_error_status = ret;
-		transport_generic_request_failure(cmd, 0,
-				(cmd->data_direction != DMA_TO_DEVICE));
-	}
+	if (ret < 0)
+		transport_generic_request_failure(cmd);
+
 	return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
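
With the PYX_TRANSPORT_* codes gone, a caller no longer copies the return value into the command; the failing site records a TCM_* sense reason and the caller simply hands the command to transport_generic_request_failure(). A minimal sketch of that caller pattern (the wrapper function is illustrative, not part of the patch):

	/* Sketch: dispatch a command; on failure the core converts the
	 * already-recorded cmd->scsi_sense_reason into CHECK_CONDITION
	 * status plus sense data for the fabric. */
	static void example_dispatch(struct se_cmd *cmd)
	{
		int ret = transport_generic_new_cmd(cmd);
		if (ret < 0)
			transport_generic_request_failure(cmd);
	}
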
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-	struct se_cmd *cmd,
-	int complete,
-	int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
 	int ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->t_task_cdb[0]);
-	pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
-		cmd->t_state,
-		cmd->transport_error_status);
+		cmd->t_state, cmd->scsi_sense_reason);
 	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
 		" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 		transport_complete_task_attr(cmd);
 
-	if (complete) {
-		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-	}
-
-	switch (cmd->transport_error_status) {
-	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		break;
-	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-		break;
-	case PYX_TRANSPORT_INVALID_CDB_FIELD:
-		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		break;
-	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		break;
-	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-		if (!sc)
-			transport_new_cmd_failure(cmd);
-		/*
-		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-		 * we force this session to fall back to session
-		 * recovery.
-		 */
-		cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-		cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-		goto check_stop;
-	case PYX_TRANSPORT_LU_COMM_FAILURE:
-	case PYX_TRANSPORT_ILLEGAL_REQUEST:
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		break;
-	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-		break;
-	case PYX_TRANSPORT_WRITE_PROTECTED:
-		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+	switch (cmd->scsi_sense_reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_INVALID_CDB_FIELD:
+	case TCM_INVALID_PARAMETER_LIST:
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	case TCM_UNKNOWN_MODE_PAGE:
+	case TCM_WRITE_PROTECTED:
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+	case TCM_CHECK_CONDITION_NOT_READY:
 		break;
-	case PYX_TRANSPORT_RESERVATION_CONFLICT:
+	case TCM_RESERVATION_CONFLICT:
 		/*
 		 * No SENSE Data payload for this case, set SCSI Status
 		 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
 		if (ret == -EAGAIN || ret == -ENOMEM)
 			goto queue_full;
 		goto check_stop;
-	case PYX_TRANSPORT_USE_SENSE_REASON:
-		/*
-		 * struct se_cmd->scsi_sense_reason already set
-		 */
-		break;
 	default:
 		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-			cmd->t_task_cdb[0],
-			cmd->transport_error_status);
+			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		break;
 	}
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
 	 * transport_send_check_condition_and_sense() after handling
 	 * possible unsolicited write data payloads.
 	 */
-	if (!sc && !cmd->se_tfo->new_cmd_map)
-		transport_new_cmd_failure(cmd);
-	else {
-		ret = transport_send_check_condition_and_sense(cmd,
-				cmd->scsi_sense_reason, 0);
-		if (ret == -EAGAIN || ret == -ENOMEM)
-			goto queue_full;
-	}
+	ret = transport_send_check_condition_and_sense(cmd,
+			cmd->scsi_sense_reason, 0);
+	if (ret == -EAGAIN || ret == -ENOMEM)
+		goto queue_full;
 
 check_stop:
 	transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
 	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-		atomic_inc(&cmd->se_dev->dev_hoq_count);
-		smp_mb__after_atomic_inc();
 		pr_debug("Added HEAD_OF_QUEUE for CDB:"
 			" 0x%02x, se_ordered_id: %u\n",
 			cmd->t_task_cdb[0],
 			cmd->se_ordered_id);
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		spin_lock(&cmd->se_dev->ordered_cmd_lock);
-		list_add_tail(&cmd->se_ordered_node,
-				&cmd->se_dev->ordered_cmd_list);
-		spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
 		smp_mb__after_atomic_inc();
 
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 {
 	int add_tasks;
 
-	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-		transport_generic_request_failure(cmd, 0, 1);
+	if (se_dev_check_online(cmd->se_dev) != 0) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		transport_generic_request_failure(cmd);
 		return 0;
 	}
 
@@ -2163,14 +2102,13 @@ check_depth:
 		else
 			error = dev->transport->do_task(task);
 		if (error != 0) {
-			cmd->transport_error_status = error;
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			task->task_flags &= ~TF_ACTIVE;
 			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			atomic_set(&cmd->t_transport_sent, 0);
 			transport_stop_tasks_for_cmd(cmd);
 			atomic_inc(&dev->depth_left);
-			transport_generic_request_failure(cmd, 0, 1);
+			transport_generic_request_failure(cmd);
 		}
 
 	goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
 	return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-	unsigned long flags;
-	/*
-	 * Any unsolicited data will get dumped for failed command inside of
-	 * the fabric plugin
-	 */
-	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
 	unsigned char *cdb,
 	struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
 
 	/*
 	 * Everything else assume TYPE_DISK Sector CDB location.
-	 * Use 8-bit sector value.
+	 * Use 8-bit sector value. SBC-3 says:
+	 *
+	 *   A TRANSFER LENGTH field set to zero specifies that 256
+	 *   logical blocks shall be written. Any other value
+	 *   specifies the number of logical blocks that shall be
+	 *   written.
 	 */
 type_disk:
-	return (u32)cdb[4];
+	return cdb[4] ? : 256;
 }
 
 static inline u32 transport_get_sectors_10(
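
The new return statement relies on the GNU C conditional-with-omitted-operand extension: x ? : y evaluates to x when x is non-zero and to y otherwise, so a TRANSFER LENGTH byte of zero maps to 256 logical blocks exactly as SBC-3 requires. A self-contained illustration with hypothetical values (gcc/clang extension, not ISO C):

	/* Compile with gcc or clang; a ? : b is a GNU extension
	 * equivalent to a ? a : b, with a evaluated only once. */
	#include <stdio.h>

	int main(void)
	{
		unsigned char transfer_len = 0;	/* CDB byte 4 */
		unsigned int sectors = transfer_len ? : 256;
		printf("sectors = %u\n", sectors);	/* prints 256 */
		return 0;
	}
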
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 	return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-	/*
-	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-	 * CONFLICT STATUS.
-	 *
-	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-	 */
-	if (cmd->se_sess &&
-	    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-			cmd->orig_fe_lun, 0x2C,
-			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-	return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
 	return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
 	 */
 	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
 		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-					cmd, cdb, pr_reg_type) != 0)
-			return transport_handle_reservation_conflict(cmd);
+					cmd, cdb, pr_reg_type) != 0) {
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EBUSY;
+		}
 		/*
 		 * This means the CDB is allowed for the SCSI Initiator port
 		 * when said port is *NOT* holding the legacy SPC-2 or
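 
The transport_handle_reservation_conflict() helper removed in the earlier hunk is folded into this, its only caller, and the distinct -EBUSY return lets the sequencer distinguish a conflict from the -EINVAL CDB failures. A condensed sketch of the conflict path only, with the two pr_ops calls abbreviated to hypothetical names:

	/* Sketch: reservation check in the CDB sequencer (condensed). */
	if (reservation_check(cmd, &pr_reg_type) != 0 &&
	    seq_non_holder(cmd, cdb, pr_reg_type) != 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION |
				     SCF_SCSI_RESERVATION_CONFLICT;
		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
		return -EBUSY;	/* fabric reports RESERVATION CONFLICT */
	}
 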
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->t_task_lba = transport_lba_32(cdb);
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->t_task_lba = transport_lba_32(cdb);
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->t_task_lba = transport_lba_64(cdb);
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case XDWRITEREAD_10:
 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
-		    !(cmd->t_tasks_bidi))
+		    !(cmd->se_cmd_flags & SCF_BIDI))
 			goto out_invalid_cdb_field;
 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
 		if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
 		 * Setup BIDI XOR callback to be run after I/O completion.
 		 */
 		cmd->transport_complete_callback = &transport_xor_callback;
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
 			 * completion.
 			 */
 			cmd->transport_complete_callback = &transport_xor_callback;
-			cmd->t_tasks_fua = (cdb[10] & 0x8);
+			if (cdb[10] & 0x8)
+				cmd->se_cmd_flags |= SCF_FUA;
 			break;
 	case WRITE_SAME_32:
 		sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3171 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3089 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3172 cmd->se_ordered_id); 3090 cmd->se_ordered_id);
3173 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3091 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3174 atomic_dec(&dev->dev_hoq_count);
3175 smp_mb__after_atomic_dec();
3176 dev->dev_cur_ordered_id++; 3092 dev->dev_cur_ordered_id++;
3177 pr_debug("Incremented dev_cur_ordered_id: %u for" 3093 pr_debug("Incremented dev_cur_ordered_id: %u for"
3178 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3094 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3179 cmd->se_ordered_id); 3095 cmd->se_ordered_id);
3180 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3096 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3181 spin_lock(&dev->ordered_cmd_lock);
3182 list_del(&cmd->se_ordered_node);
3183 atomic_dec(&dev->dev_ordered_sync); 3097 atomic_dec(&dev->dev_ordered_sync);
3184 smp_mb__after_atomic_dec(); 3098 smp_mb__after_atomic_dec();
3185 spin_unlock(&dev->ordered_cmd_lock);
3186 3099
3187 dev->dev_cur_ordered_id++; 3100 dev->dev_cur_ordered_id++;
3188 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3101 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
 
 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+		/*
+		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+		 * scatterlists already have been set to follow what the fabric
+		 * passes for the original expected data transfer length.
+		 */
+		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			pr_warn("Rejecting SCSI DATA overflow for fabric using"
+				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+			return -EINVAL;
+		}
 
 		cmd->t_data_sg = sgl;
 		cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	    cmd->data_length) {
 		ret = transport_generic_get_mem(cmd);
 		if (ret < 0)
-			return ret;
+			goto out_fail;
 	}
 
 	/*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		task_cdbs = transport_allocate_control_task(cmd);
 	}
 
-	if (task_cdbs <= 0)
+	if (task_cdbs < 0)
 		goto out_fail;
+	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		cmd->t_state = TRANSPORT_COMPLETE;
+		atomic_set(&cmd->t_transport_active, 1);
+		INIT_WORK(&cmd->work, target_complete_ok_work);
+		queue_work(target_completion_wq, &cmd->work);
+		return 0;
+	}
 
 	if (set_counts) {
 		atomic_inc(&cmd->t_fe_count);
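
A data-phase command that legitimately allocates zero tasks (a zero-length transfer) is now completed immediately on the target completion workqueue instead of being failed. The mechanism is the standard INIT_WORK()/queue_work() pairing; a minimal sketch of the pattern, with the handler and wrapper names being illustrative (the patch itself wires cmd->work to target_complete_ok_work on target_completion_wq):

	/* Sketch: defer status delivery to process context. */
	static void example_complete_work(struct work_struct *work)
	{
		struct se_cmd *cmd = container_of(work, struct se_cmd, work);
		/* ... queue status/response back to the fabric ... */
	}

	static void example_complete_now(struct se_cmd *cmd)
	{
		INIT_WORK(&cmd->work, example_complete_work);
		queue_work(target_completion_wq, &cmd->work);
	}
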
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	else if (ret < 0)
 		return ret;
 
-	return PYX_TRANSPORT_WRITE_PENDING;
+	return 1;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			atomic_inc(&cmd->t_transport_aborted);
 			smp_mb__after_atomic_inc();
-			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-			transport_new_cmd_failure(cmd);
-			return;
 		}
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
 	struct se_cmd *cmd;
 	struct se_device *dev = (struct se_device *) param;
 
-	set_user_nice(current, -20);
-
 	while (!kthread_should_stop()) {
 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
 				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
 			}
 			ret = cmd->se_tfo->new_cmd_map(cmd);
 			if (ret < 0) {
-				cmd->transport_error_status = ret;
-				transport_generic_request_failure(cmd,
-						0, (cmd->data_direction !=
-						DMA_TO_DEVICE));
+				transport_generic_request_failure(cmd);
 				break;
 			}
 			ret = transport_generic_new_cmd(cmd);
 			if (ret < 0) {
-				cmd->transport_error_status = ret;
-				transport_generic_request_failure(cmd,
-						0, (cmd->data_direction !=
-						DMA_TO_DEVICE));
+				transport_generic_request_failure(cmd);
+				break;
 			}
 			break;
 		case TRANSPORT_PROCESS_WRITE:
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c615..71fc9cea5dc9 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
 	lport = ep->lp;
 	fp = fc_frame_alloc(lport, sizeof(*txrdy));
 	if (!fp)
-		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+		return -ENOMEM; /* Signal QUEUE_FULL */
 
 	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
 	memset(txrdy, 0, sizeof(*txrdy));
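
The same conversion reaches the fabric side: a write_pending() callback now reports allocation pressure with an ordinary -ENOMEM, which the core's queue-full path retries, rather than a fabric-private PYX_TRANSPORT_* constant. A hedged sketch of the callback shape; the frame-allocation helper here is hypothetical:

	/* Sketch: fabric write_pending() under the new convention --
	 * negative errno on failure, 0 on success. */
	static int example_write_pending(struct se_cmd *se_cmd)
	{
		struct fc_frame *fp = example_alloc_txrdy(se_cmd); /* hypothetical */
		if (!fp)
			return -ENOMEM;	/* handled as QUEUE_FULL by the core */
		/* ... fill and send the FCP XFER_RDY frame ... */
		return 0;
	}
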
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca40..9402b7387cac 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
 	struct ft_lport_acl *lacl = container_of(wwn,
 				struct ft_lport_acl, fc_lport_wwn);
 
-	pr_debug("del lport %s\n",
-		config_item_name(&wwn->wwn_group.cg_item));
+	pr_debug("del lport %s\n", lacl->name);
 	mutex_lock(&ft_lport_lock);
 	list_del(&lacl->list);
 	mutex_unlock(&ft_lport_lock);