Diffstat (limited to 'drivers')
-rw-r--r-- drivers/block/aoe/aoe.h | 2
-rw-r--r-- drivers/block/aoe/aoeblk.c | 13
-rw-r--r-- drivers/block/aoe/aoedev.c | 1
-rw-r--r-- drivers/char/agp/intel-agp.c | 8
-rw-r--r-- drivers/char/hvc_iucv.c | 2
-rw-r--r-- drivers/char/hw_random/amd-rng.c | 4
-rw-r--r-- drivers/char/hw_random/geode-rng.c | 3
-rw-r--r-- drivers/char/mem.c | 1
-rw-r--r-- drivers/char/n_tty.c | 3
-rw-r--r-- drivers/char/pty.c | 10
-rw-r--r-- drivers/char/random.c | 14
-rw-r--r-- drivers/char/sysrq.c | 19
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 12
-rw-r--r-- drivers/cpufreq/cpufreq.c | 95
-rw-r--r-- drivers/crypto/Kconfig | 15
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/amcc/crypto4xx_alg.c | 3
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 73
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 25
-rw-r--r-- drivers/crypto/mv_cesa.c | 606
-rw-r--r-- drivers/crypto/mv_cesa.h | 119
-rw-r--r-- drivers/crypto/padlock-sha.c | 329
-rw-r--r-- drivers/crypto/talitos.c | 216
-rw-r--r-- drivers/crypto/talitos.h | 1
-rw-r--r-- drivers/firewire/core-iso.c | 4
-rw-r--r-- drivers/firewire/ohci.c | 14
-rw-r--r-- drivers/firewire/sbp2.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 19
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 65
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 64
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 2
-rw-r--r-- drivers/ide/ide-cs.c | 1
-rw-r--r-- drivers/infiniband/core/iwcm.c | 1
-rw-r--r-- drivers/infiniband/core/mad.c | 35
-rw-r--r-- drivers/infiniband/core/mad_priv.h | 3
-rw-r--r-- drivers/infiniband/core/multicast.c | 10
-rw-r--r-- drivers/infiniband/core/sa_query.c | 7
-rw-r--r-- drivers/infiniband/core/smi.c | 8
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 10
-rw-r--r-- drivers/infiniband/hw/amso1100/c2.c | 6
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.c | 24
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.c | 5
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_wr.h | 6
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch.c | 37
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_cm.c | 68
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_cm.h | 9
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_mem.c | 21
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 52
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_qp.c | 1
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_main.c | 8
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_reqs.c | 6
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_sqp.c | 47
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_file_ops.c | 2
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_mad.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 12
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 12
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_catas.c | 1
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_config_reg.h | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_dev.h | 1
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_eq.c | 17
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_main.c | 8
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.c | 3
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.h | 1
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_qp.c | 12
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_reset.c | 1
-rw-r--r-- drivers/infiniband/hw/nes/nes.h | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes_cm.c | 128
-rw-r--r-- drivers/infiniband/hw/nes/nes_cm.h | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.c | 767
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.h | 103
-rw-r--r-- drivers/infiniband/hw/nes/nes_utils.c | 5
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c | 204
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.h | 16
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 7
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 21
-rw-r--r-- drivers/input/keyboard/atkbd.c | 35
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r-- drivers/md/dm-exception-store.c | 13
-rw-r--r-- drivers/md/dm-exception-store.h | 4
-rw-r--r-- drivers/md/dm-log-userspace-base.c | 39
-rw-r--r-- drivers/md/dm-log-userspace-transfer.c | 6
-rw-r--r-- drivers/md/dm-log-userspace-transfer.h | 2
-rw-r--r-- drivers/md/dm-raid1.c | 8
-rw-r--r-- drivers/md/dm-snap-persistent.c | 88
-rw-r--r-- drivers/md/dm-snap.c | 23
-rw-r--r-- drivers/md/dm-stripe.c | 13
-rw-r--r-- drivers/md/dm-table.c | 51
-rw-r--r-- drivers/md/dm.c | 15
-rw-r--r-- drivers/mtd/devices/m25p80.c | 2
-rw-r--r-- drivers/mtd/nftlcore.c | 15
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 6
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.c | 6
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.h | 8
-rw-r--r-- drivers/net/gianfar.c | 1
-rw-r--r-- drivers/net/mlx4/cq.c | 1
-rw-r--r-- drivers/net/mlx4/eq.c | 77
-rw-r--r-- drivers/net/mlx4/icm.c | 1
-rw-r--r-- drivers/net/mlx4/main.c | 37
-rw-r--r-- drivers/net/mlx4/mcg.c | 1
-rw-r--r-- drivers/net/mlx4/mlx4.h | 7
-rw-r--r-- drivers/net/mlx4/mr.c | 1
-rw-r--r-- drivers/net/mlx4/pd.c | 1
-rw-r--r-- drivers/net/mlx4/profile.c | 2
-rw-r--r-- drivers/net/mlx4/qp.c | 2
-rw-r--r-- drivers/net/mlx4/reset.c | 1
-rw-r--r-- drivers/net/mlx4/srq.c | 2
-rw-r--r-- drivers/net/tun.c | 22
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2200.c | 120
-rw-r--r-- drivers/oprofile/cpu_buffer.c | 16
-rw-r--r-- drivers/oprofile/oprof.c | 71
-rw-r--r-- drivers/oprofile/oprof.h | 3
-rw-r--r-- drivers/oprofile/oprofile_files.c | 46
-rw-r--r-- drivers/oprofile/oprofile_stats.c | 5
-rw-r--r-- drivers/oprofile/oprofile_stats.h | 1
-rw-r--r-- drivers/pci/intr_remapping.c | 14
-rw-r--r-- drivers/pci/iov.c | 23
-rw-r--r-- drivers/pci/pci.h | 13
-rw-r--r-- drivers/pci/setup-bus.c | 4
-rw-r--r-- drivers/pci/setup-res.c | 8
-rw-r--r-- drivers/s390/block/dasd.c | 26
-rw-r--r-- drivers/s390/block/dasd_3990_erp.c | 2
-rw-r--r-- drivers/s390/block/dasd_alias.c | 5
-rw-r--r-- drivers/s390/block/dasd_diag.c | 5
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 47
-rw-r--r-- drivers/s390/block/dasd_eer.c | 4
-rw-r--r-- drivers/s390/block/dasd_erp.c | 4
-rw-r--r-- drivers/s390/block/dasd_fba.c | 9
-rw-r--r-- drivers/s390/block/dasd_int.h | 11
-rw-r--r-- drivers/s390/block/dasd_ioctl.c | 24
-rw-r--r-- drivers/s390/block/xpram.c | 65
-rw-r--r-- drivers/s390/char/Kconfig | 10
-rw-r--r-- drivers/s390/char/Makefile | 1
-rw-r--r-- drivers/s390/char/monreader.c | 2
-rw-r--r-- drivers/s390/char/sclp.h | 4
-rw-r--r-- drivers/s390/char/sclp_async.c | 224
-rw-r--r-- drivers/s390/char/tape_34xx.c | 2
-rw-r--r-- drivers/s390/char/tape_3590.c | 4
-rw-r--r-- drivers/s390/char/tape_block.c | 12
-rw-r--r-- drivers/s390/char/tape_core.c | 18
-rw-r--r-- drivers/s390/char/tape_std.c | 2
-rw-r--r-- drivers/s390/char/vmlogrdr.c | 4
-rw-r--r-- drivers/s390/char/vmur.c | 19
-rw-r--r-- drivers/s390/char/zcore.c | 2
-rw-r--r-- drivers/s390/cio/Makefile | 2
-rw-r--r-- drivers/s390/cio/chp.c | 3
-rw-r--r-- drivers/s390/cio/chsc.h | 24
-rw-r--r-- drivers/s390/cio/cio.c | 56
-rw-r--r-- drivers/s390/cio/cio.h | 4
-rw-r--r-- drivers/s390/cio/css.c | 32
-rw-r--r-- drivers/s390/cio/device.c | 172
-rw-r--r-- drivers/s390/cio/device_fsm.c | 22
-rw-r--r-- drivers/s390/cio/qdio.h | 4
-rw-r--r-- drivers/s390/cio/qdio_debug.c | 55
-rw-r--r-- drivers/s390/cio/qdio_main.c | 4
-rw-r--r-- drivers/s390/cio/scsw.c | 843
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 17
-rw-r--r-- drivers/s390/kvm/kvm_virtio.c | 8
-rw-r--r-- drivers/s390/net/netiucv.c | 9
-rw-r--r-- drivers/s390/net/smsgiucv.c | 6
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_init.c | 12
-rw-r--r-- drivers/staging/comedi/comedi_fops.c | 8
-rw-r--r-- drivers/staging/pohmelfs/inode.c | 9
171 files changed, 3562 insertions, 2532 deletions
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5e41e6dd657b..db195abad698 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -155,7 +155,7 @@ struct aoedev {
155 u16 fw_ver; /* version of blade's firmware */ 155 u16 fw_ver; /* version of blade's firmware */
156 struct work_struct work;/* disk create work struct */ 156 struct work_struct work;/* disk create work struct */
157 struct gendisk *gd; 157 struct gendisk *gd;
158 struct request_queue blkq; 158 struct request_queue *blkq;
159 struct hd_geometry geo; 159 struct hd_geometry geo;
160 sector_t ssize; 160 sector_t ssize;
161 struct timer_list timer; 161 struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2307a271bdc9..95d344971eda 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -264,9 +264,13 @@ aoeblk_gdalloc(void *vp)
264 goto err_disk; 264 goto err_disk;
265 } 265 }
266 266
267 blk_queue_make_request(&d->blkq, aoeblk_make_request); 267 d->blkq = blk_alloc_queue(GFP_KERNEL);
268 if (bdi_init(&d->blkq.backing_dev_info)) 268 if (!d->blkq)
269 goto err_mempool; 269 goto err_mempool;
270 blk_queue_make_request(d->blkq, aoeblk_make_request);
271 d->blkq->backing_dev_info.name = "aoe";
272 if (bdi_init(&d->blkq->backing_dev_info))
273 goto err_blkq;
270 spin_lock_irqsave(&d->lock, flags); 274 spin_lock_irqsave(&d->lock, flags);
271 gd->major = AOE_MAJOR; 275 gd->major = AOE_MAJOR;
272 gd->first_minor = d->sysminor * AOE_PARTITIONS; 276 gd->first_minor = d->sysminor * AOE_PARTITIONS;
@@ -276,7 +280,7 @@ aoeblk_gdalloc(void *vp)
276 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", 280 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
277 d->aoemajor, d->aoeminor); 281 d->aoemajor, d->aoeminor);
278 282
279 gd->queue = &d->blkq; 283 gd->queue = d->blkq;
280 d->gd = gd; 284 d->gd = gd;
281 d->flags &= ~DEVFL_GDALLOC; 285 d->flags &= ~DEVFL_GDALLOC;
282 d->flags |= DEVFL_UP; 286 d->flags |= DEVFL_UP;
@@ -287,6 +291,9 @@ aoeblk_gdalloc(void *vp)
287 aoedisk_add_sysfs(d); 291 aoedisk_add_sysfs(d);
288 return; 292 return;
289 293
294err_blkq:
295 blk_cleanup_queue(d->blkq);
296 d->blkq = NULL;
290err_mempool: 297err_mempool:
291 mempool_destroy(d->bufpool); 298 mempool_destroy(d->bufpool);
292err_disk: 299err_disk:
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index eeea477d9601..fa67027789aa 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d)
113 if (d->bufpool) 113 if (d->bufpool)
114 mempool_destroy(d->bufpool); 114 mempool_destroy(d->bufpool);
115 skbpoolfree(d); 115 skbpoolfree(d);
116 blk_cleanup_queue(d->blkq);
116 kfree(d); 117 kfree(d);
117} 118}
118 119
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8c9d50db5c3a..c58557790585 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -49,6 +49,7 @@
49#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 49#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
50#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 50#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
51#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 51#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
52#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
52#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 53#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
53 54
54/* cover 915 and 945 variants */ 55/* cover 915 and 945 variants */
@@ -81,7 +82,8 @@
81 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ 82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ 83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ 84 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
84 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB) 85 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
86 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
85 87
86extern int agp_memory_reserved; 88extern int agp_memory_reserved;
87 89
@@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1216 case PCI_DEVICE_ID_INTEL_G41_HB: 1218 case PCI_DEVICE_ID_INTEL_G41_HB:
1217 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: 1219 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
1218 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: 1220 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
1221 case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
1219 *gtt_offset = *gtt_size = MB(2); 1222 *gtt_offset = *gtt_size = MB(2);
1220 break; 1223 break;
1221 default: 1224 default:
@@ -2195,6 +2198,8 @@ static const struct intel_driver_description {
2195 "IGDNG/D", NULL, &intel_i965_driver }, 2198 "IGDNG/D", NULL, &intel_i965_driver },
2196 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, 2199 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2197 "IGDNG/M", NULL, &intel_i965_driver }, 2200 "IGDNG/M", NULL, &intel_i965_driver },
2201 { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2202 "IGDNG/MA", NULL, &intel_i965_driver },
2198 { 0, 0, 0, NULL, NULL, NULL } 2203 { 0, 0, 0, NULL, NULL, NULL }
2199}; 2204};
2200 2205
@@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
2398 ID(PCI_DEVICE_ID_INTEL_G41_HB), 2403 ID(PCI_DEVICE_ID_INTEL_G41_HB),
2399 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), 2404 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
2400 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), 2405 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
2406 ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
2401 { } 2407 { }
2402}; 2408};
2403 2409
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 86105efb4eb6..0ecac7e532f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1006,7 +1006,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1006 priv->dev->release = (void (*)(struct device *)) kfree; 1006 priv->dev->release = (void (*)(struct device *)) kfree;
1007 rc = device_register(priv->dev); 1007 rc = device_register(priv->dev);
1008 if (rc) { 1008 if (rc) {
1009 kfree(priv->dev); 1009 put_device(priv->dev);
1010 goto out_error_dev; 1010 goto out_error_dev;
1011 } 1011 }
1012 1012
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index cd0ba51f7c80..0d8c5788b8e4 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -44,8 +44,8 @@
44 * want to register another driver on the same PCI id. 44 * want to register another driver on the same PCI id.
45 */ 45 */
46static const struct pci_device_id pci_tbl[] = { 46static const struct pci_device_id pci_tbl[] = {
47 { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, 47 { PCI_VDEVICE(AMD, 0x7443), 0, },
48 { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, 48 { PCI_VDEVICE(AMD, 0x746b), 0, },
49 { 0, }, /* terminate list */ 49 { 0, }, /* terminate list */
50}; 50};
51MODULE_DEVICE_TABLE(pci, pci_tbl); 51MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 64d513f68368..4c4d4e140f98 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -46,8 +46,7 @@
46 * want to register another driver on the same PCI id. 46 * want to register another driver on the same PCI id.
47 */ 47 */
48static const struct pci_device_id pci_tbl[] = { 48static const struct pci_device_id pci_tbl[] = {
49 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, 49 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
50 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
51 { 0, }, /* terminate list */ 50 { 0, }, /* terminate list */
52}; 51};
53MODULE_DEVICE_TABLE(pci, pci_tbl); 52MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index afa8813e737a..645237bda682 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -822,6 +822,7 @@ static const struct file_operations zero_fops = {
822 * - permits private mappings, "copies" are taken of the source of zeros 822 * - permits private mappings, "copies" are taken of the source of zeros
823 */ 823 */
824static struct backing_dev_info zero_bdi = { 824static struct backing_dev_info zero_bdi = {
825 .name = "char/mem",
825 .capabilities = BDI_CAP_MAP_COPY, 826 .capabilities = BDI_CAP_MAP_COPY,
826}; 827};
827 828
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 973be2f44195..4e28b35024ec 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
300 if (space < 2) 300 if (space < 2)
301 return -1; 301 return -1;
302 tty->canon_column = tty->column = 0; 302 tty->canon_column = tty->column = 0;
303 tty_put_char(tty, '\r'); 303 tty->ops->write(tty, "\r\n", 2);
304 tty_put_char(tty, c);
305 return 2; 304 return 2;
306 } 305 }
307 tty->canon_column = tty->column; 306 tty->canon_column = tty->column;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d083c73d784a..b33d6688e910 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
109 * the other side of the pty/tty pair. 109 * the other side of the pty/tty pair.
110 */ 110 */
111 111
112static int pty_write(struct tty_struct *tty, const unsigned char *buf, 112static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
113 int count)
114{ 113{
115 struct tty_struct *to = tty->link; 114 struct tty_struct *to = tty->link;
116 int c;
117 115
118 if (tty->stopped) 116 if (tty->stopped)
119 return 0; 117 return 0;
120 118
121 /* This isn't locked but our 8K is quite sloppy so no
122 big deal */
123
124 c = pty_space(to);
125 if (c > count)
126 c = count;
127 if (c > 0) { 119 if (c > 0) {
128 /* Stuff the data into the input queue of the other end */ 120 /* Stuff the data into the input queue of the other end */
129 c = tty_insert_flip_string(to, buf, c); 121 c = tty_insert_flip_string(to, buf, c);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8c7444857a4b..d8a9255e1a3f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -240,6 +240,7 @@
240#include <linux/spinlock.h> 240#include <linux/spinlock.h>
241#include <linux/percpu.h> 241#include <linux/percpu.h>
242#include <linux/cryptohash.h> 242#include <linux/cryptohash.h>
243#include <linux/fips.h>
243 244
244#ifdef CONFIG_GENERIC_HARDIRQS 245#ifdef CONFIG_GENERIC_HARDIRQS
245# include <linux/irq.h> 246# include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
413 unsigned add_ptr; 414 unsigned add_ptr;
414 int entropy_count; 415 int entropy_count;
415 int input_rotate; 416 int input_rotate;
417 __u8 *last_data;
416}; 418};
417 419
418static __u32 input_pool_data[INPUT_POOL_WORDS]; 420static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
852{ 854{
853 ssize_t ret = 0, i; 855 ssize_t ret = 0, i;
854 __u8 tmp[EXTRACT_SIZE]; 856 __u8 tmp[EXTRACT_SIZE];
857 unsigned long flags;
855 858
856 xfer_secondary_pool(r, nbytes); 859 xfer_secondary_pool(r, nbytes);
857 nbytes = account(r, nbytes, min, reserved); 860 nbytes = account(r, nbytes, min, reserved);
858 861
859 while (nbytes) { 862 while (nbytes) {
860 extract_buf(r, tmp); 863 extract_buf(r, tmp);
864
865 if (r->last_data) {
866 spin_lock_irqsave(&r->lock, flags);
867 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
868 panic("Hardware RNG duplicated output!\n");
869 memcpy(r->last_data, tmp, EXTRACT_SIZE);
870 spin_unlock_irqrestore(&r->lock, flags);
871 }
861 i = min_t(int, nbytes, EXTRACT_SIZE); 872 i = min_t(int, nbytes, EXTRACT_SIZE);
862 memcpy(buf, tmp, i); 873 memcpy(buf, tmp, i);
863 nbytes -= i; 874 nbytes -= i;
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
940 now = ktime_get_real(); 951 now = ktime_get_real();
941 mix_pool_bytes(r, &now, sizeof(now)); 952 mix_pool_bytes(r, &now, sizeof(now));
942 mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); 953 mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
954 /* Enable continuous test in fips mode */
955 if (fips_enabled)
956 r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
943} 957}
944 958
945static int rand_initialize(void) 959static int rand_initialize(void)
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5d7a02f63e1c..50eecfe1d724 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -24,6 +24,7 @@
24#include <linux/sysrq.h> 24#include <linux/sysrq.h>
25#include <linux/kbd_kern.h> 25#include <linux/kbd_kern.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/nmi.h>
27#include <linux/quotaops.h> 28#include <linux/quotaops.h>
28#include <linux/perf_counter.h> 29#include <linux/perf_counter.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -222,12 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
222 223
223static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) 224static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
224{ 225{
225 struct pt_regs *regs = get_irq_regs(); 226 /*
226 if (regs) { 227 * Fall back to the workqueue based printing if the
227 printk(KERN_INFO "CPU%d:\n", smp_processor_id()); 228 * backtrace printing did not succeed or the
228 show_regs(regs); 229 * architecture has no support for it:
230 */
231 if (!trigger_all_cpu_backtrace()) {
232 struct pt_regs *regs = get_irq_regs();
233
234 if (regs) {
235 printk(KERN_INFO "CPU%d:\n", smp_processor_id());
236 show_regs(regs);
237 }
238 schedule_work(&sysrq_showallcpus);
229 } 239 }
230 schedule_work(&sysrq_showallcpus);
231} 240}
232 241
233static struct sysrq_key_op sysrq_showallcpus_op = { 242static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index aec1931608aa..0b73e4ec1add 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
450 goto out_err; 450 goto out_err;
451 } 451 }
452 452
453 /* Default timeouts */
454 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
455 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
456 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
457 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
458
453 if (request_locality(chip, 0) != 0) { 459 if (request_locality(chip, 0) != 0) {
454 rc = -ENODEV; 460 rc = -ENODEV;
455 goto out_err; 461 goto out_err;
@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
457 463
458 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); 464 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
459 465
460 /* Default timeouts */
461 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
462 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
463 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
464 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
465
466 dev_info(dev, 466 dev_info(dev,
467 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 467 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
468 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 468 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fd69086d08d5..2968ed6a9c49 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1250{ 1250{
1251 int ret = 0; 1251 int ret = 0;
1252 1252
1253#ifdef __powerpc__
1254 int cpu = sysdev->id; 1253 int cpu = sysdev->id;
1255 unsigned int cur_freq = 0;
1256 struct cpufreq_policy *cpu_policy; 1254 struct cpufreq_policy *cpu_policy;
1257 1255
1258 dprintk("suspending cpu %u\n", cpu); 1256 dprintk("suspending cpu %u\n", cpu);
1259 1257
1260 /*
1261 * This whole bogosity is here because Powerbooks are made of fail.
1262 * No sane platform should need any of the code below to be run.
1263 * (it's entirely the wrong thing to do, as driver->get may
1264 * reenable interrupts on some architectures).
1265 */
1266
1267 if (!cpu_online(cpu)) 1258 if (!cpu_online(cpu))
1268 return 0; 1259 return 0;
1269 1260
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1282 1273
1283 if (cpufreq_driver->suspend) { 1274 if (cpufreq_driver->suspend) {
1284 ret = cpufreq_driver->suspend(cpu_policy, pmsg); 1275 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
1285 if (ret) { 1276 if (ret)
1286 printk(KERN_ERR "cpufreq: suspend failed in ->suspend " 1277 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1287 "step on CPU %u\n", cpu_policy->cpu); 1278 "step on CPU %u\n", cpu_policy->cpu);
1288 goto out;
1289 }
1290 }
1291
1292 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
1293 goto out;
1294
1295 if (cpufreq_driver->get)
1296 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1297
1298 if (!cur_freq || !cpu_policy->cur) {
1299 printk(KERN_ERR "cpufreq: suspend failed to assert current "
1300 "frequency is what timing core thinks it is.\n");
1301 goto out;
1302 }
1303
1304 if (unlikely(cur_freq != cpu_policy->cur)) {
1305 struct cpufreq_freqs freqs;
1306
1307 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1308 dprintk("Warning: CPU frequency is %u, "
1309 "cpufreq assumed %u kHz.\n",
1310 cur_freq, cpu_policy->cur);
1311
1312 freqs.cpu = cpu;
1313 freqs.old = cpu_policy->cur;
1314 freqs.new = cur_freq;
1315
1316 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
1317 CPUFREQ_SUSPENDCHANGE, &freqs);
1318 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1319
1320 cpu_policy->cur = cur_freq;
1321 } 1279 }
1322 1280
1323out: 1281out:
1324 cpufreq_cpu_put(cpu_policy); 1282 cpufreq_cpu_put(cpu_policy);
1325#endif /* __powerpc__ */
1326 return ret; 1283 return ret;
1327} 1284}
1328 1285
@@ -1330,24 +1287,21 @@ out:
1330 * cpufreq_resume - restore proper CPU frequency handling after resume 1287 * cpufreq_resume - restore proper CPU frequency handling after resume
1331 * 1288 *
1332 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) 1289 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1333 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync 1290 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1334 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are 1291 * restored. It will verify that the current freq is in sync with
1335 * restored. 1292 * what we believe it to be. This is a bit later than when it
1293 * should be, but nonethteless it's better than calling
1294 * cpufreq_driver->get() here which might re-enable interrupts...
1336 */ 1295 */
1337static int cpufreq_resume(struct sys_device *sysdev) 1296static int cpufreq_resume(struct sys_device *sysdev)
1338{ 1297{
1339 int ret = 0; 1298 int ret = 0;
1340 1299
1341#ifdef __powerpc__
1342 int cpu = sysdev->id; 1300 int cpu = sysdev->id;
1343 struct cpufreq_policy *cpu_policy; 1301 struct cpufreq_policy *cpu_policy;
1344 1302
1345 dprintk("resuming cpu %u\n", cpu); 1303 dprintk("resuming cpu %u\n", cpu);
1346 1304
1347 /* As with the ->suspend method, all the code below is
1348 * only necessary because Powerbooks suck.
1349 * See commit 42d4dc3f4e1e for jokes. */
1350
1351 if (!cpu_online(cpu)) 1305 if (!cpu_online(cpu))
1352 return 0; 1306 return 0;
1353 1307
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
1373 } 1327 }
1374 } 1328 }
1375 1329
1376 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1377 unsigned int cur_freq = 0;
1378
1379 if (cpufreq_driver->get)
1380 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1381
1382 if (!cur_freq || !cpu_policy->cur) {
1383 printk(KERN_ERR "cpufreq: resume failed to assert "
1384 "current frequency is what timing core "
1385 "thinks it is.\n");
1386 goto out;
1387 }
1388
1389 if (unlikely(cur_freq != cpu_policy->cur)) {
1390 struct cpufreq_freqs freqs;
1391
1392 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1393 dprintk("Warning: CPU frequency "
1394 "is %u, cpufreq assumed %u kHz.\n",
1395 cur_freq, cpu_policy->cur);
1396
1397 freqs.cpu = cpu;
1398 freqs.old = cpu_policy->cur;
1399 freqs.new = cur_freq;
1400
1401 srcu_notifier_call_chain(
1402 &cpufreq_transition_notifier_list,
1403 CPUFREQ_RESUMECHANGE, &freqs);
1404 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1405
1406 cpu_policy->cur = cur_freq;
1407 }
1408 }
1409
1410out:
1411 schedule_work(&cpu_policy->update); 1330 schedule_work(&cpu_policy->update);
1331
1412fail: 1332fail:
1413 cpufreq_cpu_put(cpu_policy); 1333 cpufreq_cpu_put(cpu_policy);
1414#endif /* __powerpc__ */
1415 return ret; 1334 return ret;
1416} 1335}
1417 1336
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372bf..b08403d7d1ca 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
13config CRYPTO_DEV_PADLOCK 13config CRYPTO_DEV_PADLOCK
14 tristate "Support for VIA PadLock ACE" 14 tristate "Support for VIA PadLock ACE"
15 depends on X86 && !UML 15 depends on X86 && !UML
16 select CRYPTO_ALGAPI
17 help 16 help
18 Some VIA processors come with an integrated crypto engine 17 Some VIA processors come with an integrated crypto engine
19 (so called VIA PadLock ACE, Advanced Cryptography Engine) 18 (so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
39config CRYPTO_DEV_PADLOCK_SHA 38config CRYPTO_DEV_PADLOCK_SHA
40 tristate "PadLock driver for SHA1 and SHA256 algorithms" 39 tristate "PadLock driver for SHA1 and SHA256 algorithms"
41 depends on CRYPTO_DEV_PADLOCK 40 depends on CRYPTO_DEV_PADLOCK
41 select CRYPTO_HASH
42 select CRYPTO_SHA1 42 select CRYPTO_SHA1
43 select CRYPTO_SHA256 43 select CRYPTO_SHA256
44 help 44 help
@@ -157,6 +157,19 @@ config S390_PRNG
157 ANSI X9.17 standard. The PRNG is usable via the char device 157 ANSI X9.17 standard. The PRNG is usable via the char device
158 /dev/prandom. 158 /dev/prandom.
159 159
160config CRYPTO_DEV_MV_CESA
161 tristate "Marvell's Cryptographic Engine"
162 depends on PLAT_ORION
163 select CRYPTO_ALGAPI
164 select CRYPTO_AES
165 select CRYPTO_BLKCIPHER2
166 help
167 This driver allows you to utilize the Cryptographic Engines and
168 Security Accelerator (CESA) which can be found on the Marvell Orion
169 and Kirkwood SoCs, such as QNAP's TS-209.
170
171 Currently the driver supports AES in ECB and CBC mode without DMA.
172
160config CRYPTO_DEV_HIFN_795X 173config CRYPTO_DEV_HIFN_795X
161 tristate "Driver HIFN 795x crypto accelerator chips" 174 tristate "Driver HIFN 795x crypto accelerator chips"
162 select CRYPTO_DES 175 select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc8846..6ffcb3f7f942 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 6obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 7obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
7obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ 8obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 61b6e1bec8c6..a33243c17b00 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
208 } 208 }
209 } 209 }
210 210
211 tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); 211 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
212 sizeof(struct crypto4xx_ctx));
212 sa = (struct dynamic_sa_ctl *) ctx->sa_in; 213 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
213 set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, 214 set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
214 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, 215 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 4c0dfb2b872e..46e899ac924e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -31,8 +31,6 @@
31#include <asm/dcr.h> 31#include <asm/dcr.h>
32#include <asm/dcr-regs.h> 32#include <asm/dcr-regs.h>
33#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
34#include <crypto/internal/hash.h>
35#include <crypto/algapi.h>
36#include <crypto/aes.h> 34#include <crypto/aes.h>
37#include <crypto/sha.h> 35#include <crypto/sha.h>
38#include "crypto4xx_reg_def.h" 36#include "crypto4xx_reg_def.h"
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
998 ctx->sa_out_dma_addr = 0; 996 ctx->sa_out_dma_addr = 0;
999 ctx->sa_len = 0; 997 ctx->sa_len = 0;
1000 998
1001 if (alg->cra_type == &crypto_ablkcipher_type) 999 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1000 default:
1002 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); 1001 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
1003 else if (alg->cra_type == &crypto_ahash_type) 1002 break;
1004 tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); 1003 case CRYPTO_ALG_TYPE_AHASH:
1004 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1005 sizeof(struct crypto4xx_ctx));
1006 break;
1007 }
1005 1008
1006 return 0; 1009 return 0;
1007} 1010}
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1015} 1018}
1016 1019
1017int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, 1020int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1018 struct crypto_alg *crypto_alg, int array_size) 1021 struct crypto4xx_alg_common *crypto_alg,
1022 int array_size)
1019{ 1023{
1020 struct crypto4xx_alg *alg; 1024 struct crypto4xx_alg *alg;
1021 int i; 1025 int i;
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1027 return -ENOMEM; 1031 return -ENOMEM;
1028 1032
1029 alg->alg = crypto_alg[i]; 1033 alg->alg = crypto_alg[i];
1030 INIT_LIST_HEAD(&alg->alg.cra_list);
1031 if (alg->alg.cra_init == NULL)
1032 alg->alg.cra_init = crypto4xx_alg_init;
1033 if (alg->alg.cra_exit == NULL)
1034 alg->alg.cra_exit = crypto4xx_alg_exit;
1035 alg->dev = sec_dev; 1034 alg->dev = sec_dev;
1036 rc = crypto_register_alg(&alg->alg); 1035
1036 switch (alg->alg.type) {
1037 case CRYPTO_ALG_TYPE_AHASH:
1038 rc = crypto_register_ahash(&alg->alg.u.hash);
1039 break;
1040
1041 default:
1042 rc = crypto_register_alg(&alg->alg.u.cipher);
1043 break;
1044 }
1045
1037 if (rc) { 1046 if (rc) {
1038 list_del(&alg->entry); 1047 list_del(&alg->entry);
1039 kfree(alg); 1048 kfree(alg);
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1051 1060
1052 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { 1061 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1053 list_del(&alg->entry); 1062 list_del(&alg->entry);
1054 crypto_unregister_alg(&alg->alg); 1063 switch (alg->alg.type) {
1064 case CRYPTO_ALG_TYPE_AHASH:
1065 crypto_unregister_ahash(&alg->alg.u.hash);
1066 break;
1067
1068 default:
1069 crypto_unregister_alg(&alg->alg.u.cipher);
1070 }
1055 kfree(alg); 1071 kfree(alg);
1056 } 1072 }
1057} 1073}
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1104/** 1120/**
1105 * Supported Crypto Algorithms 1121 * Supported Crypto Algorithms
1106 */ 1122 */
1107struct crypto_alg crypto4xx_alg[] = { 1123struct crypto4xx_alg_common crypto4xx_alg[] = {
1108 /* Crypto AES modes */ 1124 /* Crypto AES modes */
1109 { 1125 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
1110 .cra_name = "cbc(aes)", 1126 .cra_name = "cbc(aes)",
1111 .cra_driver_name = "cbc-aes-ppc4xx", 1127 .cra_driver_name = "cbc-aes-ppc4xx",
1112 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1128 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1113 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 1129 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1114 .cra_blocksize = AES_BLOCK_SIZE, 1130 .cra_blocksize = AES_BLOCK_SIZE,
1115 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1131 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1116 .cra_alignmask = 0,
1117 .cra_type = &crypto_ablkcipher_type, 1132 .cra_type = &crypto_ablkcipher_type,
1133 .cra_init = crypto4xx_alg_init,
1134 .cra_exit = crypto4xx_alg_exit,
1118 .cra_module = THIS_MODULE, 1135 .cra_module = THIS_MODULE,
1119 .cra_u = { 1136 .cra_u = {
1120 .ablkcipher = { 1137 .ablkcipher = {
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
1126 .decrypt = crypto4xx_decrypt, 1143 .decrypt = crypto4xx_decrypt,
1127 } 1144 }
1128 } 1145 }
1129 }, 1146 }},
1130 /* Hash SHA1 */
1131 {
1132 .cra_name = "sha1",
1133 .cra_driver_name = "sha1-ppc4xx",
1134 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1135 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
1136 .cra_blocksize = SHA1_BLOCK_SIZE,
1137 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1138 .cra_alignmask = 0,
1139 .cra_type = &crypto_ahash_type,
1140 .cra_init = crypto4xx_sha1_alg_init,
1141 .cra_module = THIS_MODULE,
1142 .cra_u = {
1143 .ahash = {
1144 .digestsize = SHA1_DIGEST_SIZE,
1145 .init = crypto4xx_hash_init,
1146 .update = crypto4xx_hash_update,
1147 .final = crypto4xx_hash_final,
1148 .digest = crypto4xx_hash_digest,
1149 }
1150 }
1151 },
1152}; 1147};
1153 1148
1154/** 1149/**
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 1ef103449364..da9cbe3b9fc3 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,6 +22,8 @@
22#ifndef __CRYPTO4XX_CORE_H__ 22#ifndef __CRYPTO4XX_CORE_H__
23#define __CRYPTO4XX_CORE_H__ 23#define __CRYPTO4XX_CORE_H__
24 24
25#include <crypto/internal/hash.h>
26
25#define PPC460SX_SDR0_SRST 0x201 27#define PPC460SX_SDR0_SRST 0x201
26#define PPC405EX_SDR0_SRST 0x200 28#define PPC405EX_SDR0_SRST 0x200
27#define PPC460EX_SDR0_SRST 0x201 29#define PPC460EX_SDR0_SRST 0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
138 u16 sa_len; 140 u16 sa_len;
139}; 141};
140 142
143struct crypto4xx_alg_common {
144 u32 type;
145 union {
146 struct crypto_alg cipher;
147 struct ahash_alg hash;
148 } u;
149};
150
141struct crypto4xx_alg { 151struct crypto4xx_alg {
142 struct list_head entry; 152 struct list_head entry;
143 struct crypto_alg alg; 153 struct crypto4xx_alg_common alg;
144 struct crypto4xx_device *dev; 154 struct crypto4xx_device *dev;
145}; 155};
146 156
147#define crypto_alg_to_crypto4xx_alg(x) \ 157static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
148 container_of(x, struct crypto4xx_alg, alg) 158 struct crypto_alg *x)
159{
160 switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
161 case CRYPTO_ALG_TYPE_AHASH:
162 return container_of(__crypto_ahash_alg(x),
163 struct crypto4xx_alg, alg.u.hash);
164 }
165
166 return container_of(x, struct crypto4xx_alg, alg.u.cipher);
167}
149 168
150extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); 169extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
151extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); 170extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 000000000000..b21ef635f352
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
1/*
2 * Support for Marvell's crypto engine which can be found on some Orion5X
3 * boards.
4 *
5 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
6 * License: GPLv2
7 *
8 */
9#include <crypto/aes.h>
10#include <crypto/algapi.h>
11#include <linux/crypto.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/kthread.h>
15#include <linux/platform_device.h>
16#include <linux/scatterlist.h>
17
18#include "mv_cesa.h"
19/*
20 * STM:
21 * /---------------------------------------\
22 * | | request complete
23 * \./ |
24 * IDLE -> new request -> BUSY -> done -> DEQUEUE
25 * /°\ |
26 * | | more scatter entries
27 * \________________/
28 */
29enum engine_status {
30 ENGINE_IDLE,
31 ENGINE_BUSY,
32 ENGINE_W_DEQUEUE,
33};
34
35/**
36 * struct req_progress - used for every crypt request
37 * @src_sg_it: sg iterator for src
38 * @dst_sg_it: sg iterator for dst
39 * @sg_src_left: bytes left in src to process (scatter list)
40 * @src_start: offset to add to src start position (scatter list)
41 * @crypt_len: length of current crypt process
42 * @sg_dst_left: bytes left dst to process in this scatter list
43 * @dst_start: offset to add to dst start position (scatter list)
44 * @total_req_bytes: total number of bytes processed (request).
45 *
46 * sg helper are used to iterate over the scatterlist. Since the size of the
47 * SRAM may be less than the scatter size, this struct struct is used to keep
48 * track of progress within current scatterlist.
49 */
50struct req_progress {
51 struct sg_mapping_iter src_sg_it;
52 struct sg_mapping_iter dst_sg_it;
53
54 /* src mostly */
55 int sg_src_left;
56 int src_start;
57 int crypt_len;
58 /* dst mostly */
59 int sg_dst_left;
60 int dst_start;
61 int total_req_bytes;
62};
63
64struct crypto_priv {
65 void __iomem *reg;
66 void __iomem *sram;
67 int irq;
68 struct task_struct *queue_th;
69
70 /* the lock protects queue and eng_st */
71 spinlock_t lock;
72 struct crypto_queue queue;
73 enum engine_status eng_st;
74 struct ablkcipher_request *cur_req;
75 struct req_progress p;
76 int max_req_size;
77 int sram_size;
78};
79
80static struct crypto_priv *cpg;
81
82struct mv_ctx {
83 u8 aes_enc_key[AES_KEY_LEN];
84 u32 aes_dec_key[8];
85 int key_len;
86 u32 need_calc_aes_dkey;
87};
88
89enum crypto_op {
90 COP_AES_ECB,
91 COP_AES_CBC,
92};
93
94struct mv_req_ctx {
95 enum crypto_op op;
96 int decrypt;
97};
98
99static void compute_aes_dec_key(struct mv_ctx *ctx)
100{
101 struct crypto_aes_ctx gen_aes_key;
102 int key_pos;
103
104 if (!ctx->need_calc_aes_dkey)
105 return;
106
107 crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
108
109 key_pos = ctx->key_len + 24;
110 memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
111 switch (ctx->key_len) {
112 case AES_KEYSIZE_256:
113 key_pos -= 2;
114 /* fall */
115 case AES_KEYSIZE_192:
116 key_pos -= 2;
117 memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
118 4 * 4);
119 break;
120 }
121 ctx->need_calc_aes_dkey = 0;
122}
123
124static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
125 unsigned int len)
126{
127 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
128 struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
129
130 switch (len) {
131 case AES_KEYSIZE_128:
132 case AES_KEYSIZE_192:
133 case AES_KEYSIZE_256:
134 break;
135 default:
136 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
137 return -EINVAL;
138 }
139 ctx->key_len = len;
140 ctx->need_calc_aes_dkey = 1;
141
142 memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
143 return 0;
144}
145
146static void setup_data_in(struct ablkcipher_request *req)
147{
148 int ret;
149 void *buf;
150
151 if (!cpg->p.sg_src_left) {
152 ret = sg_miter_next(&cpg->p.src_sg_it);
153 BUG_ON(!ret);
154 cpg->p.sg_src_left = cpg->p.src_sg_it.length;
155 cpg->p.src_start = 0;
156 }
157
158 cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
159
160 buf = cpg->p.src_sg_it.addr;
161 buf += cpg->p.src_start;
162
163 memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
164
165 cpg->p.sg_src_left -= cpg->p.crypt_len;
166 cpg->p.src_start += cpg->p.crypt_len;
167}
168
169static void mv_process_current_q(int first_block)
170{
171 struct ablkcipher_request *req = cpg->cur_req;
172 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
173 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
174 struct sec_accel_config op;
175
176 switch (req_ctx->op) {
177 case COP_AES_ECB:
178 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
179 break;
180 case COP_AES_CBC:
181 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
182 op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
183 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
184 if (first_block)
185 memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
186 break;
187 }
188 if (req_ctx->decrypt) {
189 op.config |= CFG_DIR_DEC;
190 memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
191 AES_KEY_LEN);
192 } else {
193 op.config |= CFG_DIR_ENC;
194 memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
195 AES_KEY_LEN);
196 }
197
198 switch (ctx->key_len) {
199 case AES_KEYSIZE_128:
200 op.config |= CFG_AES_LEN_128;
201 break;
202 case AES_KEYSIZE_192:
203 op.config |= CFG_AES_LEN_192;
204 break;
205 case AES_KEYSIZE_256:
206 op.config |= CFG_AES_LEN_256;
207 break;
208 }
209 op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
210 ENC_P_DST(SRAM_DATA_OUT_START);
211 op.enc_key_p = SRAM_DATA_KEY_P;
212
213 setup_data_in(req);
214 op.enc_len = cpg->p.crypt_len;
215 memcpy(cpg->sram + SRAM_CONFIG, &op,
216 sizeof(struct sec_accel_config));
217
218 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
219 /* GO */
220 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
221
222 /*
223 * XXX: add timer if the interrupt does not occur for some mystery
224 * reason
225 */
226}
227
228static void mv_crypto_algo_completion(void)
229{
230 struct ablkcipher_request *req = cpg->cur_req;
231 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
232
233 if (req_ctx->op != COP_AES_CBC)
234 return ;
235
236 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
237}
238
239static void dequeue_complete_req(void)
240{
241 struct ablkcipher_request *req = cpg->cur_req;
242 void *buf;
243 int ret;
244
245 cpg->p.total_req_bytes += cpg->p.crypt_len;
246 do {
247 int dst_copy;
248
249 if (!cpg->p.sg_dst_left) {
250 ret = sg_miter_next(&cpg->p.dst_sg_it);
251 BUG_ON(!ret);
252 cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
253 cpg->p.dst_start = 0;
254 }
255
256 buf = cpg->p.dst_sg_it.addr;
257 buf += cpg->p.dst_start;
258
259 dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
260
261 memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
262
263 cpg->p.sg_dst_left -= dst_copy;
264 cpg->p.crypt_len -= dst_copy;
265 cpg->p.dst_start += dst_copy;
266 } while (cpg->p.crypt_len > 0);
267
268 BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
269 if (cpg->p.total_req_bytes < req->nbytes) {
270 /* process next scatter list entry */
271 cpg->eng_st = ENGINE_BUSY;
272 mv_process_current_q(0);
273 } else {
274 sg_miter_stop(&cpg->p.src_sg_it);
275 sg_miter_stop(&cpg->p.dst_sg_it);
276 mv_crypto_algo_completion();
277 cpg->eng_st = ENGINE_IDLE;
278 req->base.complete(&req->base, 0);
279 }
280}
281
282static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
283{
284 int i = 0;
285
286 do {
287 total_bytes -= sl[i].length;
288 i++;
289
290 } while (total_bytes > 0);
291
292 return i;
293}
294
295static void mv_enqueue_new_req(struct ablkcipher_request *req)
296{
297 int num_sgs;
298
299 cpg->cur_req = req;
300 memset(&cpg->p, 0, sizeof(struct req_progress));
301
302 num_sgs = count_sgs(req->src, req->nbytes);
303 sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
304
305 num_sgs = count_sgs(req->dst, req->nbytes);
306 sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
307 mv_process_current_q(1);
308}
309
310static int queue_manag(void *data)
311{
312 cpg->eng_st = ENGINE_IDLE;
313 do {
314 struct ablkcipher_request *req;
315 struct crypto_async_request *async_req = NULL;
316 struct crypto_async_request *backlog;
317
318 __set_current_state(TASK_INTERRUPTIBLE);
319
320 if (cpg->eng_st == ENGINE_W_DEQUEUE)
321 dequeue_complete_req();
322
323 spin_lock_irq(&cpg->lock);
324 if (cpg->eng_st == ENGINE_IDLE) {
325 backlog = crypto_get_backlog(&cpg->queue);
326 async_req = crypto_dequeue_request(&cpg->queue);
327 if (async_req) {
328 BUG_ON(cpg->eng_st != ENGINE_IDLE);
329 cpg->eng_st = ENGINE_BUSY;
330 }
331 }
332 spin_unlock_irq(&cpg->lock);
333
334 if (backlog) {
335 backlog->complete(backlog, -EINPROGRESS);
336 backlog = NULL;
337 }
338
339 if (async_req) {
340 req = container_of(async_req,
341 struct ablkcipher_request, base);
342 mv_enqueue_new_req(req);
343 async_req = NULL;
344 }
345
346 schedule();
347
348 } while (!kthread_should_stop());
349 return 0;
350}
351
352static int mv_handle_req(struct ablkcipher_request *req)
353{
354 unsigned long flags;
355 int ret;
356
357 spin_lock_irqsave(&cpg->lock, flags);
358 ret = ablkcipher_enqueue_request(&cpg->queue, req);
359 spin_unlock_irqrestore(&cpg->lock, flags);
360 wake_up_process(cpg->queue_th);
361 return ret;
362}
363
364static int mv_enc_aes_ecb(struct ablkcipher_request *req)
365{
366 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
367
368 req_ctx->op = COP_AES_ECB;
369 req_ctx->decrypt = 0;
370
371 return mv_handle_req(req);
372}
373
374static int mv_dec_aes_ecb(struct ablkcipher_request *req)
375{
376 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
377 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
378
379 req_ctx->op = COP_AES_ECB;
380 req_ctx->decrypt = 1;
381
382 compute_aes_dec_key(ctx);
383 return mv_handle_req(req);
384}
385
386static int mv_enc_aes_cbc(struct ablkcipher_request *req)
387{
388 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
389
390 req_ctx->op = COP_AES_CBC;
391 req_ctx->decrypt = 0;
392
393 return mv_handle_req(req);
394}
395
396static int mv_dec_aes_cbc(struct ablkcipher_request *req)
397{
398 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
399 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
400
401 req_ctx->op = COP_AES_CBC;
402 req_ctx->decrypt = 1;
403
404 compute_aes_dec_key(ctx);
405 return mv_handle_req(req);
406}
407
408static int mv_cra_init(struct crypto_tfm *tfm)
409{
410 tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
411 return 0;
412}
413
414irqreturn_t crypto_int(int irq, void *priv)
415{
416 u32 val;
417
418 val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
419 if (!(val & SEC_INT_ACCEL0_DONE))
420 return IRQ_NONE;
421
422 val &= ~SEC_INT_ACCEL0_DONE;
423 writel(val, cpg->reg + FPGA_INT_STATUS);
424 writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
425 BUG_ON(cpg->eng_st != ENGINE_BUSY);
426 cpg->eng_st = ENGINE_W_DEQUEUE;
427 wake_up_process(cpg->queue_th);
428 return IRQ_HANDLED;
429}
430
431struct crypto_alg mv_aes_alg_ecb = {
432 .cra_name = "ecb(aes)",
433 .cra_driver_name = "mv-ecb-aes",
434 .cra_priority = 300,
435 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
436 .cra_blocksize = 16,
437 .cra_ctxsize = sizeof(struct mv_ctx),
438 .cra_alignmask = 0,
439 .cra_type = &crypto_ablkcipher_type,
440 .cra_module = THIS_MODULE,
441 .cra_init = mv_cra_init,
442 .cra_u = {
443 .ablkcipher = {
444 .min_keysize = AES_MIN_KEY_SIZE,
445 .max_keysize = AES_MAX_KEY_SIZE,
446 .setkey = mv_setkey_aes,
447 .encrypt = mv_enc_aes_ecb,
448 .decrypt = mv_dec_aes_ecb,
449 },
450 },
451};
452
453struct crypto_alg mv_aes_alg_cbc = {
454 .cra_name = "cbc(aes)",
455 .cra_driver_name = "mv-cbc-aes",
456 .cra_priority = 300,
457 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
458 .cra_blocksize = AES_BLOCK_SIZE,
459 .cra_ctxsize = sizeof(struct mv_ctx),
460 .cra_alignmask = 0,
461 .cra_type = &crypto_ablkcipher_type,
462 .cra_module = THIS_MODULE,
463 .cra_init = mv_cra_init,
464 .cra_u = {
465 .ablkcipher = {
466 .ivsize = AES_BLOCK_SIZE,
467 .min_keysize = AES_MIN_KEY_SIZE,
468 .max_keysize = AES_MAX_KEY_SIZE,
469 .setkey = mv_setkey_aes,
470 .encrypt = mv_enc_aes_cbc,
471 .decrypt = mv_dec_aes_cbc,
472 },
473 },
474};
475
476static int mv_probe(struct platform_device *pdev)
477{
478 struct crypto_priv *cp;
479 struct resource *res;
480 int irq;
481 int ret;
482
483 if (cpg) {
484 printk(KERN_ERR "Second crypto dev?\n");
485 return -EEXIST;
486 }
487
488 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
489 if (!res)
490 return -ENXIO;
491
492 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
493 if (!cp)
494 return -ENOMEM;
495
496 spin_lock_init(&cp->lock);
497 crypto_init_queue(&cp->queue, 50);
498 cp->reg = ioremap(res->start, res->end - res->start + 1);
499 if (!cp->reg) {
500 ret = -ENOMEM;
501 goto err;
502 }
503
504 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
505 if (!res) {
506 ret = -ENXIO;
507 goto err_unmap_reg;
508 }
509 cp->sram_size = res->end - res->start + 1;
510 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
511 cp->sram = ioremap(res->start, cp->sram_size);
512 if (!cp->sram) {
513 ret = -ENOMEM;
514 goto err_unmap_reg;
515 }
516
517 irq = platform_get_irq(pdev, 0);
518 if (irq < 0 || irq == NO_IRQ) {
519 ret = irq;
520 goto err_unmap_sram;
521 }
522 cp->irq = irq;
523
524 platform_set_drvdata(pdev, cp);
525 cpg = cp;
526
527 cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
528 if (IS_ERR(cp->queue_th)) {
529 ret = PTR_ERR(cp->queue_th);
530 goto err_thread;
531 }
532
533 ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
534 cp);
535 if (ret)
536 goto err_unmap_sram;
537
538 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
539 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
540
541 ret = crypto_register_alg(&mv_aes_alg_ecb);
542 if (ret)
543 goto err_reg;
544
545 ret = crypto_register_alg(&mv_aes_alg_cbc);
546 if (ret)
547 goto err_unreg_ecb;
548 return 0;
549err_unreg_ecb:
550 crypto_unregister_alg(&mv_aes_alg_ecb);
551err_thread:
552 free_irq(irq, cp);
553err_reg:
554 kthread_stop(cp->queue_th);
555err_unmap_sram:
556 iounmap(cp->sram);
557err_unmap_reg:
558 iounmap(cp->reg);
559err:
560 kfree(cp);
561 cpg = NULL;
562 platform_set_drvdata(pdev, NULL);
563 return ret;
564}
565
566static int mv_remove(struct platform_device *pdev)
567{
568 struct crypto_priv *cp = platform_get_drvdata(pdev);
569
570 crypto_unregister_alg(&mv_aes_alg_ecb);
571 crypto_unregister_alg(&mv_aes_alg_cbc);
572 kthread_stop(cp->queue_th);
573 free_irq(cp->irq, cp);
574 memset(cp->sram, 0, cp->sram_size);
575 iounmap(cp->sram);
576 iounmap(cp->reg);
577 kfree(cp);
578 cpg = NULL;
579 return 0;
580}
581
582static struct platform_driver marvell_crypto = {
583 .probe = mv_probe,
584 .remove = mv_remove,
585 .driver = {
586 .owner = THIS_MODULE,
587 .name = "mv_crypto",
588 },
589};
590MODULE_ALIAS("platform:mv_crypto");
591
592static int __init mv_crypto_init(void)
593{
594 return platform_driver_register(&marvell_crypto);
595}
596module_init(mv_crypto_init);
597
598static void __exit mv_crypto_exit(void)
599{
600 platform_driver_unregister(&marvell_crypto);
601}
602module_exit(mv_crypto_exit);
603
604MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
605MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
606MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 000000000000..c3e25d3bb171
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
1#ifndef __MV_CRYPTO_H__
2
3#define DIGEST_INITIAL_VAL_A 0xdd00
4#define DES_CMD_REG 0xdd58
5
6#define SEC_ACCEL_CMD 0xde00
7#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
8#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
9#define SEC_CMD_DISABLE_SEC (1 << 2)
10
11#define SEC_ACCEL_DESC_P0 0xde04
12#define SEC_DESC_P0_PTR(x) (x)
13
14#define SEC_ACCEL_DESC_P1 0xde14
15#define SEC_DESC_P1_PTR(x) (x)
16
17#define SEC_ACCEL_CFG 0xde08
18#define SEC_CFG_STOP_DIG_ERR (1 << 0)
19#define SEC_CFG_CH0_W_IDMA (1 << 7)
20#define SEC_CFG_CH1_W_IDMA (1 << 8)
21#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
22#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
23
24#define SEC_ACCEL_STATUS 0xde0c
25#define SEC_ST_ACT_0 (1 << 0)
26#define SEC_ST_ACT_1 (1 << 1)
27
28/*
29 * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata
30 * 4.12. It looks like that it was part of an IRQ-controller in FPGA and
31 * someone forgot to remove it while switching to the core and moving to
32 * SEC_ACCEL_INT_STATUS.
33 */
34#define FPGA_INT_STATUS 0xdd68
35#define SEC_ACCEL_INT_STATUS 0xde20
36#define SEC_INT_AUTH_DONE (1 << 0)
37#define SEC_INT_DES_E_DONE (1 << 1)
38#define SEC_INT_AES_E_DONE (1 << 2)
39#define SEC_INT_AES_D_DONE (1 << 3)
40#define SEC_INT_ENC_DONE (1 << 4)
41#define SEC_INT_ACCEL0_DONE (1 << 5)
42#define SEC_INT_ACCEL1_DONE (1 << 6)
43#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
44#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
45
46#define SEC_ACCEL_INT_MASK 0xde24
47
48#define AES_KEY_LEN (8 * 4)
49
50struct sec_accel_config {
51
52 u32 config;
53#define CFG_OP_MAC_ONLY 0
54#define CFG_OP_CRYPT_ONLY 1
55#define CFG_OP_MAC_CRYPT 2
56#define CFG_OP_CRYPT_MAC 3
57#define CFG_MACM_MD5 (4 << 4)
58#define CFG_MACM_SHA1 (5 << 4)
59#define CFG_MACM_HMAC_MD5 (6 << 4)
60#define CFG_MACM_HMAC_SHA1 (7 << 4)
61#define CFG_ENCM_DES (1 << 8)
62#define CFG_ENCM_3DES (2 << 8)
63#define CFG_ENCM_AES (3 << 8)
64#define CFG_DIR_ENC (0 << 12)
65#define CFG_DIR_DEC (1 << 12)
66#define CFG_ENC_MODE_ECB (0 << 16)
67#define CFG_ENC_MODE_CBC (1 << 16)
68#define CFG_3DES_EEE (0 << 20)
69#define CFG_3DES_EDE (1 << 20)
70#define CFG_AES_LEN_128 (0 << 24)
71#define CFG_AES_LEN_192 (1 << 24)
72#define CFG_AES_LEN_256 (2 << 24)
73
74 u32 enc_p;
75#define ENC_P_SRC(x) (x)
76#define ENC_P_DST(x) ((x) << 16)
77
78 u32 enc_len;
79#define ENC_LEN(x) (x)
80
81 u32 enc_key_p;
82#define ENC_KEY_P(x) (x)
83
84 u32 enc_iv;
85#define ENC_IV_POINT(x) ((x) << 0)
86#define ENC_IV_BUF_POINT(x) ((x) << 16)
87
88 u32 mac_src_p;
89#define MAC_SRC_DATA_P(x) (x)
90#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
91
92 u32 mac_digest;
93 u32 mac_iv;
 94} __attribute__ ((packed));
95 /*
96 * /-----------\ 0
97 * | ACCEL CFG | 4 * 8
98 * |-----------| 0x20
99 * | CRYPT KEY | 8 * 4
100 * |-----------| 0x40
101 * | IV IN | 4 * 4
102 * |-----------| 0x40 (inplace)
103 * | IV BUF | 4 * 4
104 * |-----------| 0x50
105 * | DATA IN | 16 * x (max ->max_req_size)
106 * |-----------| 0x50 (inplace operation)
107 * | DATA OUT | 16 * x (max ->max_req_size)
108 * \-----------/ SRAM size
109 */
110#define SRAM_CONFIG 0x00
111#define SRAM_DATA_KEY_P 0x20
112#define SRAM_DATA_IV 0x40
113#define SRAM_DATA_IV_BUF 0x40
114#define SRAM_DATA_IN_START 0x50
115#define SRAM_DATA_OUT_START 0x50
116
117#define SRAM_CFG_SPACE 0x50
118
119#endif
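
The offsets at the end of the header encode a fixed staging layout in the engine's SRAM: the 0x20-byte accel config block, the cipher key, the (in-place) IV, and then the data area at SRAM_DATA_IN_START == SRAM_CFG_SPACE. A stand-alone, user-space sketch (illustrative only; sketch_accel_config simply mirrors sec_accel_config above) that checks the arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors struct sec_accel_config above: eight 32-bit words, packed */
struct sketch_accel_config {
	uint32_t config;
	uint32_t enc_p;
	uint32_t enc_len;
	uint32_t enc_key_p;
	uint32_t enc_iv;
	uint32_t mac_src_p;
	uint32_t mac_digest;
	uint32_t mac_iv;
} __attribute__((packed));

int main(void)
{
	/* config block occupies [0x00, 0x20) -> key starts at SRAM_DATA_KEY_P */
	assert(sizeof(struct sketch_accel_config) == 0x20);
	/* key (AES_KEY_LEN = 8 * 4 bytes) occupies [0x20, 0x40) -> IV at 0x40 */
	assert(0x20 + 8 * 4 == 0x40);
	/* IV IN and IV BUF share [0x40, 0x50) for in-place operation, so the
	 * payload starts at SRAM_DATA_IN_START == SRAM_CFG_SPACE == 0x50 */
	printf("data area starts at 0x%x\n", 0x40 + 4 * 4);
	return 0;
}
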
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b63..76cb6b345e7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
12 * 12 *
13 */ 13 */
14 14
15#include <crypto/algapi.h> 15#include <crypto/internal/hash.h>
16#include <crypto/sha.h> 16#include <crypto/sha.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/cryptohash.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <asm/i387.h> 24#include <asm/i387.h>
26#include "padlock.h" 25#include "padlock.h"
27 26
28#define SHA1_DEFAULT_FALLBACK "sha1-generic" 27struct padlock_sha_desc {
29#define SHA256_DEFAULT_FALLBACK "sha256-generic" 28 struct shash_desc fallback;
29};
30 30
31struct padlock_sha_ctx { 31struct padlock_sha_ctx {
32 char *data; 32 struct crypto_shash *fallback;
33 size_t used;
34 int bypass;
35 void (*f_sha_padlock)(const char *in, char *out, int count);
36 struct hash_desc fallback;
37}; 33};
38 34
39static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) 35static int padlock_sha_init(struct shash_desc *desc)
40{
41 return crypto_tfm_ctx(tfm);
42}
43
44/* We'll need aligned address on the stack */
45#define NEAREST_ALIGNED(ptr) \
46 ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
47
48static struct crypto_alg sha1_alg, sha256_alg;
49
50static void padlock_sha_bypass(struct crypto_tfm *tfm)
51{ 36{
52 if (ctx(tfm)->bypass) 37 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
53 return; 38 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
54 39
55 crypto_hash_init(&ctx(tfm)->fallback); 40 dctx->fallback.tfm = ctx->fallback;
56 if (ctx(tfm)->data && ctx(tfm)->used) { 41 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
57 struct scatterlist sg; 42 return crypto_shash_init(&dctx->fallback);
58
59 sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
60 crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
61 }
62
63 ctx(tfm)->used = 0;
64 ctx(tfm)->bypass = 1;
65}
66
67static void padlock_sha_init(struct crypto_tfm *tfm)
68{
69 ctx(tfm)->used = 0;
70 ctx(tfm)->bypass = 0;
71} 43}
72 44
73static void padlock_sha_update(struct crypto_tfm *tfm, 45static int padlock_sha_update(struct shash_desc *desc,
74 const uint8_t *data, unsigned int length) 46 const u8 *data, unsigned int length)
75{ 47{
76 /* Our buffer is always one page. */ 48 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
77 if (unlikely(!ctx(tfm)->bypass &&
78 (ctx(tfm)->used + length > PAGE_SIZE)))
79 padlock_sha_bypass(tfm);
80
81 if (unlikely(ctx(tfm)->bypass)) {
82 struct scatterlist sg;
83 sg_init_one(&sg, (uint8_t *)data, length);
84 crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
85 return;
86 }
87 49
88 memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); 50 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
89 ctx(tfm)->used += length; 51 return crypto_shash_update(&dctx->fallback, data, length);
90} 52}
91 53
92static inline void padlock_output_block(uint32_t *src, 54static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
96 *dst++ = swab32(*src++); 58 *dst++ = swab32(*src++);
97} 59}
98 60
99static void padlock_do_sha1(const char *in, char *out, int count) 61static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
62 unsigned int count, u8 *out)
100{ 63{
101 /* We can't store directly to *out as it may be unaligned. */ 64 /* We can't store directly to *out as it may be unaligned. */
102 /* BTW Don't reduce the buffer size below 128 Bytes! 65 /* BTW Don't reduce the buffer size below 128 Bytes!
103 * PadLock microcode needs it that big. */ 66 * PadLock microcode needs it that big. */
104 char buf[128+16]; 67 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
105 char *result = NEAREST_ALIGNED(buf); 68 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
69 struct sha1_state state;
70 unsigned int space;
71 unsigned int leftover;
106 int ts_state; 72 int ts_state;
73 int err;
74
75 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
76 err = crypto_shash_export(&dctx->fallback, &state);
77 if (err)
78 goto out;
79
80 if (state.count + count > ULONG_MAX)
81 return crypto_shash_finup(&dctx->fallback, in, count, out);
82
83 leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
84 space = SHA1_BLOCK_SIZE - leftover;
85 if (space) {
86 if (count > space) {
87 err = crypto_shash_update(&dctx->fallback, in, space) ?:
88 crypto_shash_export(&dctx->fallback, &state);
89 if (err)
90 goto out;
91 count -= space;
92 in += space;
93 } else {
94 memcpy(state.buffer + leftover, in, count);
95 in = state.buffer;
96 count += leftover;
97 state.count &= ~(SHA1_BLOCK_SIZE - 1);
98 }
99 }
100
101 memcpy(result, &state.state, SHA1_DIGEST_SIZE);
107 102
108 ((uint32_t *)result)[0] = SHA1_H0;
109 ((uint32_t *)result)[1] = SHA1_H1;
110 ((uint32_t *)result)[2] = SHA1_H2;
111 ((uint32_t *)result)[3] = SHA1_H3;
112 ((uint32_t *)result)[4] = SHA1_H4;
113
114 /* prevent taking the spurious DNA fault with padlock. */ 103 /* prevent taking the spurious DNA fault with padlock. */
115 ts_state = irq_ts_save(); 104 ts_state = irq_ts_save();
116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ 105 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
117 : "+S"(in), "+D"(result) 106 : \
118 : "c"(count), "a"(0)); 107 : "c"((unsigned long)state.count + count), \
108 "a"((unsigned long)state.count), \
109 "S"(in), "D"(result));
119 irq_ts_restore(ts_state); 110 irq_ts_restore(ts_state);
120 111
121 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); 112 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
113
114out:
115 return err;
122} 116}
123 117
124static void padlock_do_sha256(const char *in, char *out, int count) 118static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
119{
120 u8 buf[4];
121
122 return padlock_sha1_finup(desc, buf, 0, out);
123}
124
125static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
126 unsigned int count, u8 *out)
125{ 127{
126 /* We can't store directly to *out as it may be unaligned. */ 128 /* We can't store directly to *out as it may be unaligned. */
127 /* BTW Don't reduce the buffer size below 128 Bytes! 129 /* BTW Don't reduce the buffer size below 128 Bytes!
128 * PadLock microcode needs it that big. */ 130 * PadLock microcode needs it that big. */
129 char buf[128+16]; 131 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
130 char *result = NEAREST_ALIGNED(buf); 132 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
133 struct sha256_state state;
134 unsigned int space;
135 unsigned int leftover;
131 int ts_state; 136 int ts_state;
137 int err;
138
139 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
140 err = crypto_shash_export(&dctx->fallback, &state);
141 if (err)
142 goto out;
143
144 if (state.count + count > ULONG_MAX)
145 return crypto_shash_finup(&dctx->fallback, in, count, out);
146
147 leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
148 space = SHA256_BLOCK_SIZE - leftover;
149 if (space) {
150 if (count > space) {
151 err = crypto_shash_update(&dctx->fallback, in, space) ?:
152 crypto_shash_export(&dctx->fallback, &state);
153 if (err)
154 goto out;
155 count -= space;
156 in += space;
157 } else {
158 memcpy(state.buf + leftover, in, count);
159 in = state.buf;
160 count += leftover;
161 state.count &= ~(SHA1_BLOCK_SIZE - 1);
162 }
163 }
132 164
133 ((uint32_t *)result)[0] = SHA256_H0; 165 memcpy(result, &state.state, SHA256_DIGEST_SIZE);
134 ((uint32_t *)result)[1] = SHA256_H1;
135 ((uint32_t *)result)[2] = SHA256_H2;
136 ((uint32_t *)result)[3] = SHA256_H3;
137 ((uint32_t *)result)[4] = SHA256_H4;
138 ((uint32_t *)result)[5] = SHA256_H5;
139 ((uint32_t *)result)[6] = SHA256_H6;
140 ((uint32_t *)result)[7] = SHA256_H7;
141 166
142 /* prevent taking the spurious DNA fault with padlock. */ 167 /* prevent taking the spurious DNA fault with padlock. */
143 ts_state = irq_ts_save(); 168 ts_state = irq_ts_save();
144 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ 169 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
145 : "+S"(in), "+D"(result) 170 : \
146 : "c"(count), "a"(0)); 171 : "c"((unsigned long)state.count + count), \
172 "a"((unsigned long)state.count), \
173 "S"(in), "D"(result));
147 irq_ts_restore(ts_state); 174 irq_ts_restore(ts_state);
148 175
149 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); 176 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
177
178out:
179 return err;
150} 180}
151 181
152static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) 182static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
153{ 183{
154 if (unlikely(ctx(tfm)->bypass)) { 184 u8 buf[4];
155 crypto_hash_final(&ctx(tfm)->fallback, out);
156 ctx(tfm)->bypass = 0;
157 return;
158 }
159 185
160 /* Pass the input buffer to PadLock microcode... */ 186 return padlock_sha256_finup(desc, buf, 0, out);
161 ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
162
163 ctx(tfm)->used = 0;
164} 187}
165 188
166static int padlock_cra_init(struct crypto_tfm *tfm) 189static int padlock_cra_init(struct crypto_tfm *tfm)
167{ 190{
191 struct crypto_shash *hash = __crypto_shash_cast(tfm);
168 const char *fallback_driver_name = tfm->__crt_alg->cra_name; 192 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
169 struct crypto_hash *fallback_tfm; 193 struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
170 194 struct crypto_shash *fallback_tfm;
171 /* For now we'll allocate one page. This 195 int err = -ENOMEM;
172 * could eventually be configurable one day. */
173 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
174 if (!ctx(tfm)->data)
175 return -ENOMEM;
176 196
177 /* Allocate a fallback and abort if it failed. */ 197 /* Allocate a fallback and abort if it failed. */
178 fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, 198 fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
179 CRYPTO_ALG_ASYNC | 199 CRYPTO_ALG_NEED_FALLBACK);
180 CRYPTO_ALG_NEED_FALLBACK);
181 if (IS_ERR(fallback_tfm)) { 200 if (IS_ERR(fallback_tfm)) {
182 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", 201 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
183 fallback_driver_name); 202 fallback_driver_name);
184 free_page((unsigned long)(ctx(tfm)->data)); 203 err = PTR_ERR(fallback_tfm);
185 return PTR_ERR(fallback_tfm); 204 goto out;
186 } 205 }
187 206
188 ctx(tfm)->fallback.tfm = fallback_tfm; 207 ctx->fallback = fallback_tfm;
208 hash->descsize += crypto_shash_descsize(fallback_tfm);
189 return 0; 209 return 0;
190}
191
192static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
193{
194 ctx(tfm)->f_sha_padlock = padlock_do_sha1;
195 210
196 return padlock_cra_init(tfm); 211out:
197} 212 return err;
198
199static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
200{
201 ctx(tfm)->f_sha_padlock = padlock_do_sha256;
202
203 return padlock_cra_init(tfm);
204} 213}
205 214
206static void padlock_cra_exit(struct crypto_tfm *tfm) 215static void padlock_cra_exit(struct crypto_tfm *tfm)
207{ 216{
208 if (ctx(tfm)->data) { 217 struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
209 free_page((unsigned long)(ctx(tfm)->data));
210 ctx(tfm)->data = NULL;
211 }
212 218
213 crypto_free_hash(ctx(tfm)->fallback.tfm); 219 crypto_free_shash(ctx->fallback);
214 ctx(tfm)->fallback.tfm = NULL;
215} 220}
216 221
217static struct crypto_alg sha1_alg = { 222static struct shash_alg sha1_alg = {
218 .cra_name = "sha1", 223 .digestsize = SHA1_DIGEST_SIZE,
219 .cra_driver_name = "sha1-padlock", 224 .init = padlock_sha_init,
220 .cra_priority = PADLOCK_CRA_PRIORITY, 225 .update = padlock_sha_update,
221 .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 226 .finup = padlock_sha1_finup,
222 CRYPTO_ALG_NEED_FALLBACK, 227 .final = padlock_sha1_final,
223 .cra_blocksize = SHA1_BLOCK_SIZE, 228 .descsize = sizeof(struct padlock_sha_desc),
224 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 229 .base = {
225 .cra_module = THIS_MODULE, 230 .cra_name = "sha1",
226 .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), 231 .cra_driver_name = "sha1-padlock",
227 .cra_init = padlock_sha1_cra_init, 232 .cra_priority = PADLOCK_CRA_PRIORITY,
228 .cra_exit = padlock_cra_exit, 233 .cra_flags = CRYPTO_ALG_TYPE_SHASH |
229 .cra_u = { 234 CRYPTO_ALG_NEED_FALLBACK,
230 .digest = { 235 .cra_blocksize = SHA1_BLOCK_SIZE,
231 .dia_digestsize = SHA1_DIGEST_SIZE, 236 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
232 .dia_init = padlock_sha_init, 237 .cra_module = THIS_MODULE,
233 .dia_update = padlock_sha_update, 238 .cra_init = padlock_cra_init,
234 .dia_final = padlock_sha_final, 239 .cra_exit = padlock_cra_exit,
235 }
236 } 240 }
237}; 241};
238 242
239static struct crypto_alg sha256_alg = { 243static struct shash_alg sha256_alg = {
240 .cra_name = "sha256", 244 .digestsize = SHA256_DIGEST_SIZE,
241 .cra_driver_name = "sha256-padlock", 245 .init = padlock_sha_init,
242 .cra_priority = PADLOCK_CRA_PRIORITY, 246 .update = padlock_sha_update,
243 .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 247 .finup = padlock_sha256_finup,
244 CRYPTO_ALG_NEED_FALLBACK, 248 .final = padlock_sha256_final,
245 .cra_blocksize = SHA256_BLOCK_SIZE, 249 .descsize = sizeof(struct padlock_sha_desc),
246 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 250 .base = {
247 .cra_module = THIS_MODULE, 251 .cra_name = "sha256",
248 .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), 252 .cra_driver_name = "sha256-padlock",
249 .cra_init = padlock_sha256_cra_init, 253 .cra_priority = PADLOCK_CRA_PRIORITY,
250 .cra_exit = padlock_cra_exit, 254 .cra_flags = CRYPTO_ALG_TYPE_SHASH |
251 .cra_u = { 255 CRYPTO_ALG_NEED_FALLBACK,
252 .digest = { 256 .cra_blocksize = SHA256_BLOCK_SIZE,
253 .dia_digestsize = SHA256_DIGEST_SIZE, 257 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
254 .dia_init = padlock_sha_init, 258 .cra_module = THIS_MODULE,
255 .dia_update = padlock_sha_update, 259 .cra_init = padlock_cra_init,
256 .dia_final = padlock_sha_final, 260 .cra_exit = padlock_cra_exit,
257 }
258 } 261 }
259}; 262};
260 263
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
272 return -ENODEV; 275 return -ENODEV;
273 } 276 }
274 277
275 rc = crypto_register_alg(&sha1_alg); 278 rc = crypto_register_shash(&sha1_alg);
276 if (rc) 279 if (rc)
277 goto out; 280 goto out;
278 281
279 rc = crypto_register_alg(&sha256_alg); 282 rc = crypto_register_shash(&sha256_alg);
280 if (rc) 283 if (rc)
281 goto out_unreg1; 284 goto out_unreg1;
282 285
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
285 return 0; 288 return 0;
286 289
287out_unreg1: 290out_unreg1:
288 crypto_unregister_alg(&sha1_alg); 291 crypto_unregister_shash(&sha1_alg);
289out: 292out:
290 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); 293 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
291 return rc; 294 return rc;
@@ -293,8 +296,8 @@ out:
293 296
294static void __exit padlock_fini(void) 297static void __exit padlock_fini(void)
295{ 298{
296 crypto_unregister_alg(&sha1_alg); 299 crypto_unregister_shash(&sha1_alg);
297 crypto_unregister_alg(&sha256_alg); 300 crypto_unregister_shash(&sha256_alg);
298} 301}
299 302
300module_init(padlock_init); 303module_init(padlock_init);
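
The heart of the padlock rework above is the two finup paths: crypto_shash_export() yields the fallback's running state, and the leftover/space computation decides how much of the new input still fits in the current 64-byte block before the whole, block-aligned stream is handed to the xsha instruction (ECX carries the total byte count, EAX the bytes already hashed). A stand-alone illustration of that arithmetic for SHA-1/SHA-256's 64-byte block (the input count below is made up):

#include <stdio.h>

#define BLOCK_SIZE 64u

int main(void)
{
	unsigned long long count = 70;	/* bytes already fed to the fallback */
	unsigned int leftover, space;

	/* bytes of the current, possibly full, block: 70 -> 6 buffered.
	 * An exact multiple of 64 maps to 64 (not 0), so space becomes 0 and
	 * nothing needs to be prepended to the caller's buffer. */
	leftover = ((count - 1) & (BLOCK_SIZE - 1)) + 1;
	space = BLOCK_SIZE - leftover;

	printf("leftover=%u space=%u\n", leftover, space);	/* 6 and 58 */
	return 0;
}
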
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce2..c47ffe8a73ef 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
86 void *context; 86 void *context;
87}; 87};
88 88
89/* per-channel fifo management */
90struct talitos_channel {
91 /* request fifo */
92 struct talitos_request *fifo;
93
94 /* number of requests pending in channel h/w fifo */
95 atomic_t submit_count ____cacheline_aligned;
96
97 /* request submission (head) lock */
98 spinlock_t head_lock ____cacheline_aligned;
99 /* index to next free descriptor request */
100 int head;
101
102 /* request release (tail) lock */
103 spinlock_t tail_lock ____cacheline_aligned;
104 /* index to next in-progress/done descriptor request */
105 int tail;
106};
107
89struct talitos_private { 108struct talitos_private {
90 struct device *dev; 109 struct device *dev;
91 struct of_device *ofdev; 110 struct of_device *ofdev;
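
Folding the per-channel arrays into struct talitos_channel also lets the ____cacheline_aligned markers keep the submission side (head_lock/head) and the completion side (tail_lock/tail) on separate cache lines, so talitos_submit() and flush_channel() don't false-share. A user-space mock of that layout idea (a 64-byte line size is assumed; field types are simplified):

#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed cache line size, for illustration only */

struct chan_sketch {
	void *fifo;
	int submit_count __attribute__((aligned(CACHELINE)));
	int head_lock    __attribute__((aligned(CACHELINE)));
	int head;
	int tail_lock    __attribute__((aligned(CACHELINE)));
	int tail;
};

int main(void)
{
	/* head-side and tail-side state land on different cache lines */
	printf("head_lock at offset %zu, tail_lock at offset %zu\n",
	       offsetof(struct chan_sketch, head_lock),
	       offsetof(struct chan_sketch, tail_lock));
	return 0;
}
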
@@ -101,15 +120,6 @@ struct talitos_private {
101 /* SEC Compatibility info */ 120 /* SEC Compatibility info */
102 unsigned long features; 121 unsigned long features;
103 122
104 /* next channel to be assigned next incoming descriptor */
105 atomic_t last_chan;
106
107 /* per-channel number of requests pending in channel h/w fifo */
108 atomic_t *submit_count;
109
110 /* per-channel request fifo */
111 struct talitos_request **fifo;
112
113 /* 123 /*
114 * length of the request fifo 124 * length of the request fifo
115 * fifo_len is chfifo_len rounded up to next power of 2 125 * fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
117 */ 127 */
118 unsigned int fifo_len; 128 unsigned int fifo_len;
119 129
120 /* per-channel index to next free descriptor request */ 130 struct talitos_channel *chan;
121 int *head;
122
123 /* per-channel index to next in-progress/done descriptor request */
124 int *tail;
125 131
126 /* per-channel request submission (head) and release (tail) locks */ 132 /* next channel to be assigned next incoming descriptor */
127 spinlock_t *head_lock; 133 atomic_t last_chan ____cacheline_aligned;
128 spinlock_t *tail_lock;
129 134
130 /* request callback tasklet */ 135 /* request callback tasklet */
131 struct tasklet_struct done_task; 136 struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
141#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 146#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
142#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 147#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
143 148
149static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
150{
151 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
152 talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
153}
154
144/* 155/*
145 * map virtual single (contiguous) pointer to h/w descriptor pointer 156 * map virtual single (contiguous) pointer to h/w descriptor pointer
146 */ 157 */
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
150 unsigned char extent, 161 unsigned char extent,
151 enum dma_data_direction dir) 162 enum dma_data_direction dir)
152{ 163{
164 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
165
153 talitos_ptr->len = cpu_to_be16(len); 166 talitos_ptr->len = cpu_to_be16(len);
154 talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); 167 to_talitos_ptr(talitos_ptr, dma_addr);
155 talitos_ptr->j_extent = extent; 168 talitos_ptr->j_extent = extent;
156} 169}
157 170
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
182 return -EIO; 195 return -EIO;
183 } 196 }
184 197
185 /* set done writeback and IRQ */ 198 /* set 36-bit addressing, done writeback enable and done IRQ enable */
186 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | 199 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
187 TALITOS_CCCR_LO_CDIE); 200 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
188 201
189 /* and ICCR writeback, if available */ 202 /* and ICCR writeback, if available */
190 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) 203 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
282 /* emulate SEC's round-robin channel fifo polling scheme */ 295 /* emulate SEC's round-robin channel fifo polling scheme */
283 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); 296 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
284 297
285 spin_lock_irqsave(&priv->head_lock[ch], flags); 298 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
286 299
287 if (!atomic_inc_not_zero(&priv->submit_count[ch])) { 300 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
288 /* h/w fifo is full */ 301 /* h/w fifo is full */
289 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 302 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
290 return -EAGAIN; 303 return -EAGAIN;
291 } 304 }
292 305
293 head = priv->head[ch]; 306 head = priv->chan[ch].head;
294 request = &priv->fifo[ch][head]; 307 request = &priv->chan[ch].fifo[head];
295 308
296 /* map descriptor and save caller data */ 309 /* map descriptor and save caller data */
297 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), 310 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
300 request->context = context; 313 request->context = context;
301 314
302 /* increment fifo head */ 315 /* increment fifo head */
303 priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1); 316 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
304 317
305 smp_wmb(); 318 smp_wmb();
306 request->desc = desc; 319 request->desc = desc;
307 320
308 /* GO! */ 321 /* GO! */
309 wmb(); 322 wmb();
310 out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); 323 out_be32(priv->reg + TALITOS_FF(ch),
324 cpu_to_be32(upper_32_bits(request->dma_desc)));
325 out_be32(priv->reg + TALITOS_FF_LO(ch),
326 cpu_to_be32(lower_32_bits(request->dma_desc)));
311 327
312 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 328 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
313 329
314 return -EINPROGRESS; 330 return -EINPROGRESS;
315} 331}
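
Both the round-robin channel pick and the head advance above wrap by masking rather than by modulo, which only works because num_channels and fifo_len are powers of two (fifo_len is rounded up with roundup_pow_of_two() in the probe path further down). A quick stand-alone sketch of that wrap-around:

#include <stdio.h>

int main(void)
{
	unsigned int fifo_len = 32;	/* e.g. roundup_pow_of_two(chfifo_len) */
	unsigned int head = 0;
	int i;

	/* advancing the index 40 times wraps cleanly at 32 */
	for (i = 0; i < 40; i++)
		head = (head + 1) & (fifo_len - 1);

	printf("head after 40 increments: %u\n", head);	/* prints 8 */
	return 0;
}
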
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
324 unsigned long flags; 340 unsigned long flags;
325 int tail, status; 341 int tail, status;
326 342
327 spin_lock_irqsave(&priv->tail_lock[ch], flags); 343 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
328 344
329 tail = priv->tail[ch]; 345 tail = priv->chan[ch].tail;
330 while (priv->fifo[ch][tail].desc) { 346 while (priv->chan[ch].fifo[tail].desc) {
331 request = &priv->fifo[ch][tail]; 347 request = &priv->chan[ch].fifo[tail];
332 348
333 /* descriptors with their done bits set don't get the error */ 349 /* descriptors with their done bits set don't get the error */
334 rmb(); 350 rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
354 request->desc = NULL; 370 request->desc = NULL;
355 371
356 /* increment fifo tail */ 372 /* increment fifo tail */
357 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); 373 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
358 374
359 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 375 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
360 376
361 atomic_dec(&priv->submit_count[ch]); 377 atomic_dec(&priv->chan[ch].submit_count);
362 378
363 saved_req.callback(dev, saved_req.desc, saved_req.context, 379 saved_req.callback(dev, saved_req.desc, saved_req.context,
364 status); 380 status);
365 /* channel may resume processing in single desc error case */ 381 /* channel may resume processing in single desc error case */
366 if (error && !reset_ch && status == error) 382 if (error && !reset_ch && status == error)
367 return; 383 return;
368 spin_lock_irqsave(&priv->tail_lock[ch], flags); 384 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
369 tail = priv->tail[ch]; 385 tail = priv->chan[ch].tail;
370 } 386 }
371 387
372 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 388 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
373} 389}
374 390
375/* 391/*
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
397static struct talitos_desc *current_desc(struct device *dev, int ch) 413static struct talitos_desc *current_desc(struct device *dev, int ch)
398{ 414{
399 struct talitos_private *priv = dev_get_drvdata(dev); 415 struct talitos_private *priv = dev_get_drvdata(dev);
400 int tail = priv->tail[ch]; 416 int tail = priv->chan[ch].tail;
401 dma_addr_t cur_desc; 417 dma_addr_t cur_desc;
402 418
403 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); 419 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
404 420
405 while (priv->fifo[ch][tail].dma_desc != cur_desc) { 421 while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
406 tail = (tail + 1) & (priv->fifo_len - 1); 422 tail = (tail + 1) & (priv->fifo_len - 1);
407 if (tail == priv->tail[ch]) { 423 if (tail == priv->chan[ch].tail) {
408 dev_err(dev, "couldn't locate current descriptor\n"); 424 dev_err(dev, "couldn't locate current descriptor\n");
409 return NULL; 425 return NULL;
410 } 426 }
411 } 427 }
412 428
413 return priv->fifo[ch][tail].desc; 429 return priv->chan[ch].fifo[tail].desc;
414} 430}
415 431
416/* 432/*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
929 int n_sg = sg_count; 945 int n_sg = sg_count;
930 946
931 while (n_sg--) { 947 while (n_sg--) {
932 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); 948 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
933 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); 949 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
934 link_tbl_ptr->j_extent = 0; 950 link_tbl_ptr->j_extent = 0;
935 link_tbl_ptr++; 951 link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
970 struct talitos_desc *desc = &edesc->desc; 986 struct talitos_desc *desc = &edesc->desc;
971 unsigned int cryptlen = areq->cryptlen; 987 unsigned int cryptlen = areq->cryptlen;
972 unsigned int authsize = ctx->authsize; 988 unsigned int authsize = ctx->authsize;
973 unsigned int ivsize; 989 unsigned int ivsize = crypto_aead_ivsize(aead);
974 int sg_count, ret; 990 int sg_count, ret;
975 int sg_link_tbl_len; 991 int sg_link_tbl_len;
976 992
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
978 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 994 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
979 0, DMA_TO_DEVICE); 995 0, DMA_TO_DEVICE);
980 /* hmac data */ 996 /* hmac data */
981 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - 997 map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
982 sg_virt(areq->assoc), sg_virt(areq->assoc), 0, 998 sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
983 DMA_TO_DEVICE);
984 /* cipher iv */ 999 /* cipher iv */
985 ivsize = crypto_aead_ivsize(aead);
986 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, 1000 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
987 DMA_TO_DEVICE); 1001 DMA_TO_DEVICE);
988 1002
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1006 edesc->src_is_chained); 1020 edesc->src_is_chained);
1007 1021
1008 if (sg_count == 1) { 1022 if (sg_count == 1) {
1009 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1023 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1010 } else { 1024 } else {
1011 sg_link_tbl_len = cryptlen; 1025 sg_link_tbl_len = cryptlen;
1012 1026
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1017 &edesc->link_tbl[0]); 1031 &edesc->link_tbl[0]);
1018 if (sg_count > 1) { 1032 if (sg_count > 1) {
1019 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1033 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1020 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 1034 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1021 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1035 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1022 edesc->dma_len, 1036 edesc->dma_len,
1023 DMA_BIDIRECTIONAL); 1037 DMA_BIDIRECTIONAL);
1024 } else { 1038 } else {
1025 /* Only one segment now, so no link tbl needed */ 1039 /* Only one segment now, so no link tbl needed */
1026 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> 1040 to_talitos_ptr(&desc->ptr[4],
1027 src)); 1041 sg_dma_address(areq->src));
1028 } 1042 }
1029 } 1043 }
1030 1044
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1039 edesc->dst_is_chained); 1053 edesc->dst_is_chained);
1040 1054
1041 if (sg_count == 1) { 1055 if (sg_count == 1) {
1042 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1056 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1043 } else { 1057 } else {
1044 struct talitos_ptr *link_tbl_ptr = 1058 struct talitos_ptr *link_tbl_ptr =
1045 &edesc->link_tbl[edesc->src_nents + 1]; 1059 &edesc->link_tbl[edesc->src_nents + 1];
1046 1060
1047 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 1061 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1048 edesc->dma_link_tbl + 1062 (edesc->src_nents + 1) *
1049 edesc->src_nents + 1); 1063 sizeof(struct talitos_ptr));
1050 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1064 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1051 link_tbl_ptr); 1065 link_tbl_ptr);
1052 1066
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1059 link_tbl_ptr->len = cpu_to_be16(authsize); 1073 link_tbl_ptr->len = cpu_to_be16(authsize);
1060 1074
1061 /* icv data follows link tables */ 1075 /* icv data follows link tables */
1062 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 1076 to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1063 edesc->dma_link_tbl + 1077 (edesc->src_nents + edesc->dst_nents + 2) *
1064 edesc->src_nents + 1078 sizeof(struct talitos_ptr));
1065 edesc->dst_nents + 2);
1066
1067 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 1079 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1068 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1080 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1069 edesc->dma_len, DMA_BIDIRECTIONAL); 1081 edesc->dma_len, DMA_BIDIRECTIONAL);
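
With to_talitos_ptr() taking a dma_addr_t, the offsets into the shared link table are now spelled out as byte arithmetic: entry i sits at dma_link_tbl + i * sizeof(struct talitos_ptr), where the old code expressed the same thing via pointer arithmetic on a cast. A stand-alone illustration (the 8-byte entry size and the base address are assumptions of the sketch):

#include <stdint.h>
#include <stdio.h>

/* stand-in for struct talitos_ptr; assumed to pack to 8 bytes */
struct sketch_ptr {
	uint16_t len;
	uint8_t  j_extent;
	uint8_t  eptr;
	uint32_t ptr;
};

int main(void)
{
	uint64_t dma_link_tbl = 0x1f000000;	/* hypothetical table base */
	unsigned int src_nents = 3;

	/* dst entries follow the src entries plus the one jump entry */
	uint64_t dst_off = (src_nents + 1) * sizeof(struct sketch_ptr);

	printf("dst entries start at 0x%llx\n",
	       (unsigned long long)(dma_link_tbl + dst_off)); /* base + 32 */
	return 0;
}
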
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1338 1350
1339 /* first DWORD empty */ 1351 /* first DWORD empty */
1340 desc->ptr[0].len = 0; 1352 desc->ptr[0].len = 0;
1341 desc->ptr[0].ptr = 0; 1353 to_talitos_ptr(&desc->ptr[0], 0);
1342 desc->ptr[0].j_extent = 0; 1354 desc->ptr[0].j_extent = 0;
1343 1355
1344 /* cipher iv */ 1356 /* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1362 edesc->src_is_chained); 1374 edesc->src_is_chained);
1363 1375
1364 if (sg_count == 1) { 1376 if (sg_count == 1) {
1365 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1377 to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1366 } else { 1378 } else {
1367 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, 1379 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1368 &edesc->link_tbl[0]); 1380 &edesc->link_tbl[0]);
1369 if (sg_count > 1) { 1381 if (sg_count > 1) {
1382 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1370 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; 1383 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1371 desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
1372 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1384 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1373 edesc->dma_len, 1385 edesc->dma_len,
1374 DMA_BIDIRECTIONAL); 1386 DMA_BIDIRECTIONAL);
1375 } else { 1387 } else {
1376 /* Only one segment now, so no link tbl needed */ 1388 /* Only one segment now, so no link tbl needed */
1377 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> 1389 to_talitos_ptr(&desc->ptr[3],
1378 src)); 1390 sg_dma_address(areq->src));
1379 } 1391 }
1380 } 1392 }
1381 1393
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1390 edesc->dst_is_chained); 1402 edesc->dst_is_chained);
1391 1403
1392 if (sg_count == 1) { 1404 if (sg_count == 1) {
1393 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1405 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1394 } else { 1406 } else {
1395 struct talitos_ptr *link_tbl_ptr = 1407 struct talitos_ptr *link_tbl_ptr =
1396 &edesc->link_tbl[edesc->src_nents + 1]; 1408 &edesc->link_tbl[edesc->src_nents + 1];
1397 1409
1410 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1411 (edesc->src_nents + 1) *
1412 sizeof(struct talitos_ptr));
1398 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1413 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1399 desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
1400 edesc->dma_link_tbl +
1401 edesc->src_nents + 1);
1402 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1414 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1403 link_tbl_ptr); 1415 link_tbl_ptr);
1404 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1416 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1411 1423
1412 /* last DWORD empty */ 1424 /* last DWORD empty */
1413 desc->ptr[6].len = 0; 1425 desc->ptr[6].len = 0;
1414 desc->ptr[6].ptr = 0; 1426 to_talitos_ptr(&desc->ptr[6], 0);
1415 desc->ptr[6].j_extent = 0; 1427 desc->ptr[6].j_extent = 0;
1416 1428
1417 ret = talitos_submit(dev, desc, callback, areq); 1429 ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
1742 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 1754 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1743 talitos_unregister_rng(dev); 1755 talitos_unregister_rng(dev);
1744 1756
1745 kfree(priv->submit_count); 1757 for (i = 0; i < priv->num_channels; i++)
1746 kfree(priv->tail); 1758 if (priv->chan[i].fifo)
1747 kfree(priv->head); 1759 kfree(priv->chan[i].fifo);
1748
1749 if (priv->fifo)
1750 for (i = 0; i < priv->num_channels; i++)
1751 kfree(priv->fifo[i]);
1752 1760
1753 kfree(priv->fifo); 1761 kfree(priv->chan);
1754 kfree(priv->head_lock);
1755 kfree(priv->tail_lock);
1756 1762
1757 if (priv->irq != NO_IRQ) { 1763 if (priv->irq != NO_IRQ) {
1758 free_irq(priv->irq, dev); 1764 free_irq(priv->irq, dev);
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
1872 if (of_device_is_compatible(np, "fsl,sec2.1")) 1878 if (of_device_is_compatible(np, "fsl,sec2.1"))
1873 priv->features |= TALITOS_FTR_HW_AUTH_CHECK; 1879 priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
1874 1880
1875 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1881 priv->chan = kzalloc(sizeof(struct talitos_channel) *
1876 GFP_KERNEL); 1882 priv->num_channels, GFP_KERNEL);
1877 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1883 if (!priv->chan) {
1878 GFP_KERNEL); 1884 dev_err(dev, "failed to allocate channel management space\n");
1879 if (!priv->head_lock || !priv->tail_lock) {
1880 dev_err(dev, "failed to allocate fifo locks\n");
1881 err = -ENOMEM; 1885 err = -ENOMEM;
1882 goto err_out; 1886 goto err_out;
1883 } 1887 }
1884 1888
1885 for (i = 0; i < priv->num_channels; i++) { 1889 for (i = 0; i < priv->num_channels; i++) {
1886 spin_lock_init(&priv->head_lock[i]); 1890 spin_lock_init(&priv->chan[i].head_lock);
1887 spin_lock_init(&priv->tail_lock[i]); 1891 spin_lock_init(&priv->chan[i].tail_lock);
1888 }
1889
1890 priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1891 priv->num_channels, GFP_KERNEL);
1892 if (!priv->fifo) {
1893 dev_err(dev, "failed to allocate request fifo\n");
1894 err = -ENOMEM;
1895 goto err_out;
1896 } 1892 }
1897 1893
1898 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); 1894 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1899 1895
1900 for (i = 0; i < priv->num_channels; i++) { 1896 for (i = 0; i < priv->num_channels; i++) {
1901 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) * 1897 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
1902 priv->fifo_len, GFP_KERNEL); 1898 priv->fifo_len, GFP_KERNEL);
1903 if (!priv->fifo[i]) { 1899 if (!priv->chan[i].fifo) {
1904 dev_err(dev, "failed to allocate request fifo %d\n", i); 1900 dev_err(dev, "failed to allocate request fifo %d\n", i);
1905 err = -ENOMEM; 1901 err = -ENOMEM;
1906 goto err_out; 1902 goto err_out;
1907 } 1903 }
1908 } 1904 }
1909 1905
1910 priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
1911 GFP_KERNEL);
1912 if (!priv->submit_count) {
1913 dev_err(dev, "failed to allocate fifo submit count space\n");
1914 err = -ENOMEM;
1915 goto err_out;
1916 }
1917 for (i = 0; i < priv->num_channels; i++) 1906 for (i = 0; i < priv->num_channels; i++)
1918 atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); 1907 atomic_set(&priv->chan[i].submit_count,
1908 -(priv->chfifo_len - 1));
1919 1909
1920 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); 1910 dma_set_mask(dev, DMA_BIT_MASK(36));
1921 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1922 if (!priv->head || !priv->tail) {
1923 dev_err(dev, "failed to allocate request index space\n");
1924 err = -ENOMEM;
1925 goto err_out;
1926 }
1927 1911
1928 /* reset and initialize the h/w */ 1912 /* reset and initialize the h/w */
1929 err = init_device(dev); 1913 err = init_device(dev);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 575981f0cfda..ff5a1450e145 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -57,6 +57,7 @@
57#define TALITOS_CCCR_RESET 0x1 /* channel reset */ 57#define TALITOS_CCCR_RESET 0x1 /* channel reset */
58#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) 58#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
59#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ 59#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
60#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
60#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ 61#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
61#define TALITOS_CCCR_LO_NT 0x4 /* notification type */ 62#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
62#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ 63#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 110e731f5574..1c0b504a42f3 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
196 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 196 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
197 irm_id, generation, SCODE_100, 197 irm_id, generation, SCODE_100,
198 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, 198 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
199 data, sizeof(data))) { 199 data, 8)) {
200 case RCODE_GENERATION: 200 case RCODE_GENERATION:
201 /* A generation change frees all bandwidth. */ 201 /* A generation change frees all bandwidth. */
202 return allocate ? -EAGAIN : bandwidth; 202 return allocate ? -EAGAIN : bandwidth;
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
233 data[1] = old ^ c; 233 data[1] = old ^ c;
234 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 234 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
235 irm_id, generation, SCODE_100, 235 irm_id, generation, SCODE_100,
236 offset, data, sizeof(data))) { 236 offset, data, 8)) {
237 case RCODE_GENERATION: 237 case RCODE_GENERATION:
238 /* A generation change frees all channels. */ 238 /* A generation change frees all channels. */
239 return allocate ? -EAGAIN : i; 239 return allocate ? -EAGAIN : i;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ecddd11b797a..76b321bb73f9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/pci_ids.h>
37#include <linux/spinlock.h> 38#include <linux/spinlock.h>
38#include <linux/string.h> 39#include <linux/string.h>
39 40
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev)
2372#define ohci_pmac_off(dev) 2373#define ohci_pmac_off(dev)
2373#endif /* CONFIG_PPC_PMAC */ 2374#endif /* CONFIG_PPC_PMAC */
2374 2375
2376#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2377#define PCI_DEVICE_ID_AGERE_FW643 0x5901
2378
2375static int __devinit pci_probe(struct pci_dev *dev, 2379static int __devinit pci_probe(struct pci_dev *dev,
2376 const struct pci_device_id *ent) 2380 const struct pci_device_id *ent)
2377{ 2381{
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev,
2422 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2426 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2423 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; 2427 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
2424 2428
2429 /* dual-buffer mode is broken if more than one IR context is active */
2430 if (dev->vendor == PCI_VENDOR_ID_AGERE &&
2431 dev->device == PCI_DEVICE_ID_AGERE_FW643)
2432 ohci->use_dualbuffer = false;
2433
2434 /* dual-buffer mode is broken */
2435 if (dev->vendor == PCI_VENDOR_ID_RICOH &&
2436 dev->device == PCI_DEVICE_ID_RICOH_R5C832)
2437 ohci->use_dualbuffer = false;
2438
2425/* x86-32 currently doesn't use highmem for dma_alloc_coherent */ 2439/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
2426#if !defined(CONFIG_X86_32) 2440#if !defined(CONFIG_X86_32)
2427 /* dual-buffer mode is broken with descriptor addresses above 2G */ 2441 /* dual-buffer mode is broken with descriptor addresses above 2G */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 8d51568ee143..e5df822a8130 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
456 } 456 }
457 spin_unlock_irqrestore(&card->lock, flags); 457 spin_unlock_irqrestore(&card->lock, flags);
458 458
459 if (&orb->link != &lu->orb_list) 459 if (&orb->link != &lu->orb_list) {
460 orb->callback(orb, &status); 460 orb->callback(orb, &status);
461 else 461 kref_put(&orb->kref, free_orb);
462 } else {
462 fw_error("status write for unknown orb\n"); 463 fw_error("status write for unknown orb\n");
463 464 }
464 kref_put(&orb->kref, free_orb);
465 465
466 fw_send_response(card, request, RCODE_COMPLETE); 466 fw_send_response(card, request, RCODE_COMPLETE);
467} 467}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c07a755b3a3..80e5ba490dc2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2267,8 +2267,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2267 fence_list) { 2267 fence_list) {
2268 old_obj = old_obj_priv->obj; 2268 old_obj = old_obj_priv->obj;
2269 2269
2270 reg = &dev_priv->fence_regs[old_obj_priv->fence_reg];
2271
2272 if (old_obj_priv->pin_count) 2270 if (old_obj_priv->pin_count)
2273 continue; 2271 continue;
2274 2272
@@ -2290,8 +2288,11 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2290 */ 2288 */
2291 i915_gem_object_flush_gpu_write_domain(old_obj); 2289 i915_gem_object_flush_gpu_write_domain(old_obj);
2292 ret = i915_gem_object_wait_rendering(old_obj); 2290 ret = i915_gem_object_wait_rendering(old_obj);
2293 if (ret != 0) 2291 if (ret != 0) {
2292 drm_gem_object_unreference(old_obj);
2294 return ret; 2293 return ret;
2294 }
2295
2295 break; 2296 break;
2296 } 2297 }
2297 2298
@@ -2299,10 +2300,14 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2299 * Zap this virtual mapping so we can set up a fence again 2300 * Zap this virtual mapping so we can set up a fence again
2300 * for this object next time we need it. 2301 * for this object next time we need it.
2301 */ 2302 */
2302 i915_gem_release_mmap(reg->obj); 2303 i915_gem_release_mmap(old_obj);
2304
2303 i = old_obj_priv->fence_reg; 2305 i = old_obj_priv->fence_reg;
2306 reg = &dev_priv->fence_regs[i];
2307
2304 old_obj_priv->fence_reg = I915_FENCE_REG_NONE; 2308 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2305 list_del_init(&old_obj_priv->fence_list); 2309 list_del_init(&old_obj_priv->fence_list);
2310
2306 drm_gem_object_unreference(old_obj); 2311 drm_gem_object_unreference(old_obj);
2307 } 2312 }
2308 2313
@@ -4227,15 +4232,11 @@ int
4227i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4232i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4228 struct drm_file *file_priv) 4233 struct drm_file *file_priv)
4229{ 4234{
4230 int ret;
4231
4232 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4235 if (drm_core_check_feature(dev, DRIVER_MODESET))
4233 return 0; 4236 return 0;
4234 4237
4235 ret = i915_gem_idle(dev);
4236 drm_irq_uninstall(dev); 4238 drm_irq_uninstall(dev);
4237 4239 return i915_gem_idle(dev);
4238 return ret;
4239} 4240}
4240 4241
4241void 4242void
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3fadb5358858..748ed50c55ca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2005,7 +2005,21 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2005 return; 2005 return;
2006} 2006}
2007 2007
2008const static int latency_ns = 3000; /* default for non-igd platforms */ 2008/*
2009 * Latency for FIFO fetches is dependent on several factors:
2010 * - memory configuration (speed, channels)
2011 * - chipset
2012 * - current MCH state
2013 * It can be fairly high in some situations, so here we assume a fairly
 2014 * pessimal value. It's a tradeoff: if we set this value too high, the
 2015 * FIFO fetches frequently to stay full, costing extra memory traffic and
 2016 * power; if we set it too low, we might see FIFO underruns and display
 2017 * "flicker".
 2018 *
2019 * A value of 5us seems to be a good balance; safe for very low end
2020 * platforms but not overly aggressive on lower latency configs.
2021 */
2022const static int latency_ns = 5000;
2009 2023
2010static int intel_get_fifo_size(struct drm_device *dev, int plane) 2024static int intel_get_fifo_size(struct drm_device *dev, int plane)
2011{ 2025{
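
To get a feel for what raising the default to 5 us means for the watermark math (illustrative numbers only, not taken from the driver): at a 148.5 MHz pixel clock and 4 bytes per pixel, the FIFO has to ride out roughly 743 pixels' worth of scanout, about 2970 bytes, while a memory request is outstanding:

#include <stdio.h>

int main(void)
{
	/* illustrative figures: 148.5 MHz pixel clock, 32 bpp */
	double pixel_clock_hz = 148.5e6;
	double latency_s = 5000e-9;	/* the 5 us latency_ns default above */
	int bytes_per_pixel = 4;

	double pixels = pixel_clock_hz * latency_s;
	printf("FIFO must cover ~%.0f pixels (~%.0f bytes)\n",
	       pixels, pixels * bytes_per_pixel);	/* ~743 px, ~2970 bytes */
	return 0;
}
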
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f2afc4af4bc9..2b914d732076 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1263,7 +1263,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1263 1263
1264 if (IS_eDP(intel_output)) { 1264 if (IS_eDP(intel_output)) {
1265 intel_output->crtc_mask = (1 << 1); 1265 intel_output->crtc_mask = (1 << 1);
1266 intel_output->clone_mask = (1 << INTEL_OUTPUT_EDP); 1266 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1267 } else 1267 } else
1268 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1268 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1269 connector->interlace_allowed = true; 1269 connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25aa6facc12d..26a6227c15fe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -74,6 +74,7 @@
74#define INTEL_LVDS_CLONE_BIT 14 74#define INTEL_LVDS_CLONE_BIT 14
75#define INTEL_DVO_TMDS_CLONE_BIT 15 75#define INTEL_DVO_TMDS_CLONE_BIT 15
76#define INTEL_DVO_LVDS_CLONE_BIT 16 76#define INTEL_DVO_LVDS_CLONE_BIT 16
77#define INTEL_EDP_CLONE_BIT 17
77 78
78#define INTEL_DVO_CHIP_NONE 0 79#define INTEL_DVO_CHIP_NONE 0
79#define INTEL_DVO_CHIP_LVDS 1 80#define INTEL_DVO_CHIP_LVDS 1
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2fbe13a0de81..5b1c9e9fdba0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1730,6 +1730,7 @@ intel_tv_init(struct drm_device *dev)
1730 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1730 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1731 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1731 tv_priv = (struct intel_tv_priv *)(intel_output + 1);
1732 intel_output->type = INTEL_OUTPUT_TVOUT; 1732 intel_output->type = INTEL_OUTPUT_TVOUT;
1733 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1733 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1734 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1734 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1735 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1735 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1736 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 053f4ec397f7..051bca6e3a4f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -995,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = {
995 0x00000000, 0x00000000, 0x00000000, 0x00000000, 995 0x00000000, 0x00000000, 0x00000000, 0x00000000,
996 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, 996 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
997 0x00000000, 0x00000000, 0x00000000, 0x00000000, 997 0x00000000, 0x00000000, 0x00000000, 0x00000000,
998 0x0003FC01, 0xFFFFFFF8, 0xFE800B19, 998 0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
999}; 999};
1000 1000
1001static int r300_packet0_check(struct radeon_cs_parser *p, 1001static int r300_packet0_check(struct radeon_cs_parser *p,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 7ca6c13569b5..93d8f8889302 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -266,6 +266,7 @@ static struct radeon_asic rs400_asic = {
266/* 266/*
267 * rs600. 267 * rs600.
268 */ 268 */
269int rs600_init(struct radeon_device *dev);
269void rs600_errata(struct radeon_device *rdev); 270void rs600_errata(struct radeon_device *rdev);
270void rs600_vram_info(struct radeon_device *rdev); 271void rs600_vram_info(struct radeon_device *rdev);
271int rs600_mc_init(struct radeon_device *rdev); 272int rs600_mc_init(struct radeon_device *rdev);
@@ -281,7 +282,7 @@ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
281void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 282void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
282void rs600_bandwidth_update(struct radeon_device *rdev); 283void rs600_bandwidth_update(struct radeon_device *rdev);
283static struct radeon_asic rs600_asic = { 284static struct radeon_asic rs600_asic = {
284 .init = &r300_init, 285 .init = &rs600_init,
285 .errata = &rs600_errata, 286 .errata = &rs600_errata,
286 .vram_info = &rs600_vram_info, 287 .vram_info = &rs600_vram_info,
287 .gpu_reset = &r300_gpu_reset, 288 .gpu_reset = &r300_gpu_reset,
@@ -316,7 +317,6 @@ static struct radeon_asic rs600_asic = {
316/* 317/*
317 * rs690,rs740 318 * rs690,rs740
318 */ 319 */
319int rs690_init(struct radeon_device *rdev);
320void rs690_errata(struct radeon_device *rdev); 320void rs690_errata(struct radeon_device *rdev);
321void rs690_vram_info(struct radeon_device *rdev); 321void rs690_vram_info(struct radeon_device *rdev);
322int rs690_mc_init(struct radeon_device *rdev); 322int rs690_mc_init(struct radeon_device *rdev);
@@ -325,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
325void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 325void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
326void rs690_bandwidth_update(struct radeon_device *rdev); 326void rs690_bandwidth_update(struct radeon_device *rdev);
327static struct radeon_asic rs690_asic = { 327static struct radeon_asic rs690_asic = {
328 .init = &rs690_init, 328 .init = &rs600_init,
329 .errata = &rs690_errata, 329 .errata = &rs690_errata,
330 .vram_info = &rs690_vram_info, 330 .vram_info = &rs690_vram_info,
331 .gpu_reset = &r300_gpu_reset, 331 .gpu_reset = &r300_gpu_reset,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 7e8ce983a908..02fd11aad6a2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -409,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
409 ((reg) & RS600_MC_ADDR_MASK)); 409 ((reg) & RS600_MC_ADDR_MASK));
410 WREG32(RS600_MC_DATA, v); 410 WREG32(RS600_MC_DATA, v);
411} 411}
412
413static const unsigned rs600_reg_safe_bm[219] = {
414 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
415 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
416 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
417 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
418 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
419 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
420 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
421 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
422 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
423 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
424 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
425 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
426 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
427 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
428 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
429 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
430 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
431 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
432 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
433 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
434 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
435 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
436 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
437 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
438 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
439 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
440 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
441 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
442 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
443 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
444 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
445 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
446 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
447 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
448 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
449 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
454 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
455 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
456 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
457 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
458 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
459 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
460 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
461 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
462 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
463 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
464 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
465 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
466 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
467 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
468 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
469};
470
471int rs600_init(struct radeon_device *rdev)
472{
473 rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
474 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
475 return 0;
476}
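
Not part of the patch: a rough sketch of how a per-ASIC register safe bitmap such as rs600_reg_safe_bm is typically consulted by the command-stream checker. The helper name and call site are assumptions; the bit layout (one bit per 32-bit register, 32 registers per table word) is assumed as well.

    /*
     * Illustrative only: a register offset maps to word (reg >> 7) and
     * bit ((reg >> 2) & 31).  A clear bit means userspace may not touch
     * that register from a command stream.
     */
    static int reg_write_allowed(const unsigned *bm, unsigned bm_size,
                                 unsigned reg)
    {
            unsigned word = reg >> 7;
            unsigned bit  = (reg >> 2) & 31;

            if (word >= bm_size)
                    return 0;       /* out of range: reject */
            return (bm[word] >> bit) & 1;
    }

Read this way, the rs600 table here and the rs690 table below appear to differ only in two words near the end (0xFFFFFCF8/0xFF800B19 vs 0xFFFFFFF8/0xFE800B19); the rest is identical.
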
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bc6b7c5339bc..879882533e45 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -653,67 +653,3 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
653 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); 653 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
654} 654}
655 655
656static const unsigned rs690_reg_safe_bm[219] = {
657 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
658 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
659 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
660 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
661 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
662 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
663 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
664 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
665 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
666 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
667 0x17FF1FFF,0xFFFFFFFC,0xFFFFFFFF,0xFF30FFBF,
668 0xFFFFFFF8,0xC3E6FFFF,0xFFFFF6DF,0xFFFFFFFF,
669 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
670 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
671 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFF03F,
672 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
673 0xFFFFFFFF,0xFFFFEFCE,0xF00EBFFF,0x007C0000,
674 0xF0000078,0xFF000009,0xFFFFFFFF,0xFFFFFFFF,
675 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
676 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
677 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
678 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
679 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
680 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
681 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
682 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
683 0xFFFFF7FF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
684 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
685 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
686 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
687 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
688 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
689 0xFFFFFC78,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,
690 0x38FF8F50,0xFFF88082,0xF000000C,0xFAE009FF,
691 0x0000FFFF,0xFFFFFFFF,0xFFFFFFFF,0x00000000,
692 0x00000000,0x0000C100,0x00000000,0x00000000,
693 0x00000000,0x00000000,0x00000000,0x00000000,
694 0x00000000,0xFFFF0000,0xFFFFFFFF,0xFF80FFFF,
695 0x00000000,0x00000000,0x00000000,0x00000000,
696 0x0003FC01,0xFFFFFFF8,0xFE800B19,0xFFFFFFFF,
697 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
698 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
699 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
700 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
701 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
702 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
703 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
704 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
705 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
706 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
707 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
708 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
709 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
710 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
711 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
712};
713
714int rs690_init(struct radeon_device *rdev)
715{
716 rdev->config.r300.reg_safe_bm = rs690_reg_safe_bm;
717 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs690_reg_safe_bm);
718 return 0;
719}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 31a7f668ae5a..0566fb67e460 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -508,7 +508,7 @@ static const unsigned r500_reg_safe_bm[219] = {
508 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 508 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
509 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, 509 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
510 0x00000000, 0x00000000, 0x00000000, 0x00000000, 510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, 511 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
512 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 512 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 513 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 514 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 527908ff298c..063b933d864a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
408 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), 408 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
409 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), 409 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
410 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), 410 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
411 PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
411 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), 412 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
412 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), 413 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
413 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), 414 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 8f9509e1ebf7..55d093a36ae4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
362 * In either case, must tell the provider to reject. 362 * In either case, must tell the provider to reject.
363 */ 363 */
364 cm_id_priv->state = IW_CM_STATE_DESTROYING; 364 cm_id_priv->state = IW_CM_STATE_DESTROYING;
365 cm_id->device->iwcm->reject(cm_id, NULL, 0);
365 break; 366 break;
366 case IW_CM_STATE_CONN_SENT: 367 case IW_CM_STATE_CONN_SENT:
367 case IW_CM_STATE_DESTROYING: 368 case IW_CM_STATE_DESTROYING:
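
Context for the one-line addition, with the callback shape inferred from the call itself rather than re-checked against iw_cm.h: passing a NULL buffer and zero length asks the provider to refuse the still-pending connect request before the cm_id is destroyed, instead of leaving the peer waiting for an MPA reply. A hypothetical provider-side hook of that shape:

    /* Hypothetical provider implementation; struct iw_cm_id comes from
     * <rdma/iw_cm.h>.  pdata == NULL / pdata_len == 0 means "reject with
     * no private data". */
    #include <rdma/iw_cm.h>

    static int example_reject(struct iw_cm_id *cm_id, const void *pdata,
                              u8 pdata_len)
    {
            /* tell the adapter to refuse the pending connection request */
            return 0;
    }
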
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index de922a04ca2d..7522008fda86 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2,6 +2,7 @@
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
5 * 6 *
6 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
45MODULE_AUTHOR("Hal Rosenstock"); 46MODULE_AUTHOR("Hal Rosenstock");
46MODULE_AUTHOR("Sean Hefty"); 47MODULE_AUTHOR("Sean Hefty");
47 48
49int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
50int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
51
52module_param_named(send_queue_size, mad_sendq_size, int, 0444);
53MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
54module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
55MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
56
48static struct kmem_cache *ib_mad_cache; 57static struct kmem_cache *ib_mad_cache;
49 58
50static struct list_head ib_mad_port_list; 59static struct list_head ib_mad_port_list;
51static u32 ib_mad_client_id = 0; 60static u32 ib_mad_client_id = 0;
52 61
53/* Port list lock */ 62/* Port list lock */
54static spinlock_t ib_mad_port_list_lock; 63static DEFINE_SPINLOCK(ib_mad_port_list_lock);
55
56 64
57/* Forward declarations */ 65/* Forward declarations */
58static int method_in_use(struct ib_mad_mgmt_method_table **method, 66static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1974 unsigned long delay; 1982 unsigned long delay;
1975 1983
1976 if (list_empty(&mad_agent_priv->wait_list)) { 1984 if (list_empty(&mad_agent_priv->wait_list)) {
1977 cancel_delayed_work(&mad_agent_priv->timed_work); 1985 __cancel_delayed_work(&mad_agent_priv->timed_work);
1978 } else { 1986 } else {
1979 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 1987 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1980 struct ib_mad_send_wr_private, 1988 struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1983 if (time_after(mad_agent_priv->timeout, 1991 if (time_after(mad_agent_priv->timeout,
1984 mad_send_wr->timeout)) { 1992 mad_send_wr->timeout)) {
1985 mad_agent_priv->timeout = mad_send_wr->timeout; 1993 mad_agent_priv->timeout = mad_send_wr->timeout;
1986 cancel_delayed_work(&mad_agent_priv->timed_work); 1994 __cancel_delayed_work(&mad_agent_priv->timed_work);
1987 delay = mad_send_wr->timeout - jiffies; 1995 delay = mad_send_wr->timeout - jiffies;
1988 if ((long)delay <= 0) 1996 if ((long)delay <= 0)
1989 delay = 1; 1997 delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2023 2031
2024 /* Reschedule a work item if we have a shorter timeout */ 2032 /* Reschedule a work item if we have a shorter timeout */
2025 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { 2033 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2026 cancel_delayed_work(&mad_agent_priv->timed_work); 2034 __cancel_delayed_work(&mad_agent_priv->timed_work);
2027 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2035 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2028 &mad_agent_priv->timed_work, delay); 2036 &mad_agent_priv->timed_work, delay);
2029 } 2037 }
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2736 qp_init_attr.send_cq = qp_info->port_priv->cq; 2744 qp_init_attr.send_cq = qp_info->port_priv->cq;
2737 qp_init_attr.recv_cq = qp_info->port_priv->cq; 2745 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2738 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 2746 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2739 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE; 2747 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2740 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE; 2748 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2741 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 2749 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2742 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 2750 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2743 qp_init_attr.qp_type = qp_type; 2751 qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2752 goto error; 2760 goto error;
2753 } 2761 }
2754 /* Use minimum queue sizes unless the CQ is resized */ 2762 /* Use minimum queue sizes unless the CQ is resized */
2755 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE; 2763 qp_info->send_queue.max_active = mad_sendq_size;
2756 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE; 2764 qp_info->recv_queue.max_active = mad_recvq_size;
2757 return 0; 2765 return 0;
2758 2766
2759error: 2767error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
2792 init_mad_qp(port_priv, &port_priv->qp_info[0]); 2800 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2793 init_mad_qp(port_priv, &port_priv->qp_info[1]); 2801 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2794 2802
2795 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; 2803 cq_size = (mad_sendq_size + mad_recvq_size) * 2;
2796 port_priv->cq = ib_create_cq(port_priv->device, 2804 port_priv->cq = ib_create_cq(port_priv->device,
2797 ib_mad_thread_completion_handler, 2805 ib_mad_thread_completion_handler,
2798 NULL, port_priv, cq_size, 0); 2806 NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
2984{ 2992{
2985 int ret; 2993 int ret;
2986 2994
2987 spin_lock_init(&ib_mad_port_list_lock); 2995 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
2996 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
2997
2998 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
2999 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
2988 3000
2989 ib_mad_cache = kmem_cache_create("ib_mad", 3001 ib_mad_cache = kmem_cache_create("ib_mad",
2990 sizeof(struct ib_mad_private), 3002 sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)
3021 3033
3022module_init(ib_mad_init_module); 3034module_init(ib_mad_init_module);
3023module_exit(ib_mad_cleanup_module); 3035module_exit(ib_mad_cleanup_module);
3024
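
A note on the new parameters (illustration, not part of the patch): they are read-only via sysfs (mode 0444) and are set at load time; the min()/max() pairs at init clamp them into [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE], i.e. [64, 8192] per the mad_priv.h hunk below. The same clamping could be written with the kernel's clamp() helper, sketched here as a drop-in for the two pairs above:

    /* Equivalent formulation, illustrative only. */
    #include <linux/kernel.h>

    static void clamp_mad_queue_sizes(void)
    {
            mad_recvq_size = clamp(mad_recvq_size, IB_MAD_QP_MIN_SIZE,
                                   IB_MAD_QP_MAX_SIZE);
            mad_sendq_size = clamp(mad_sendq_size, IB_MAD_QP_MIN_SIZE,
                                   IB_MAD_QP_MAX_SIZE);
    }

At load time this is driven by, for example, "modprobe ib_mad send_queue_size=256 recv_queue_size=1024" (module name assumed to be the usual ib_mad).
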
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 05ce331733b0..9430ab4969c5 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -2,6 +2,7 @@
2 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
5 * 6 *
6 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
49/* QP and CQ parameters */ 50/* QP and CQ parameters */
50#define IB_MAD_QP_SEND_SIZE 128 51#define IB_MAD_QP_SEND_SIZE 128
51#define IB_MAD_QP_RECV_SIZE 512 52#define IB_MAD_QP_RECV_SIZE 512
53#define IB_MAD_QP_MIN_SIZE 64
54#define IB_MAD_QP_MAX_SIZE 8192
52#define IB_MAD_SEND_REQ_MAX_SG 2 55#define IB_MAD_SEND_REQ_MAX_SG 2
53#define IB_MAD_RECV_REQ_MAX_SG 1 56#define IB_MAD_RECV_REQ_MAX_SG 1
54 57
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 107f170c57cd..8d82ba171353 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,6 +106,8 @@ struct mcast_group {
106 struct ib_sa_query *query; 106 struct ib_sa_query *query;
107 int query_id; 107 int query_id;
108 u16 pkey_index; 108 u16 pkey_index;
109 u8 leave_state;
110 int retries;
109}; 111};
110 112
111struct mcast_member { 113struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
350 352
351 rec = group->rec; 353 rec = group->rec;
352 rec.join_state = leave_state; 354 rec.join_state = leave_state;
355 group->leave_state = leave_state;
353 356
354 ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, 357 ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
355 port->port_num, IB_SA_METHOD_DELETE, &rec, 358 port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
542{ 545{
543 struct mcast_group *group = context; 546 struct mcast_group *group = context;
544 547
545 mcast_work_handler(&group->work); 548 if (status && group->retries > 0 &&
549 !send_leave(group, group->leave_state))
550 group->retries--;
551 else
552 mcast_work_handler(&group->work);
546} 553}
547 554
548static struct mcast_group *acquire_group(struct mcast_port *port, 555static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
565 if (!group) 572 if (!group)
566 return NULL; 573 return NULL;
567 574
575 group->retries = 3;
568 group->port = port; 576 group->port = port;
569 group->rec.mgid = *mgid; 577 group->rec.mgid = *mgid;
570 group->pkey_index = MCAST_INVALID_PKEY_INDEX; 578 group->pkey_index = MCAST_INVALID_PKEY_INDEX;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1865049e80f7..82543716d59e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
109 .remove = ib_sa_remove_one 109 .remove = ib_sa_remove_one
110}; 110};
111 111
112static spinlock_t idr_lock; 112static DEFINE_SPINLOCK(idr_lock);
113static DEFINE_IDR(query_idr); 113static DEFINE_IDR(query_idr);
114 114
115static spinlock_t tid_lock; 115static DEFINE_SPINLOCK(tid_lock);
116static u32 tid; 116static u32 tid;
117 117
118#define PATH_REC_FIELD(field) \ 118#define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
1077{ 1077{
1078 int ret; 1078 int ret;
1079 1079
1080 spin_lock_init(&idr_lock);
1081 spin_lock_init(&tid_lock);
1082
1083 get_random_bytes(&tid, sizeof tid); 1080 get_random_bytes(&tid, sizeof tid);
1084 1081
1085 ret = ib_register_client(&sa_client); 1082 ret = ib_register_client(&sa_client);
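
The pattern behind this hunk, the uverbs_main.c hunk below, and the mad.c one above: a lock with static storage duration can be initialized at compile time with DEFINE_SPINLOCK(), which is what lets the explicit spin_lock_init() calls disappear from the init functions. A self-contained illustration, not tied to this file:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* no runtime init needed */

    static void example_critical_section(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... touch the data the lock protects ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }
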
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 87236753bce9..5855e4405d9b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
52 hop_cnt = smp->hop_cnt; 52 hop_cnt = smp->hop_cnt;
53 53
54 /* See section 14.2.2.2, Vol 1 IB spec */ 54 /* See section 14.2.2.2, Vol 1 IB spec */
55 /* C14-6 -- valid hop_cnt values are from 0 to 63 */
56 if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
57 return IB_SMI_DISCARD;
58
55 if (!ib_get_smp_direction(smp)) { 59 if (!ib_get_smp_direction(smp)) {
56 /* C14-9:1 */ 60 /* C14-9:1 */
57 if (hop_cnt && hop_ptr == 0) { 61 if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
133 hop_cnt = smp->hop_cnt; 137 hop_cnt = smp->hop_cnt;
134 138
135 /* See section 14.2.2.2, Vol 1 IB spec */ 139 /* See section 14.2.2.2, Vol 1 IB spec */
140 /* C14-6 -- valid hop_cnt values are from 0 to 63 */
141 if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
142 return IB_SMI_DISCARD;
143
136 if (!ib_get_smp_direction(smp)) { 144 if (!ib_get_smp_direction(smp)) {
137 /* C14-9:1 -- sender should have incremented hop_ptr */ 145 /* C14-9:1 -- sender should have incremented hop_ptr */
138 if (hop_cnt && hop_ptr == 0) 146 if (hop_cnt && hop_ptr == 0)
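
For reference, and hedged because the value is recalled from include/rdma/ib_smi.h rather than shown in this patch: IB_SMP_MAX_PATH_HOPS is expected to be 64, matching the 64-entry initial/return path arrays in a directed-route SMP, so the new C14-6 check discards any SMP whose hop_cnt could not index those arrays. A tiny worked form of the check:

    #define IB_SMP_MAX_PATH_HOPS 64   /* assumed, per ib_smi.h */

    /* valid iff hop_cnt can address initial_path[0..63]/return_path[0..63] */
    static int hop_cnt_valid(unsigned char hop_cnt)
    {
            return hop_cnt < IB_SMP_MAX_PATH_HOPS;  /* 0..63 pass, 64+ discard */
    }
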
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index eb36a81dd09b..d3fff9e008a3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
73DEFINE_IDR(ib_uverbs_qp_idr); 73DEFINE_IDR(ib_uverbs_qp_idr);
74DEFINE_IDR(ib_uverbs_srq_idr); 74DEFINE_IDR(ib_uverbs_srq_idr);
75 75
76static spinlock_t map_lock; 76static DEFINE_SPINLOCK(map_lock);
77static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES]; 77static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
78static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); 78static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
79 79
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
584 584
585 if (hdr.command < 0 || 585 if (hdr.command < 0 ||
586 hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || 586 hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
587 !uverbs_cmd_table[hdr.command] || 587 !uverbs_cmd_table[hdr.command])
588 !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
589 return -EINVAL; 588 return -EINVAL;
590 589
591 if (!file->ucontext && 590 if (!file->ucontext &&
592 hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) 591 hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
593 return -EINVAL; 592 return -EINVAL;
594 593
594 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
595 return -ENOSYS;
596
595 return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, 597 return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
596 hdr.in_words * 4, hdr.out_words * 4); 598 hdr.in_words * 4, hdr.out_words * 4);
597} 599}
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
836{ 838{
837 int ret; 839 int ret;
838 840
839 spin_lock_init(&map_lock);
840
841 ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, 841 ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
842 "infiniband_verbs"); 842 "infiniband_verbs");
843 if (ret) { 843 if (ret) {
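
Net effect of the reordered checks: a malformed or unknown command, or any command issued before GET_CONTEXT, still fails with -EINVAL, while a well-formed command that this particular device does not implement now fails with -ENOSYS. A hypothetical userspace sketch of telling the two apart (descriptor and command setup assumed):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    static void report_uverbs_error(int uverbs_fd, const void *cmd, size_t len)
    {
            if (write(uverbs_fd, cmd, len) >= 0)
                    return;
            if (errno == ENOSYS)
                    fprintf(stderr, "verb not supported by this device\n");
            else if (errno == EINVAL)
                    fprintf(stderr, "bad command or missing user context\n");
            else
                    perror("uverbs write");
    }
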
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfbb6d2f762..8250740c94b0 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);
86 86
87static void c2_print_macaddr(struct net_device *netdev) 87static void c2_print_macaddr(struct net_device *netdev)
88{ 88{
89 pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, " 89 pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
90 "IRQ %u\n", netdev->name,
91 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
92 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
93 netdev->irq);
94} 90}
95 91
96static void c2_set_rxbufsize(struct c2_port *c2_port) 92static void c2_set_rxbufsize(struct c2_port *c2_port)
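
The %pM conversion used above is the kernel printk extension for printing a 6-byte MAC address in colon-separated hex, which is what lets the six %02X conversions and their argument list collapse into a single pointer argument. Kernel-style fragment, illustrative only:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void print_mac_example(const u8 *dev_addr, unsigned int irq)
    {
            /* prints e.g. "MAC 00:1b:21:aa:bb:cc, IRQ 19" */
            pr_debug("MAC %pM, IRQ %u\n", dev_addr, irq);
    }
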
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f1948fad85d7..ad723bd8bf49 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
780 /* Register pseudo network device */ 780 /* Register pseudo network device */
781 dev->pseudo_netdev = c2_pseudo_netdev_init(dev); 781 dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
782 if (!dev->pseudo_netdev) 782 if (!dev->pseudo_netdev)
783 goto out3; 783 goto out;
784 784
785 ret = register_netdev(dev->pseudo_netdev); 785 ret = register_netdev(dev->pseudo_netdev);
786 if (ret) 786 if (ret)
787 goto out2; 787 goto out_free_netdev;
788 788
789 pr_debug("%s:%u\n", __func__, __LINE__); 789 pr_debug("%s:%u\n", __func__, __LINE__);
790 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); 790 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
851 dev->ibdev.post_recv = c2_post_receive; 851 dev->ibdev.post_recv = c2_post_receive;
852 852
853 dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); 853 dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
854 if (dev->ibdev.iwcm == NULL) {
855 ret = -ENOMEM;
856 goto out_unregister_netdev;
857 }
854 dev->ibdev.iwcm->add_ref = c2_add_ref; 858 dev->ibdev.iwcm->add_ref = c2_add_ref;
855 dev->ibdev.iwcm->rem_ref = c2_rem_ref; 859 dev->ibdev.iwcm->rem_ref = c2_rem_ref;
856 dev->ibdev.iwcm->get_qp = c2_get_qp; 860 dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)
862 866
863 ret = ib_register_device(&dev->ibdev); 867 ret = ib_register_device(&dev->ibdev);
864 if (ret) 868 if (ret)
865 goto out1; 869 goto out_free_iwcm;
866 870
867 for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) { 871 for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
868 ret = device_create_file(&dev->ibdev.dev, 872 ret = device_create_file(&dev->ibdev.dev,
869 c2_dev_attributes[i]); 873 c2_dev_attributes[i]);
870 if (ret) 874 if (ret)
871 goto out0; 875 goto out_unregister_ibdev;
872 } 876 }
873 goto out3; 877 goto out;
874 878
875out0: 879out_unregister_ibdev:
876 ib_unregister_device(&dev->ibdev); 880 ib_unregister_device(&dev->ibdev);
877out1: 881out_free_iwcm:
882 kfree(dev->ibdev.iwcm);
883out_unregister_netdev:
878 unregister_netdev(dev->pseudo_netdev); 884 unregister_netdev(dev->pseudo_netdev);
879out2: 885out_free_netdev:
880 free_netdev(dev->pseudo_netdev); 886 free_netdev(dev->pseudo_netdev);
881out3: 887out:
882 pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret); 888 pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
883 return ret; 889 return ret;
884} 890}
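
Besides adding the missing kmalloc() failure check for ibdev.iwcm, this hunk renames the numbered error labels to descriptive ones; the unwind ladder itself is the usual kernel idiom where each label undoes exactly the steps that succeeded before the jump. A generic, self-contained sketch of that shape (plain C, not amso1100 code):

    #include <stdlib.h>

    struct two_bufs { void *a; void *b; };

    static int two_bufs_init(struct two_bufs *t)
    {
            t->a = malloc(64);
            if (!t->a)
                    goto out;               /* nothing to undo yet */
            t->b = malloc(64);
            if (!t->b)
                    goto out_free_a;        /* undo only what succeeded */
            return 0;

    out_free_a:
            free(t->a);
    out:
            return -1;
    }
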
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 62f9cf2f94ec..72ed3396b721 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
852 wqe->qpcaps = attr->qpcaps; 852 wqe->qpcaps = attr->qpcaps;
853 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); 853 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
854 wqe->rqe_count = cpu_to_be16(attr->rqe_count); 854 wqe->rqe_count = cpu_to_be16(attr->rqe_count);
855 wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type)); 855 wqe->flags_rtr_type = cpu_to_be16(attr->flags |
856 V_RTR_TYPE(attr->rtr_type) |
857 V_CHAN(attr->chan));
856 wqe->ord = cpu_to_be32(attr->ord); 858 wqe->ord = cpu_to_be32(attr->ord);
857 wqe->ird = cpu_to_be32(attr->ird); 859 wqe->ird = cpu_to_be32(attr->ird);
858 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); 860 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
1032err2: 1034err2:
1033 cxio_hal_destroy_ctrl_qp(rdev_p); 1035 cxio_hal_destroy_ctrl_qp(rdev_p);
1034err1: 1036err1:
1037 rdev_p->t3cdev_p->ulp = NULL;
1035 list_del(&rdev_p->entry); 1038 list_del(&rdev_p->entry);
1036 return err; 1039 return err;
1037} 1040}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 32e3b1461d81..a197a5b7ac7f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
327#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE) 327#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
328#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE) 328#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
329 329
330#define S_CHAN 4
331#define M_CHAN 0x3
332#define V_CHAN(x) ((x) << S_CHAN)
333#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
334
330struct t3_rdma_init_attr { 335struct t3_rdma_init_attr {
331 u32 tid; 336 u32 tid;
332 u32 qpid; 337 u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
346 u16 flags; 351 u16 flags;
347 u16 rqe_count; 352 u16 rqe_count;
348 u32 irs; 353 u32 irs;
354 u32 chan;
349}; 355};
350 356
351struct t3_rdma_init_wr { 357struct t3_rdma_init_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 26fc0a4eaa74..b0ea0105ddf6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
51 51
52static void open_rnic_dev(struct t3cdev *); 52static void open_rnic_dev(struct t3cdev *);
53static void close_rnic_dev(struct t3cdev *); 53static void close_rnic_dev(struct t3cdev *);
54static void iwch_err_handler(struct t3cdev *, u32, u32); 54static void iwch_event_handler(struct t3cdev *, u32, u32);
55 55
56struct cxgb3_client t3c_client = { 56struct cxgb3_client t3c_client = {
57 .name = "iw_cxgb3", 57 .name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
59 .remove = close_rnic_dev, 59 .remove = close_rnic_dev,
60 .handlers = t3c_handlers, 60 .handlers = t3c_handlers,
61 .redirect = iwch_ep_redirect, 61 .redirect = iwch_ep_redirect,
62 .err_handler = iwch_err_handler 62 .event_handler = iwch_event_handler
63}; 63};
64 64
65static LIST_HEAD(dev_list); 65static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
105static void open_rnic_dev(struct t3cdev *tdev) 105static void open_rnic_dev(struct t3cdev *tdev)
106{ 106{
107 struct iwch_dev *rnicp; 107 struct iwch_dev *rnicp;
108 static int vers_printed;
109 108
110 PDBG("%s t3cdev %p\n", __func__, tdev); 109 PDBG("%s t3cdev %p\n", __func__, tdev);
111 if (!vers_printed++) 110 printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
112 printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
113 DRV_VERSION); 111 DRV_VERSION);
114 rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); 112 rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
115 if (!rnicp) { 113 if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
162 mutex_unlock(&dev_mutex); 160 mutex_unlock(&dev_mutex);
163} 161}
164 162
165static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) 163static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
166{ 164{
167 struct cxio_rdev *rdev = tdev->ulp; 165 struct cxio_rdev *rdev = tdev->ulp;
168 struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev); 166 struct iwch_dev *rnicp;
169 struct ib_event event; 167 struct ib_event event;
168 u32 portnum = port_id + 1;
170 169
171 if (status == OFFLOAD_STATUS_DOWN) { 170 if (!rdev)
171 return;
172 rnicp = rdev_to_iwch_dev(rdev);
173 switch (evt) {
174 case OFFLOAD_STATUS_DOWN: {
172 rdev->flags = CXIO_ERROR_FATAL; 175 rdev->flags = CXIO_ERROR_FATAL;
173
174 event.device = &rnicp->ibdev;
175 event.event = IB_EVENT_DEVICE_FATAL; 176 event.event = IB_EVENT_DEVICE_FATAL;
176 event.element.port_num = 0; 177 break;
177 ib_dispatch_event(&event); 178 }
179 case OFFLOAD_PORT_DOWN: {
180 event.event = IB_EVENT_PORT_ERR;
181 break;
182 }
183 case OFFLOAD_PORT_UP: {
184 event.event = IB_EVENT_PORT_ACTIVE;
185 break;
186 }
178 } 187 }
179 188
189 event.device = &rnicp->ibdev;
190 event.element.port_num = portnum;
191 ib_dispatch_event(&event);
192
180 return; 193 return;
181} 194}
182 195
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 52d7bb0c2a12..66b41351910a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
286 ep = container_of(container_of(kref, struct iwch_ep_common, kref), 286 ep = container_of(container_of(kref, struct iwch_ep_common, kref),
287 struct iwch_ep, com); 287 struct iwch_ep, com);
288 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); 288 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
289 if (ep->com.flags & RELEASE_RESOURCES) { 289 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
290 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 290 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
291 dst_release(ep->dst); 291 dst_release(ep->dst);
292 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 292 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
297static void release_ep_resources(struct iwch_ep *ep) 297static void release_ep_resources(struct iwch_ep *ep)
298{ 298{
299 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); 299 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
300 ep->com.flags |= RELEASE_RESOURCES; 300 set_bit(RELEASE_RESOURCES, &ep->com.flags);
301 put_ep(&ep->com); 301 put_ep(&ep->com);
302} 302}
303 303
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
786 event.private_data_len = ep->plen; 786 event.private_data_len = ep->plen;
787 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 787 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
788 event.provider_data = ep; 788 event.provider_data = ep;
789 if (state_read(&ep->parent_ep->com) != DEAD) 789 if (state_read(&ep->parent_ep->com) != DEAD) {
790 get_ep(&ep->com);
790 ep->parent_ep->com.cm_id->event_handler( 791 ep->parent_ep->com.cm_id->event_handler(
791 ep->parent_ep->com.cm_id, 792 ep->parent_ep->com.cm_id,
792 &event); 793 &event);
794 }
793 put_ep(&ep->parent_ep->com); 795 put_ep(&ep->parent_ep->com);
794 ep->parent_ep = NULL; 796 ep->parent_ep = NULL;
795} 797}
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1156 * We get 2 abort replies from the HW. The first one must 1158 * We get 2 abort replies from the HW. The first one must
1157 * be ignored except for scribbling that we need one more. 1159 * be ignored except for scribbling that we need one more.
1158 */ 1160 */
1159 if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) { 1161 if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
1160 ep->com.flags |= ABORT_REQ_IN_PROGRESS;
1161 return CPL_RET_BUF_DONE; 1162 return CPL_RET_BUF_DONE;
1162 } 1163 }
1163 1164
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1477 /* 1478 /*
1478 * We're gonna mark this puppy DEAD, but keep 1479 * We're gonna mark this puppy DEAD, but keep
1479 * the reference on it until the ULP accepts or 1480 * the reference on it until the ULP accepts or
1480 * rejects the CR. 1481 * rejects the CR. Also wake up anyone waiting
1482 * in rdma connection migration (see iwch_accept_cr()).
1481 */ 1483 */
1482 __state_set(&ep->com, CLOSING); 1484 __state_set(&ep->com, CLOSING);
1483 get_ep(&ep->com); 1485 ep->com.rpl_done = 1;
1486 ep->com.rpl_err = -ECONNRESET;
1487 PDBG("waking up ep %p\n", ep);
1488 wake_up(&ep->com.waitq);
1484 break; 1489 break;
1485 case MPA_REP_SENT: 1490 case MPA_REP_SENT:
1486 __state_set(&ep->com, CLOSING); 1491 __state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1561 * We get 2 peer aborts from the HW. The first one must 1566 * We get 2 peer aborts from the HW. The first one must
1562 * be ignored except for scribbling that we need one more. 1567 * be ignored except for scribbling that we need one more.
1563 */ 1568 */
1564 if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) { 1569 if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
1565 ep->com.flags |= PEER_ABORT_IN_PROGRESS;
1566 return CPL_RET_BUF_DONE; 1570 return CPL_RET_BUF_DONE;
1567 } 1571 }
1568 1572
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1589 /* 1593 /*
1590 * We're gonna mark this puppy DEAD, but keep 1594 * We're gonna mark this puppy DEAD, but keep
1591 * the reference on it until the ULP accepts or 1595 * the reference on it until the ULP accepts or
1592 * rejects the CR. 1596 * rejects the CR. Also wake up anyone waiting
1597 * in rdma connection migration (see iwch_accept_cr()).
1593 */ 1598 */
1594 get_ep(&ep->com); 1599 ep->com.rpl_done = 1;
1600 ep->com.rpl_err = -ECONNRESET;
1601 PDBG("waking up ep %p\n", ep);
1602 wake_up(&ep->com.waitq);
1595 break; 1603 break;
1596 case MORIBUND: 1604 case MORIBUND:
1597 case CLOSING: 1605 case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1797 err = send_mpa_reject(ep, pdata, pdata_len); 1805 err = send_mpa_reject(ep, pdata, pdata_len);
1798 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); 1806 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
1799 } 1807 }
1808 put_ep(&ep->com);
1800 return 0; 1809 return 0;
1801} 1810}
1802 1811
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1810 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); 1819 struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
1811 1820
1812 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1821 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1813 if (state_read(&ep->com) == DEAD) 1822 if (state_read(&ep->com) == DEAD) {
1814 return -ECONNRESET; 1823 err = -ECONNRESET;
1824 goto err;
1825 }
1815 1826
1816 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1827 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1817 BUG_ON(!qp); 1828 BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1819 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || 1830 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
1820 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { 1831 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
1821 abort_connection(ep, NULL, GFP_KERNEL); 1832 abort_connection(ep, NULL, GFP_KERNEL);
1822 return -EINVAL; 1833 err = -EINVAL;
1834 goto err;
1823 } 1835 }
1824 1836
1825 cm_id->add_ref(cm_id); 1837 cm_id->add_ref(cm_id);
1826 ep->com.cm_id = cm_id; 1838 ep->com.cm_id = cm_id;
1827 ep->com.qp = qp; 1839 ep->com.qp = qp;
1828 1840
1829 ep->com.rpl_done = 0;
1830 ep->com.rpl_err = 0;
1831 ep->ird = conn_param->ird; 1841 ep->ird = conn_param->ird;
1832 ep->ord = conn_param->ord; 1842 ep->ord = conn_param->ord;
1833 1843
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1836 1846
1837 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 1847 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1838 1848
1839 get_ep(&ep->com);
1840
1841 /* bind QP to EP and move to RTS */ 1849 /* bind QP to EP and move to RTS */
1842 attrs.mpa_attr = ep->mpa_attr; 1850 attrs.mpa_attr = ep->mpa_attr;
1843 attrs.max_ird = ep->ird; 1851 attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1855 err = iwch_modify_qp(ep->com.qp->rhp, 1863 err = iwch_modify_qp(ep->com.qp->rhp,
1856 ep->com.qp, mask, &attrs, 1); 1864 ep->com.qp, mask, &attrs, 1);
1857 if (err) 1865 if (err)
1858 goto err; 1866 goto err1;
1859 1867
1860 /* if needed, wait for wr_ack */ 1868 /* if needed, wait for wr_ack */
1861 if (iwch_rqes_posted(qp)) { 1869 if (iwch_rqes_posted(qp)) {
1862 wait_event(ep->com.waitq, ep->com.rpl_done); 1870 wait_event(ep->com.waitq, ep->com.rpl_done);
1863 err = ep->com.rpl_err; 1871 err = ep->com.rpl_err;
1864 if (err) 1872 if (err)
1865 goto err; 1873 goto err1;
1866 } 1874 }
1867 1875
1868 err = send_mpa_reply(ep, conn_param->private_data, 1876 err = send_mpa_reply(ep, conn_param->private_data,
1869 conn_param->private_data_len); 1877 conn_param->private_data_len);
1870 if (err) 1878 if (err)
1871 goto err; 1879 goto err1;
1872 1880
1873 1881
1874 state_set(&ep->com, FPDU_MODE); 1882 state_set(&ep->com, FPDU_MODE);
1875 established_upcall(ep); 1883 established_upcall(ep);
1876 put_ep(&ep->com); 1884 put_ep(&ep->com);
1877 return 0; 1885 return 0;
1878err: 1886err1:
1879 ep->com.cm_id = NULL; 1887 ep->com.cm_id = NULL;
1880 ep->com.qp = NULL; 1888 ep->com.qp = NULL;
1881 cm_id->rem_ref(cm_id); 1889 cm_id->rem_ref(cm_id);
1890err:
1882 put_ep(&ep->com); 1891 put_ep(&ep->com);
1883 return err; 1892 return err;
1884} 1893}
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2097 ep->com.state = CLOSING; 2106 ep->com.state = CLOSING;
2098 start_ep_timer(ep); 2107 start_ep_timer(ep);
2099 } 2108 }
2109 set_bit(CLOSE_SENT, &ep->com.flags);
2100 break; 2110 break;
2101 case CLOSING: 2111 case CLOSING:
2102 close = 1; 2112 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2103 if (abrupt) { 2113 close = 1;
2104 stop_ep_timer(ep); 2114 if (abrupt) {
2105 ep->com.state = ABORTING; 2115 stop_ep_timer(ep);
2106 } else 2116 ep->com.state = ABORTING;
2107 ep->com.state = MORIBUND; 2117 } else
2118 ep->com.state = MORIBUND;
2119 }
2108 break; 2120 break;
2109 case MORIBUND: 2121 case MORIBUND:
2110 case ABORTING: 2122 case ABORTING:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 43c0aea7eadc..b9efadfffb4f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -145,9 +145,10 @@ enum iwch_ep_state {
145}; 145};
146 146
147enum iwch_ep_flags { 147enum iwch_ep_flags {
148 PEER_ABORT_IN_PROGRESS = (1 << 0), 148 PEER_ABORT_IN_PROGRESS = 0,
149 ABORT_REQ_IN_PROGRESS = (1 << 1), 149 ABORT_REQ_IN_PROGRESS = 1,
150 RELEASE_RESOURCES = (1 << 2), 150 RELEASE_RESOURCES = 2,
151 CLOSE_SENT = 3,
151}; 152};
152 153
153struct iwch_ep_common { 154struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
162 wait_queue_head_t waitq; 163 wait_queue_head_t waitq;
163 int rpl_done; 164 int rpl_done;
164 int rpl_err; 165 int rpl_err;
165 u32 flags; 166 unsigned long flags;
166}; 167};
167 168
168struct iwch_listen_ep { 169struct iwch_listen_ep {
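
Why the enum values change from masks to plain bit numbers and the flags field becomes unsigned long: the atomic bitops now used in iwch_cm.c (set_bit(), test_bit(), test_and_set_bit()) take a bit index and operate on an unsigned long word, and test_and_set_bit() gives the "ignore the second abort reply" logic its atomicity without an open-coded read-modify-write. Kernel-style sketch, illustrative only:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* returns false for the first abort reply seen, true for any later one;
     * ABORT_REQ_IN_PROGRESS is the bit index from the enum above */
    static bool abort_already_seen(unsigned long *flags)
    {
            return test_and_set_bit(ABORT_REQ_IN_PROGRESS, flags);
    }
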
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index ec49a5cbdebb..e1ec65ebb016 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -39,7 +39,7 @@
39#include "iwch.h" 39#include "iwch.h"
40#include "iwch_provider.h" 40#include "iwch_provider.h"
41 41
42static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) 42static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
43{ 43{
44 u32 mmid; 44 u32 mmid;
45 45
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
47 mhp->attr.stag = stag; 47 mhp->attr.stag = stag;
48 mmid = stag >> 8; 48 mmid = stag >> 8;
49 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 49 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
50 insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
51 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); 50 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
51 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
52} 52}
53 53
54int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, 54int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
55 struct iwch_mr *mhp, int shift) 55 struct iwch_mr *mhp, int shift)
56{ 56{
57 u32 stag; 57 u32 stag;
58 int ret;
58 59
59 if (cxio_register_phys_mem(&rhp->rdev, 60 if (cxio_register_phys_mem(&rhp->rdev,
60 &stag, mhp->attr.pdid, 61 &stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
66 mhp->attr.pbl_size, mhp->attr.pbl_addr)) 67 mhp->attr.pbl_size, mhp->attr.pbl_addr))
67 return -ENOMEM; 68 return -ENOMEM;
68 69
69 iwch_finish_mem_reg(mhp, stag); 70 ret = iwch_finish_mem_reg(mhp, stag);
70 71 if (ret)
71 return 0; 72 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
73 mhp->attr.pbl_addr);
74 return ret;
72} 75}
73 76
74int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, 77int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
77 int npages) 80 int npages)
78{ 81{
79 u32 stag; 82 u32 stag;
83 int ret;
80 84
81 /* We could support this... */ 85 /* We could support this... */
82 if (npages > mhp->attr.pbl_size) 86 if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
93 mhp->attr.pbl_size, mhp->attr.pbl_addr)) 97 mhp->attr.pbl_size, mhp->attr.pbl_addr))
94 return -ENOMEM; 98 return -ENOMEM;
95 99
96 iwch_finish_mem_reg(mhp, stag); 100 ret = iwch_finish_mem_reg(mhp, stag);
101 if (ret)
102 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
103 mhp->attr.pbl_addr);
97 104
98 return 0; 105 return ret;
99} 106}
100 107
101int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) 108int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e2a63214008a..6895523779d0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
195 spin_lock_init(&chp->lock); 195 spin_lock_init(&chp->lock);
196 atomic_set(&chp->refcnt, 1); 196 atomic_set(&chp->refcnt, 1);
197 init_waitqueue_head(&chp->wait); 197 init_waitqueue_head(&chp->wait);
198 insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); 198 if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
199 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
200 kfree(chp);
201 return ERR_PTR(-ENOMEM);
202 }
199 203
200 if (ucontext) { 204 if (ucontext) {
201 struct iwch_mm_entry *mm; 205 struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
750 mhp->attr.stag = stag; 754 mhp->attr.stag = stag;
751 mmid = (stag) >> 8; 755 mmid = (stag) >> 8;
752 mhp->ibmw.rkey = stag; 756 mhp->ibmw.rkey = stag;
753 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 757 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
758 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
759 kfree(mhp);
760 return ERR_PTR(-ENOMEM);
761 }
754 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); 762 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
755 return &(mhp->ibmw); 763 return &(mhp->ibmw);
756} 764}
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
778 struct iwch_mr *mhp; 786 struct iwch_mr *mhp;
779 u32 mmid; 787 u32 mmid;
780 u32 stag = 0; 788 u32 stag = 0;
781 int ret; 789 int ret = 0;
782 790
783 php = to_iwch_pd(pd); 791 php = to_iwch_pd(pd);
784 rhp = php->rhp; 792 rhp = php->rhp;
785 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); 793 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
786 if (!mhp) 794 if (!mhp)
787 return ERR_PTR(-ENOMEM); 795 goto err;
788 796
789 mhp->rhp = rhp; 797 mhp->rhp = rhp;
790 ret = iwch_alloc_pbl(mhp, pbl_depth); 798 ret = iwch_alloc_pbl(mhp, pbl_depth);
791 if (ret) { 799 if (ret)
792 kfree(mhp); 800 goto err1;
793 return ERR_PTR(ret);
794 }
795 mhp->attr.pbl_size = pbl_depth; 801 mhp->attr.pbl_size = pbl_depth;
796 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid, 802 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
797 mhp->attr.pbl_size, mhp->attr.pbl_addr); 803 mhp->attr.pbl_size, mhp->attr.pbl_addr);
798 if (ret) { 804 if (ret)
799 iwch_free_pbl(mhp); 805 goto err2;
800 kfree(mhp);
801 return ERR_PTR(ret);
802 }
803 mhp->attr.pdid = php->pdid; 806 mhp->attr.pdid = php->pdid;
804 mhp->attr.type = TPT_NON_SHARED_MR; 807 mhp->attr.type = TPT_NON_SHARED_MR;
805 mhp->attr.stag = stag; 808 mhp->attr.stag = stag;
806 mhp->attr.state = 1; 809 mhp->attr.state = 1;
807 mmid = (stag) >> 8; 810 mmid = (stag) >> 8;
808 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 811 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
809 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 812 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
813 goto err3;
814
810 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); 815 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
811 return &(mhp->ibmr); 816 return &(mhp->ibmr);
817err3:
818 cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
819 mhp->attr.pbl_addr);
820err2:
821 iwch_free_pbl(mhp);
822err1:
823 kfree(mhp);
824err:
825 return ERR_PTR(ret);
812} 826}
813 827
814static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl( 828static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
961 spin_lock_init(&qhp->lock); 975 spin_lock_init(&qhp->lock);
962 init_waitqueue_head(&qhp->wait); 976 init_waitqueue_head(&qhp->wait);
963 atomic_set(&qhp->refcnt, 1); 977 atomic_set(&qhp->refcnt, 1);
964 insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid); 978
979 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
980 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
981 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
982 kfree(qhp);
983 return ERR_PTR(-ENOMEM);
984 }
965 985
966 if (udata) { 986 if (udata) {
967 987
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
1418bail2: 1438bail2:
1419 ib_unregister_device(&dev->ibdev); 1439 ib_unregister_device(&dev->ibdev);
1420bail1: 1440bail1:
1441 kfree(dev->ibdev.iwcm);
1421 return ret; 1442 return ret;
1422} 1443}
1423 1444
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
1430 device_remove_file(&dev->ibdev.dev, 1451 device_remove_file(&dev->ibdev.dev,
1431 iwch_class_attributes[i]); 1452 iwch_class_attributes[i]);
1432 ib_unregister_device(&dev->ibdev); 1453 ib_unregister_device(&dev->ibdev);
1454 kfree(dev->ibdev.iwcm);
1433 return; 1455 return;
1434} 1456}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 27bbdc8e773a..6e8653471941 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
889 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); 889 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
890 init_attr.rqe_count = iwch_rqes_posted(qhp); 890 init_attr.rqe_count = iwch_rqes_posted(qhp);
891 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0; 891 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
892 init_attr.chan = qhp->ep->l2t->smt_idx;
892 if (peer2peer) { 893 if (peer2peer) {
893 init_attr.rtr_type = RTR_READ; 894 init_attr.rtr_type = RTR_READ;
894 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator) 895 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fab18a2c74a8..5b635aa5947e 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52#include "ehca_tools.h" 52#include "ehca_tools.h"
53#include "hcp_if.h" 53#include "hcp_if.h"
54 54
55#define HCAD_VERSION "0028" 55#define HCAD_VERSION "0029"
56 56
57MODULE_LICENSE("Dual BSD/GPL"); 57MODULE_LICENSE("Dual BSD/GPL");
58MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 58MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
64static int ehca_poll_all_eqs = 1; 64static int ehca_poll_all_eqs = 1;
65 65
66int ehca_debug_level = 0; 66int ehca_debug_level = 0;
67int ehca_nr_ports = 2; 67int ehca_nr_ports = -1;
68int ehca_use_hp_mr = 0; 68int ehca_use_hp_mr = 0;
69int ehca_port_act_time = 30; 69int ehca_port_act_time = 30;
70int ehca_static_rate = -1; 70int ehca_static_rate = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
95 "Hardware level (0: autosensing (default), " 95 "Hardware level (0: autosensing (default), "
96 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); 96 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
97MODULE_PARM_DESC(nr_ports, 97MODULE_PARM_DESC(nr_ports,
98 "number of connected ports (-1: autodetect, 1: port one only, " 98 "number of connected ports (-1: autodetect (default), "
99 "2: two ports (default)"); 99 "1: port one only, 2: two ports)");
100MODULE_PARM_DESC(use_hp_mr, 100MODULE_PARM_DESC(use_hp_mr,
101 "Use high performance MRs (default: no)"); 101 "Use high performance MRs (default: no)");
102MODULE_PARM_DESC(port_act_time, 102MODULE_PARM_DESC(port_act_time,
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 5a3d96f84c79..8fd88cd828fd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -786,7 +786,11 @@ repoll:
786 wc->slid = cqe->rlid; 786 wc->slid = cqe->rlid;
787 wc->dlid_path_bits = cqe->dlid; 787 wc->dlid_path_bits = cqe->dlid;
788 wc->src_qp = cqe->remote_qp_number; 788 wc->src_qp = cqe->remote_qp_number;
789 wc->wc_flags = cqe->w_completion_flags; 789 /*
790 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
791 * SW defines those in bits 1 and 0, so we can just shift and mask.
792 */
793 wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
790 wc->ex.imm_data = cpu_to_be32(cqe->immediate_data); 794 wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
791 wc->sl = cqe->service_level; 795 wc->sl = cqe->service_level;
792 796
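
A worked example of the flag translation above, assuming the usual ib_wc_flags bit positions (IB_WC_GRH in bit 0, IB_WC_WITH_IMM in bit 1); those definitions are not part of this patch and are restated locally:

    #include <stdio.h>

    #define IB_WC_GRH      (1 << 0)   /* assumed SW bit positions */
    #define IB_WC_WITH_IMM (1 << 1)

    int main(void)
    {
            unsigned int hw = (1u << 6) | (1u << 5); /* CQE: immed + GRH set */
            unsigned int sw = (hw >> 5) & 3;         /* as in the hunk above */

            /* prints: wc_flags = 0x3 (GRH=1, IMM=1) */
            printf("wc_flags = %#x (GRH=%u, IMM=%u)\n", sw,
                   sw & IB_WC_GRH ? 1u : 0u, sw & IB_WC_WITH_IMM ? 1u : 0u);
            return 0;
    }
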
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index c568b28f4e20..8c1213f8916a 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -125,14 +125,30 @@ struct ib_perf {
125 u8 data[192]; 125 u8 data[192];
126} __attribute__ ((packed)); 126} __attribute__ ((packed));
127 127
128/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
129struct tcslfl {
130 u32 tc:8;
131 u32 sl:4;
132 u32 fl:20;
133} __attribute__ ((packed));
134
135/* IP Version/TC/FL packed into 32 bits, as in GRH */
136struct vertcfl {
137 u32 ver:4;
138 u32 tc:8;
139 u32 fl:20;
140} __attribute__ ((packed));
128 141
129static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, 142static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
143 struct ib_wc *in_wc, struct ib_grh *in_grh,
130 struct ib_mad *in_mad, struct ib_mad *out_mad) 144 struct ib_mad *in_mad, struct ib_mad *out_mad)
131{ 145{
132 struct ib_perf *in_perf = (struct ib_perf *)in_mad; 146 struct ib_perf *in_perf = (struct ib_perf *)in_mad;
133 struct ib_perf *out_perf = (struct ib_perf *)out_mad; 147 struct ib_perf *out_perf = (struct ib_perf *)out_mad;
134 struct ib_class_port_info *poi = 148 struct ib_class_port_info *poi =
135 (struct ib_class_port_info *)out_perf->data; 149 (struct ib_class_port_info *)out_perf->data;
150 struct tcslfl *tcslfl =
151 (struct tcslfl *)&poi->redirect_tcslfl;
136 struct ehca_shca *shca = 152 struct ehca_shca *shca =
137 container_of(ibdev, struct ehca_shca, ib_device); 153 container_of(ibdev, struct ehca_shca, ib_device);
138 struct ehca_sport *sport = &shca->sport[port_num - 1]; 154 struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
158 poi->base_version = 1; 174 poi->base_version = 1;
159 poi->class_version = 1; 175 poi->class_version = 1;
160 poi->resp_time_value = 18; 176 poi->resp_time_value = 18;
161 poi->redirect_lid = sport->saved_attr.lid; 177
162 poi->redirect_qp = sport->pma_qp_nr; 178 /* copy local routing information from WC where applicable */
179 tcslfl->sl = in_wc->sl;
180 poi->redirect_lid =
181 sport->saved_attr.lid | in_wc->dlid_path_bits;
182 poi->redirect_qp = sport->pma_qp_nr;
163 poi->redirect_qkey = IB_QP1_QKEY; 183 poi->redirect_qkey = IB_QP1_QKEY;
164 poi->redirect_pkey = IB_DEFAULT_PKEY_FULL; 184
185 ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
186 &poi->redirect_pkey);
187
188 /* if request was globally routed, copy route info */
189 if (in_grh) {
190 struct vertcfl *vertcfl =
191 (struct vertcfl *)&in_grh->version_tclass_flow;
192 memcpy(poi->redirect_gid, in_grh->dgid.raw,
193 sizeof(poi->redirect_gid));
194 tcslfl->tc = vertcfl->tc;
195 tcslfl->fl = vertcfl->fl;
196 } else
197 /* else only fill in default GID */
198 ehca_query_gid(ibdev, port_num, 0,
199 (union ib_gid *)&poi->redirect_gid);
165 200
166 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", 201 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
167 sport->saved_attr.lid, sport->pma_qp_nr); 202 sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:
183 218
184int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 219int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
185 struct ib_wc *in_wc, struct ib_grh *in_grh, 220 struct ib_wc *in_wc, struct ib_grh *in_grh,
186 struct ib_mad *in_mad, 221 struct ib_mad *in_mad, struct ib_mad *out_mad)
187 struct ib_mad *out_mad)
188{ 222{
189 int ret; 223 int ret;
190 224
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
196 return IB_MAD_RESULT_SUCCESS; 230 return IB_MAD_RESULT_SUCCESS;
197 231
198 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); 232 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
199 ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad); 233 ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
234 in_mad, out_mad);
200 235
201 return ret; 236 return ret;
202} 237}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 23173982b32c..38a287006612 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
1616 pd->port_cnt = 1; 1616 pd->port_cnt = 1;
1617 port_fp(fp) = pd; 1617 port_fp(fp) = pd;
1618 pd->port_pid = get_pid(task_pid(current)); 1618 pd->port_pid = get_pid(task_pid(current));
1619 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1619 strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
1620 ipath_stats.sps_ports++; 1620 ipath_stats.sps_ports++;
1621 ret = 0; 1621 ret = 0;
1622 } else 1622 } else
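The strncpy()-to-strlcpy() switch above matters because current->comm can fill the destination: strncpy() then leaves port_comm without a terminating NUL, while strlcpy() always terminates and truncates. A minimal illustration (buffer size chosen for the example, not taken from the driver):

#include <linux/string.h>

char comm[8];

strncpy(comm, "long-task-name", sizeof(comm)); /* buffer full, no '\0' anywhere */
strlcpy(comm, "long-task-name", sizeof(comm)); /* truncated, comm[7] == '\0'    */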
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 16a702d46018..ceb98ee78666 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
60 if (smp->attr_mod) 60 if (smp->attr_mod)
61 smp->status |= IB_SMP_INVALID_FIELD; 61 smp->status |= IB_SMP_INVALID_FIELD;
62 62
63 strncpy(smp->data, ibdev->node_desc, sizeof(smp->data)); 63 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
64 64
65 return reply(smp); 65 return reply(smp);
66} 66}
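Here the change goes the other way: NodeDescription is a fixed 64-byte MAD attribute rather than a NUL-terminated string, so a fixed-length copy of the whole field is the clearer tool. Restated with the assumption spelled out:

/* Assumption: smp->data and ibdev->node_desc are both the full 64-byte
 * NodeDescription field defined by the IB spec; consumers expect the whole
 * field verbatim, not a '\0'-terminated string. */
memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));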
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ae3d7590346e..3cb3f47a10b8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
342 struct mlx4_ib_alloc_ucontext_resp resp; 342 struct mlx4_ib_alloc_ucontext_resp resp;
343 int err; 343 int err;
344 344
345 if (!dev->ib_active)
346 return ERR_PTR(-EAGAIN);
347
345 resp.qp_tab_size = dev->dev->caps.num_qps; 348 resp.qp_tab_size = dev->dev->caps.num_qps;
346 resp.bf_reg_size = dev->dev->caps.bf_reg_size; 349 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
347 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; 350 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {
540 543
541static void *mlx4_ib_add(struct mlx4_dev *dev) 544static void *mlx4_ib_add(struct mlx4_dev *dev)
542{ 545{
543 static int mlx4_ib_version_printed;
544 struct mlx4_ib_dev *ibdev; 546 struct mlx4_ib_dev *ibdev;
545 int num_ports = 0; 547 int num_ports = 0;
546 int i; 548 int i;
547 549
548 if (!mlx4_ib_version_printed) { 550 printk_once(KERN_INFO "%s", mlx4_ib_version);
549 printk(KERN_INFO "%s", mlx4_ib_version);
550 ++mlx4_ib_version_printed;
551 }
552 551
553 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 552 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
554 num_ports++; 553 num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
673 goto err_reg; 672 goto err_reg;
674 } 673 }
675 674
675 ibdev->ib_active = true;
676
676 return ibdev; 677 return ibdev;
677 678
678err_reg: 679err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
729 break; 730 break;
730 731
731 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: 732 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
733 ibdev->ib_active = false;
732 ibev.event = IB_EVENT_DEVICE_FATAL; 734 ibev.event = IB_EVENT_DEVICE_FATAL;
733 break; 735 break;
734 736
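printk_once() replaces the open-coded version_printed flag removed above (mthca gets the same treatment further down). A rough equivalent of what the macro does, for reference -- the exact definition is in include/linux/kernel.h of this era:

/* Approximation only; see the kernel's own definition for the exact form. */
#define printk_once(fmt...)			\
({						\
	static bool __printed;			\
						\
	if (!__printed) {			\
		__printed = true;		\
		printk(fmt);			\
	}					\
})

The ib_active flag added alongside it is cleared on MLX4_DEV_EVENT_CATASTROPHIC_ERROR so that mlx4_ib_alloc_ucontext() refuses new user contexts with -EAGAIN while the device is fenced off.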
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 8a7dd6795fa0..3486d7675e56 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
175 spinlock_t sm_lock; 175 spinlock_t sm_lock;
176 176
177 struct mutex cap_mask_mutex; 177 struct mutex cap_mask_mutex;
178 bool ib_active;
178}; 179};
179 180
180static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) 181static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c4a02648c8af..219b10397b4d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
615} 615}
616 616
617static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) 617static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
618 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
618{ 619{
619 if (send_cq == recv_cq) 620 if (send_cq == recv_cq) {
620 spin_lock_irq(&send_cq->lock); 621 spin_lock_irq(&send_cq->lock);
621 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 622 __acquire(&recv_cq->lock);
623 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
622 spin_lock_irq(&send_cq->lock); 624 spin_lock_irq(&send_cq->lock);
623 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 625 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
624 } else { 626 } else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
628} 630}
629 631
630static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) 632static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
633 __releases(&send_cq->lock) __releases(&recv_cq->lock)
631{ 634{
632 if (send_cq == recv_cq) 635 if (send_cq == recv_cq) {
636 __release(&recv_cq->lock);
633 spin_unlock_irq(&send_cq->lock); 637 spin_unlock_irq(&send_cq->lock);
634 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 638 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
635 spin_unlock(&recv_cq->lock); 639 spin_unlock(&recv_cq->lock);
636 spin_unlock_irq(&send_cq->lock); 640 spin_unlock_irq(&send_cq->lock);
637 } else { 641 } else {
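The __acquires/__releases/__acquire/__release markers added above are sparse lock-context annotations (checked with "make C=1"), not runtime code: they record that both CQ locks are conceptually held on return even when send_cq == recv_cq and only one spinlock is really taken. A condensed sketch of the idiom, with the CQN-based ordering of the original omitted:

#include <linux/spinlock.h>

static void lock_two_cqs(spinlock_t *a, spinlock_t *b)
	__acquires(a) __acquires(b)
{
	if (a == b) {
		spin_lock_irq(a);
		__acquire(b);	/* balance the checker, no real lock taken */
	} else {
		spin_lock_irq(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	}
}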
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 65ad359fdf16..056b2a4c6970 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
88 event.device = &dev->ib_dev; 88 event.device = &dev->ib_dev;
89 event.event = IB_EVENT_DEVICE_FATAL; 89 event.event = IB_EVENT_DEVICE_FATAL;
90 event.element.port_num = 0; 90 event.element.port_num = 0;
91 dev->active = false;
91 92
92 ib_dispatch_event(&event); 93 ib_dispatch_event(&event);
93 94
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index 75671f75cac4..155bc66395be 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -34,8 +34,6 @@
34#ifndef MTHCA_CONFIG_REG_H 34#ifndef MTHCA_CONFIG_REG_H
35#define MTHCA_CONFIG_REG_H 35#define MTHCA_CONFIG_REG_H
36 36
37#include <asm/page.h>
38
39#define MTHCA_HCR_BASE 0x80680 37#define MTHCA_HCR_BASE 0x80680
40#define MTHCA_HCR_SIZE 0x0001c 38#define MTHCA_HCR_SIZE 0x0001c
41#define MTHCA_ECR_BASE 0x80700 39#define MTHCA_ECR_BASE 0x80700
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 9ef611f6dd36..7e6a6d64ad4e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -357,6 +357,7 @@ struct mthca_dev {
357 struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; 357 struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
358 spinlock_t sm_lock; 358 spinlock_t sm_lock;
359 u8 rate[MTHCA_MAX_PORTS]; 359 u8 rate[MTHCA_MAX_PORTS];
360 bool active;
360}; 361};
361 362
362#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG 363#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 90e4e450a120..8c31fa36e95e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
829 829
830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { 830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
831 static const char *eq_name[] = { 831 static const char *eq_name[] = {
832 [MTHCA_EQ_COMP] = DRV_NAME " (comp)", 832 [MTHCA_EQ_COMP] = DRV_NAME "-comp",
833 [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", 833 [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
834 [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" 834 [MTHCA_EQ_CMD] = DRV_NAME "-cmd"
835 }; 835 };
836 836
837 for (i = 0; i < MTHCA_NUM_EQ; ++i) { 837 for (i = 0; i < MTHCA_NUM_EQ; ++i) {
838 snprintf(dev->eq_table.eq[i].irq_name,
839 IB_DEVICE_NAME_MAX,
840 "%s@pci:%s", eq_name[i],
841 pci_name(dev->pdev));
838 err = request_irq(dev->eq_table.eq[i].msi_x_vector, 842 err = request_irq(dev->eq_table.eq[i].msi_x_vector,
839 mthca_is_memfree(dev) ? 843 mthca_is_memfree(dev) ?
840 mthca_arbel_msi_x_interrupt : 844 mthca_arbel_msi_x_interrupt :
841 mthca_tavor_msi_x_interrupt, 845 mthca_tavor_msi_x_interrupt,
842 0, eq_name[i], dev->eq_table.eq + i); 846 0, dev->eq_table.eq[i].irq_name,
847 dev->eq_table.eq + i);
843 if (err) 848 if (err)
844 goto err_out_cmd; 849 goto err_out_cmd;
845 dev->eq_table.eq[i].have_irq = 1; 850 dev->eq_table.eq[i].have_irq = 1;
846 } 851 }
847 } else { 852 } else {
853 snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
854 DRV_NAME "@pci:%s", pci_name(dev->pdev));
848 err = request_irq(dev->pdev->irq, 855 err = request_irq(dev->pdev->irq,
849 mthca_is_memfree(dev) ? 856 mthca_is_memfree(dev) ?
850 mthca_arbel_interrupt : 857 mthca_arbel_interrupt :
851 mthca_tavor_interrupt, 858 mthca_tavor_interrupt,
852 IRQF_SHARED, DRV_NAME, dev); 859 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
853 if (err) 860 if (err)
854 goto err_out_cmd; 861 goto err_out_cmd;
855 dev->eq_table.have_irq = 1; 862 dev->eq_table.have_irq = 1;
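The per-EQ irq_name buffer exists because request_irq() stores the name pointer rather than copying the string, so the text shown in /proc/interrupts must outlive the IRQ; a stack buffer would be a dangling pointer. Condensed form of the pattern, using the field names the patch introduces:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: irq_name lives in the per-EQ struct for the lifetime of the IRQ. */
static int request_eq_irq(struct mthca_eq *eq, struct pci_dev *pdev,
			  irq_handler_t handler)
{
	snprintf(eq->irq_name, sizeof(eq->irq_name), "ib_mthca-comp@pci:%s",
		 pci_name(pdev));
	return request_irq(eq->msi_x_vector, handler, 0, eq->irq_name, eq);
}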
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 13da9f1d24c0..b01b28987874 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1116 pci_set_drvdata(pdev, mdev); 1116 pci_set_drvdata(pdev, mdev);
1117 mdev->hca_type = hca_type; 1117 mdev->hca_type = hca_type;
1118 1118
1119 mdev->active = true;
1120
1119 return 0; 1121 return 0;
1120 1122
1121err_unregister: 1123err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
1215static int __devinit mthca_init_one(struct pci_dev *pdev, 1217static int __devinit mthca_init_one(struct pci_dev *pdev,
1216 const struct pci_device_id *id) 1218 const struct pci_device_id *id)
1217{ 1219{
1218 static int mthca_version_printed = 0;
1219 int ret; 1220 int ret;
1220 1221
1221 mutex_lock(&mthca_device_mutex); 1222 mutex_lock(&mthca_device_mutex);
1222 1223
1223 if (!mthca_version_printed) { 1224 printk_once(KERN_INFO "%s", mthca_version);
1224 printk(KERN_INFO "%s", mthca_version);
1225 ++mthca_version_printed;
1226 }
1227 1225
1228 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { 1226 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
1229 printk(KERN_ERR PFX "%s has invalid driver data %lx\n", 1227 printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 87ad889e367b..bcf7a4014820 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
334 struct mthca_ucontext *context; 334 struct mthca_ucontext *context;
335 int err; 335 int err;
336 336
337 if (!(to_mdev(ibdev)->active))
338 return ERR_PTR(-EAGAIN);
339
337 memset(&uresp, 0, sizeof uresp); 340 memset(&uresp, 0, sizeof uresp);
338 341
339 uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; 342 uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
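Same fencing idea as mlx4 above: handle_catas() clears the active flag and alloc_ucontext refuses new user contexts until the device is reset. The errno travels inside the returned pointer; a quick recap of that convention (dev_is_active and real_alloc() below are placeholders, not driver functions):

#include <linux/err.h>

struct ib_ucontext *ctx;

ctx = dev_is_active ? real_alloc() : ERR_PTR(-EAGAIN);	/* placeholders */
if (IS_ERR(ctx))
	return PTR_ERR(ctx);	/* caller sees -EAGAIN and may retry later */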
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index c621f8794b88..90f4c4d2e983 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -113,6 +113,7 @@ struct mthca_eq {
113 int nent; 113 int nent;
114 struct mthca_buf_list *page_list; 114 struct mthca_buf_list *page_list;
115 struct mthca_mr mr; 115 struct mthca_mr mr;
116 char irq_name[IB_DEVICE_NAME_MAX];
116}; 117};
117 118
118struct mthca_av; 119struct mthca_av;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f5081bfde6db..c10576fa60c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
1319} 1319}
1320 1320
1321static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1321static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1322 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1322{ 1323{
1323 if (send_cq == recv_cq) 1324 if (send_cq == recv_cq) {
1324 spin_lock_irq(&send_cq->lock); 1325 spin_lock_irq(&send_cq->lock);
1325 else if (send_cq->cqn < recv_cq->cqn) { 1326 __acquire(&recv_cq->lock);
1327 } else if (send_cq->cqn < recv_cq->cqn) {
1326 spin_lock_irq(&send_cq->lock); 1328 spin_lock_irq(&send_cq->lock);
1327 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1329 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1328 } else { 1330 } else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1332} 1334}
1333 1335
1334static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1336static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1337 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1335{ 1338{
1336 if (send_cq == recv_cq) 1339 if (send_cq == recv_cq) {
1340 __release(&recv_cq->lock);
1337 spin_unlock_irq(&send_cq->lock); 1341 spin_unlock_irq(&send_cq->lock);
1338 else if (send_cq->cqn < recv_cq->cqn) { 1342 } else if (send_cq->cqn < recv_cq->cqn) {
1339 spin_unlock(&recv_cq->lock); 1343 spin_unlock(&recv_cq->lock);
1340 spin_unlock_irq(&send_cq->lock); 1344 spin_unlock_irq(&send_cq->lock);
1341 } else { 1345 } else {
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index acb6817f6060..2a13a163d337 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -30,7 +30,6 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/init.h>
34#include <linux/errno.h> 33#include <linux/errno.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/delay.h> 35#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bf1720f7f35f..bcc6abc4faff 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
523void nes_cm_disconn_worker(void *); 523void nes_cm_disconn_worker(void *);
524 524
525/* nes_verbs.c */ 525/* nes_verbs.c */
526int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32); 526int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
527int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); 527int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
528struct nes_ib_device *nes_init_ofa_device(struct net_device *); 528struct nes_ib_device *nes_init_ofa_device(struct net_device *);
529void nes_destroy_ofa_device(struct nes_ib_device *); 529void nes_destroy_ofa_device(struct nes_ib_device *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 114b802771ad..73473db19863 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
2450 */ 2450 */
2451int nes_cm_disconn(struct nes_qp *nesqp) 2451int nes_cm_disconn(struct nes_qp *nesqp)
2452{ 2452{
2453 unsigned long flags; 2453 struct disconn_work *work;
2454
2455 spin_lock_irqsave(&nesqp->lock, flags);
2456 if (nesqp->disconn_pending == 0) {
2457 nesqp->disconn_pending++;
2458 spin_unlock_irqrestore(&nesqp->lock, flags);
2459 /* init our disconnect work element, to */
2460 INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
2461 2454
2462 queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); 2455 work = kzalloc(sizeof *work, GFP_ATOMIC);
2463 } else 2456 if (!work)
2464 spin_unlock_irqrestore(&nesqp->lock, flags); 2457 return -ENOMEM; /* Timer will clean up */
2465 2458
2459 nes_add_ref(&nesqp->ibqp);
2460 work->nesqp = nesqp;
2461 INIT_WORK(&work->work, nes_disconnect_worker);
2462 queue_work(g_cm_core->disconn_wq, &work->work);
2466 return 0; 2463 return 0;
2467} 2464}
2468 2465
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
2472 */ 2469 */
2473static void nes_disconnect_worker(struct work_struct *work) 2470static void nes_disconnect_worker(struct work_struct *work)
2474{ 2471{
2475 struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); 2472 struct disconn_work *dwork = container_of(work, struct disconn_work, work);
2473 struct nes_qp *nesqp = dwork->nesqp;
2476 2474
2475 kfree(dwork);
2477 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", 2476 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
2478 nesqp->last_aeq, nesqp->hwqp.qp_id); 2477 nesqp->last_aeq, nesqp->hwqp.qp_id);
2479 nes_cm_disconn_true(nesqp); 2478 nes_cm_disconn_true(nesqp);
2479 nes_rem_ref(&nesqp->ibqp);
2480} 2480}
2481 2481
2482 2482
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2493 u16 last_ae; 2493 u16 last_ae;
2494 u8 original_hw_tcp_state; 2494 u8 original_hw_tcp_state;
2495 u8 original_ibqp_state; 2495 u8 original_ibqp_state;
2496 u8 issued_disconnect_reset = 0; 2496 enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
2497 int issue_disconn = 0;
2498 int issue_close = 0;
2499 int issue_flush = 0;
2500 u32 flush_q = NES_CQP_FLUSH_RQ;
2501 struct ib_event ibevent;
2497 2502
2498 if (!nesqp) { 2503 if (!nesqp) {
2499 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); 2504 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2517 original_ibqp_state = nesqp->ibqp_state; 2522 original_ibqp_state = nesqp->ibqp_state;
2518 last_ae = nesqp->last_aeq; 2523 last_ae = nesqp->last_aeq;
2519 2524
2525 if (nesqp->term_flags) {
2526 issue_disconn = 1;
2527 issue_close = 1;
2528 nesqp->cm_id = NULL;
2529 if (nesqp->flush_issued == 0) {
2530 nesqp->flush_issued = 1;
2531 issue_flush = 1;
2532 }
2533 } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2534 ((original_ibqp_state == IB_QPS_RTS) &&
2535 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2536 issue_disconn = 1;
2537 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
2538 disconn_status = IW_CM_EVENT_STATUS_RESET;
2539 }
2540
2541 if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2542 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2543 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2544 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2545 issue_close = 1;
2546 nesqp->cm_id = NULL;
2547 if (nesqp->flush_issued == 0) {
2548 nesqp->flush_issued = 1;
2549 issue_flush = 1;
2550 }
2551 }
2552
2553 spin_unlock_irqrestore(&nesqp->lock, flags);
2520 2554
2521 nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state); 2555 if ((issue_flush) && (nesqp->destroyed == 0)) {
2556 /* Flush the queue(s) */
2557 if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
2558 flush_q |= NES_CQP_FLUSH_SQ;
2559 flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
2522 2560
2523 if ((nesqp->cm_id) && (cm_id->event_handler)) { 2561 if (nesqp->term_flags) {
2524 if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || 2562 ibevent.device = nesqp->ibqp.device;
2525 ((original_ibqp_state == IB_QPS_RTS) && 2563 ibevent.event = nesqp->terminate_eventtype;
2526 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { 2564 ibevent.element.qp = &nesqp->ibqp;
2565 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2566 }
2567 }
2568
2569 if ((cm_id) && (cm_id->event_handler)) {
2570 if (issue_disconn) {
2527 atomic_inc(&cm_disconnects); 2571 atomic_inc(&cm_disconnects);
2528 cm_event.event = IW_CM_EVENT_DISCONNECT; 2572 cm_event.event = IW_CM_EVENT_DISCONNECT;
2529 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { 2573 cm_event.status = disconn_status;
2530 cm_event.status = IW_CM_EVENT_STATUS_RESET;
2531 nes_debug(NES_DBG_CM, "Generating a CM "
2532 "Disconnect Event (status reset) for "
2533 "QP%u, cm_id = %p. \n",
2534 nesqp->hwqp.qp_id, cm_id);
2535 } else
2536 cm_event.status = IW_CM_EVENT_STATUS_OK;
2537
2538 cm_event.local_addr = cm_id->local_addr; 2574 cm_event.local_addr = cm_id->local_addr;
2539 cm_event.remote_addr = cm_id->remote_addr; 2575 cm_event.remote_addr = cm_id->remote_addr;
2540 cm_event.private_data = NULL; 2576 cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2547 nesqp->hwqp.sq_tail, cm_id, 2583 nesqp->hwqp.sq_tail, cm_id,
2548 atomic_read(&nesqp->refcount)); 2584 atomic_read(&nesqp->refcount));
2549 2585
2550 spin_unlock_irqrestore(&nesqp->lock, flags);
2551 ret = cm_id->event_handler(cm_id, &cm_event); 2586 ret = cm_id->event_handler(cm_id, &cm_event);
2552 if (ret) 2587 if (ret)
2553 nes_debug(NES_DBG_CM, "OFA CM event_handler " 2588 nes_debug(NES_DBG_CM, "OFA CM event_handler "
2554 "returned, ret=%d\n", ret); 2589 "returned, ret=%d\n", ret);
2555 spin_lock_irqsave(&nesqp->lock, flags);
2556 } 2590 }
2557 2591
2558 nesqp->disconn_pending = 0; 2592 if (issue_close) {
2559 /* There might have been another AE while the lock was released */
2560 original_hw_tcp_state = nesqp->hw_tcp_state;
2561 original_ibqp_state = nesqp->ibqp_state;
2562 last_ae = nesqp->last_aeq;
2563
2564 if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
2565 ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2566 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2567 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2568 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2569 atomic_inc(&cm_closes); 2593 atomic_inc(&cm_closes);
2570 nesqp->cm_id = NULL;
2571 nesqp->in_disconnect = 0;
2572 spin_unlock_irqrestore(&nesqp->lock, flags);
2573 nes_disconnect(nesqp, 1); 2594 nes_disconnect(nesqp, 1);
2574 2595
2575 cm_id->provider_data = nesqp; 2596 cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2588 } 2609 }
2589 2610
2590 cm_id->rem_ref(cm_id); 2611 cm_id->rem_ref(cm_id);
2591
2592 spin_lock_irqsave(&nesqp->lock, flags);
2593 if (nesqp->flush_issued == 0) {
2594 nesqp->flush_issued = 1;
2595 spin_unlock_irqrestore(&nesqp->lock, flags);
2596 flush_wqes(nesvnic->nesdev, nesqp,
2597 NES_CQP_FLUSH_RQ, 1);
2598 } else
2599 spin_unlock_irqrestore(&nesqp->lock, flags);
2600 } else {
2601 cm_id = nesqp->cm_id;
2602 spin_unlock_irqrestore(&nesqp->lock, flags);
2603 /* check to see if the inbound reset beat the outbound reset */
2604 if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
2605 nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
2606 "due to inbound reset beating the "
2607 "outbound reset.\n", nesqp->hwqp.qp_id);
2608 }
2609 } 2612 }
2610 } else {
2611 nesqp->disconn_pending = 0;
2612 spin_unlock_irqrestore(&nesqp->lock, flags);
2613 } 2613 }
2614 2614
2615 return 0; 2615 return 0;
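The nes_cm_disconn() rework above drops the disconn_pending flag and per-QP work struct in favour of a heap-allocated work item per disconnect, queued safely from atomic context and freed by the worker; the QP reference taken before queuing keeps the QP alive until the worker runs. Condensed restatement, reusing the driver types from nes.h:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct disconn_work {
	struct work_struct work;
	struct nes_qp *nesqp;
};

static void disconn_worker(struct work_struct *work)
{
	struct disconn_work *dw = container_of(work, struct disconn_work, work);
	struct nes_qp *qp = dw->nesqp;

	kfree(dw);
	/* ... process the disconnect for qp ... */
	nes_rem_ref(&qp->ibqp);		/* drop the reference taken below */
}

static int queue_disconn(struct nes_qp *qp, struct workqueue_struct *wq)
{
	struct disconn_work *dw = kzalloc(sizeof(*dw), GFP_ATOMIC);

	if (!dw)
		return -ENOMEM;		/* timer path cleans up instead */
	nes_add_ref(&qp->ibqp);
	dw->nesqp = qp;
	INIT_WORK(&dw->work, disconn_worker);
	queue_work(wq, &dw->work);
	return 0;
}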
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 8b7e7c0e496e..90e8e4d8a5ce 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -410,8 +410,6 @@ struct nes_cm_ops {
410int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, 410int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
411 enum nes_timer_type, int, int); 411 enum nes_timer_type, int, int);
412 412
413int nes_cm_disconn(struct nes_qp *);
414
415int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); 413int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
416int nes_reject(struct iw_cm_id *, const void *, u8); 414int nes_reject(struct iw_cm_id *, const void *, u8);
417int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); 415int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4a84d02ece06..63a1a8e1e8a3 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
74static void process_critical_error(struct nes_device *nesdev); 74static void process_critical_error(struct nes_device *nesdev);
75static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); 75static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
76static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); 76static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
77static void nes_terminate_timeout(unsigned long context);
78static void nes_terminate_start_timer(struct nes_qp *nesqp);
77 79
78#ifdef CONFIG_INFINIBAND_NES_DEBUG 80#ifdef CONFIG_INFINIBAND_NES_DEBUG
79static unsigned char *nes_iwarp_state_str[] = { 81static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2903} 2905}
2904 2906
2905 2907
2908static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
2909{
2910 u16 pkt_len;
2911
2912 if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
2913 /* skip over ethernet header */
2914 pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
2915 pkt += ETH_HLEN;
2916
2917 /* Skip over IP and TCP headers */
2918 pkt += 4 * (pkt[0] & 0x0f);
2919 pkt += 4 * ((pkt[12] >> 4) & 0x0f);
2920 }
2921 return pkt;
2922}
2923
2924/* Determine if incoming error pkt is rdma layer */
2925static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
2926{
2927 u8 *pkt;
2928 u16 *mpa;
2929 u32 opcode = 0xffffffff;
2930
2931 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
2932 pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
2933 mpa = (u16 *)locate_mpa(pkt, aeq_info);
2934 opcode = be16_to_cpu(mpa[1]) & 0xf;
2935 }
2936
2937 return opcode;
2938}
2939
2940/* Build iWARP terminate header */
2941static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
2942{
2943 u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
2944 u16 ddp_seg_len;
2945 int copy_len = 0;
2946 u8 is_tagged = 0;
2947 u8 flush_code = 0;
2948 struct nes_terminate_hdr *termhdr;
2949
2950 termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
2951 memset(termhdr, 0, 64);
2952
2953 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
2954
2955 /* Use data from offending packet to fill in ddp & rdma hdrs */
2956 pkt = locate_mpa(pkt, aeq_info);
2957 ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
2958 if (ddp_seg_len) {
2959 copy_len = 2;
2960 termhdr->hdrct = DDP_LEN_FLAG;
2961 if (pkt[2] & 0x80) {
2962 is_tagged = 1;
2963 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
2964 copy_len += TERM_DDP_LEN_TAGGED;
2965 termhdr->hdrct |= DDP_HDR_FLAG;
2966 }
2967 } else {
2968 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
2969 copy_len += TERM_DDP_LEN_UNTAGGED;
2970 termhdr->hdrct |= DDP_HDR_FLAG;
2971 }
2972
2973 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
2974 if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
2975 copy_len += TERM_RDMA_LEN;
2976 termhdr->hdrct |= RDMA_HDR_FLAG;
2977 }
2978 }
2979 }
2980 }
2981 }
2982
2983 switch (async_event_id) {
2984 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
2985 switch (iwarp_opcode(nesqp, aeq_info)) {
2986 case IWARP_OPCODE_WRITE:
2987 flush_code = IB_WC_LOC_PROT_ERR;
2988 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
2989 termhdr->error_code = DDP_TAGGED_INV_STAG;
2990 break;
2991 default:
2992 flush_code = IB_WC_REM_ACCESS_ERR;
2993 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
2994 termhdr->error_code = RDMAP_INV_STAG;
2995 }
2996 break;
2997 case NES_AEQE_AEID_AMP_INVALID_STAG:
2998 flush_code = IB_WC_REM_ACCESS_ERR;
2999 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3000 termhdr->error_code = RDMAP_INV_STAG;
3001 break;
3002 case NES_AEQE_AEID_AMP_BAD_QP:
3003 flush_code = IB_WC_LOC_QP_OP_ERR;
3004 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3005 termhdr->error_code = DDP_UNTAGGED_INV_QN;
3006 break;
3007 case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
3008 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
3009 switch (iwarp_opcode(nesqp, aeq_info)) {
3010 case IWARP_OPCODE_SEND_INV:
3011 case IWARP_OPCODE_SEND_SE_INV:
3012 flush_code = IB_WC_REM_OP_ERR;
3013 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3014 termhdr->error_code = RDMAP_CANT_INV_STAG;
3015 break;
3016 default:
3017 flush_code = IB_WC_REM_ACCESS_ERR;
3018 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3019 termhdr->error_code = RDMAP_INV_STAG;
3020 }
3021 break;
3022 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3023 if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
3024 flush_code = IB_WC_LOC_PROT_ERR;
3025 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3026 termhdr->error_code = DDP_TAGGED_BOUNDS;
3027 } else {
3028 flush_code = IB_WC_REM_ACCESS_ERR;
3029 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3030 termhdr->error_code = RDMAP_INV_BOUNDS;
3031 }
3032 break;
3033 case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
3034 case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
3035 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
3036 flush_code = IB_WC_REM_ACCESS_ERR;
3037 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3038 termhdr->error_code = RDMAP_ACCESS;
3039 break;
3040 case NES_AEQE_AEID_AMP_TO_WRAP:
3041 flush_code = IB_WC_REM_ACCESS_ERR;
3042 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3043 termhdr->error_code = RDMAP_TO_WRAP;
3044 break;
3045 case NES_AEQE_AEID_AMP_BAD_PD:
3046 switch (iwarp_opcode(nesqp, aeq_info)) {
3047 case IWARP_OPCODE_WRITE:
3048 flush_code = IB_WC_LOC_PROT_ERR;
3049 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3050 termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
3051 break;
3052 case IWARP_OPCODE_SEND_INV:
3053 case IWARP_OPCODE_SEND_SE_INV:
3054 flush_code = IB_WC_REM_ACCESS_ERR;
3055 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3056 termhdr->error_code = RDMAP_CANT_INV_STAG;
3057 break;
3058 default:
3059 flush_code = IB_WC_REM_ACCESS_ERR;
3060 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3061 termhdr->error_code = RDMAP_UNASSOC_STAG;
3062 }
3063 break;
3064 case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
3065 flush_code = IB_WC_LOC_LEN_ERR;
3066 termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
3067 termhdr->error_code = MPA_MARKER;
3068 break;
3069 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3070 flush_code = IB_WC_GENERAL_ERR;
3071 termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
3072 termhdr->error_code = MPA_CRC;
3073 break;
3074 case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
3075 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3076 flush_code = IB_WC_LOC_LEN_ERR;
3077 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
3078 termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
3079 break;
3080 case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
3081 case NES_AEQE_AEID_DDP_NO_L_BIT:
3082 flush_code = IB_WC_FATAL_ERR;
3083 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
3084 termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
3085 break;
3086 case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
3087 case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
3088 flush_code = IB_WC_GENERAL_ERR;
3089 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3090 termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
3091 break;
3092 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3093 flush_code = IB_WC_LOC_LEN_ERR;
3094 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3095 termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
3096 break;
3097 case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
3098 flush_code = IB_WC_GENERAL_ERR;
3099 if (is_tagged) {
3100 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3101 termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
3102 } else {
3103 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3104 termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
3105 }
3106 break;
3107 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3108 flush_code = IB_WC_GENERAL_ERR;
3109 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3110 termhdr->error_code = DDP_UNTAGGED_INV_MO;
3111 break;
3112 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3113 flush_code = IB_WC_REM_OP_ERR;
3114 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3115 termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
3116 break;
3117 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3118 flush_code = IB_WC_GENERAL_ERR;
3119 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3120 termhdr->error_code = DDP_UNTAGGED_INV_QN;
3121 break;
3122 case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
3123 flush_code = IB_WC_GENERAL_ERR;
3124 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3125 termhdr->error_code = RDMAP_INV_RDMAP_VER;
3126 break;
3127 case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
3128 flush_code = IB_WC_LOC_QP_OP_ERR;
3129 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3130 termhdr->error_code = RDMAP_UNEXPECTED_OP;
3131 break;
3132 default:
3133 flush_code = IB_WC_FATAL_ERR;
3134 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3135 termhdr->error_code = RDMAP_UNSPECIFIED;
3136 break;
3137 }
3138
3139 if (copy_len)
3140 memcpy(termhdr + 1, pkt, copy_len);
3141
3142 if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
3143 if (aeq_info & NES_AEQE_SQ)
3144 nesqp->term_sq_flush_code = flush_code;
3145 else
3146 nesqp->term_rq_flush_code = flush_code;
3147 }
3148
3149 return sizeof(struct nes_terminate_hdr) + copy_len;
3150}
3151
3152static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
3153 struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
3154{
3155 u64 context;
3156 unsigned long flags;
3157 u32 aeq_info;
3158 u16 async_event_id;
3159 u8 tcp_state;
3160 u8 iwarp_state;
3161 u32 termlen = 0;
3162 u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
3163 NES_CQP_QP_TERM_DONT_SEND_FIN;
3164 struct nes_adapter *nesadapter = nesdev->nesadapter;
3165
3166 if (nesqp->term_flags & NES_TERM_SENT)
3167 return; /* Sanity check */
3168
3169 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3170 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3171 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3172 async_event_id = (u16)aeq_info;
3173
3174 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
3175 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
3176 if (!context) {
3177 WARN_ON(!context);
3178 return;
3179 }
3180
3181 nesqp = (struct nes_qp *)(unsigned long)context;
3182 spin_lock_irqsave(&nesqp->lock, flags);
3183 nesqp->hw_iwarp_state = iwarp_state;
3184 nesqp->hw_tcp_state = tcp_state;
3185 nesqp->last_aeq = async_event_id;
3186 nesqp->terminate_eventtype = eventtype;
3187 spin_unlock_irqrestore(&nesqp->lock, flags);
3188
3189 if (nesadapter->send_term_ok)
3190 termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
3191 else
3192 mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
3193
3194 nes_terminate_start_timer(nesqp);
3195 nesqp->term_flags |= NES_TERM_SENT;
3196 nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
3197}
3198
3199static void nes_terminate_send_fin(struct nes_device *nesdev,
3200 struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
3201{
3202 u32 aeq_info;
3203 u16 async_event_id;
3204 u8 tcp_state;
3205 u8 iwarp_state;
3206 unsigned long flags;
3207
3208 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3209 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3210 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3211 async_event_id = (u16)aeq_info;
3212
3213 spin_lock_irqsave(&nesqp->lock, flags);
3214 nesqp->hw_iwarp_state = iwarp_state;
3215 nesqp->hw_tcp_state = tcp_state;
3216 nesqp->last_aeq = async_event_id;
3217 spin_unlock_irqrestore(&nesqp->lock, flags);
3218
3219 /* Send the fin only */
3220 nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
3221 NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
3222}
3223
3224/* Cleanup after a terminate sent or received */
3225static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
3226{
3227 u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3228 unsigned long flags;
3229 struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
3230 struct nes_device *nesdev = nesvnic->nesdev;
3231 u8 first_time = 0;
3232
3233 spin_lock_irqsave(&nesqp->lock, flags);
3234 if (nesqp->hte_added) {
3235 nesqp->hte_added = 0;
3236 next_iwarp_state |= NES_CQP_QP_DEL_HTE;
3237 }
3238
3239 first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
3240 nesqp->term_flags |= NES_TERM_DONE;
3241 spin_unlock_irqrestore(&nesqp->lock, flags);
3242
3243 /* Make sure we go through this only once */
3244 if (first_time) {
3245 if (timeout_occurred == 0)
3246 del_timer(&nesqp->terminate_timer);
3247 else
3248 next_iwarp_state |= NES_CQP_QP_RESET;
3249
3250 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3251 nes_cm_disconn(nesqp);
3252 }
3253}
3254
3255static void nes_terminate_received(struct nes_device *nesdev,
3256 struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
3257{
3258 u32 aeq_info;
3259 u8 *pkt;
3260 u32 *mpa;
3261 u8 ddp_ctl;
3262 u8 rdma_ctl;
3263 u16 aeq_id = 0;
3264
3265 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3266 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
3267 /* Terminate is not a performance path so the silicon */
3268 /* did not validate the frame - do it now */
3269 pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
3270 mpa = (u32 *)locate_mpa(pkt, aeq_info);
3271 ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
3272 rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
3273 if ((ddp_ctl & 0xc0) != 0x40)
3274 aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
3275 else if ((ddp_ctl & 0x03) != 1)
3276 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
3277 else if (be32_to_cpu(mpa[2]) != 2)
3278 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
3279 else if (be32_to_cpu(mpa[3]) != 1)
3280 aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
3281 else if (be32_to_cpu(mpa[4]) != 0)
3282 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
3283 else if ((rdma_ctl & 0xc0) != 0x40)
3284 aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
3285
3286 if (aeq_id) {
3287 /* Bad terminate recvd - send back a terminate */
3288 aeq_info = (aeq_info & 0xffff0000) | aeq_id;
3289 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
3290 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3291 return;
3292 }
3293 }
3294
3295 nesqp->term_flags |= NES_TERM_RCVD;
3296 nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
3297 nes_terminate_start_timer(nesqp);
3298 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3299}
3300
3301/* Timeout routine in case terminate fails to complete */
3302static void nes_terminate_timeout(unsigned long context)
3303{
3304 struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
3305
3306 nes_terminate_done(nesqp, 1);
3307}
3308
3309/* Set a timer in case hw cannot complete the terminate sequence */
3310static void nes_terminate_start_timer(struct nes_qp *nesqp)
3311{
3312 init_timer(&nesqp->terminate_timer);
3313 nesqp->terminate_timer.function = nes_terminate_timeout;
3314 nesqp->terminate_timer.expires = jiffies + HZ;
3315 nesqp->terminate_timer.data = (unsigned long)nesqp;
3316 add_timer(&nesqp->terminate_timer);
3317}
3318
2906/** 3319/**
2907 * nes_process_iwarp_aeqe 3320 * nes_process_iwarp_aeqe
2908 */ 3321 */
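A gloss on the magic offsets in locate_mpa() above, restated as a standalone helper under the same assumption the driver makes (Ethernet + IPv4 + TCP framing captured in the Q2 buffer):

#include <linux/if_ether.h>

/* Walk from the Ethernet header of a captured frame to its MPA header. */
static u8 *skip_to_mpa(u8 *pkt)
{
	pkt += ETH_HLEN;			/* 14-byte Ethernet header          */
	pkt += 4 * (pkt[0] & 0x0f);		/* IPv4 IHL field, in 32-bit words  */
	pkt += 4 * ((pkt[12] >> 4) & 0x0f);	/* TCP data offset, in 32-bit words */
	return pkt;				/* first 16 bits here = DDP seg len */
}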
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2910 struct nes_hw_aeqe *aeqe) 3323 struct nes_hw_aeqe *aeqe)
2911{ 3324{
2912 u64 context; 3325 u64 context;
2913 u64 aeqe_context = 0;
2914 unsigned long flags; 3326 unsigned long flags;
2915 struct nes_qp *nesqp; 3327 struct nes_qp *nesqp;
3328 struct nes_hw_cq *hw_cq;
3329 struct nes_cq *nescq;
2916 int resource_allocated; 3330 int resource_allocated;
2917 /* struct iw_cm_id *cm_id; */
2918 struct nes_adapter *nesadapter = nesdev->nesadapter; 3331 struct nes_adapter *nesadapter = nesdev->nesadapter;
2919 struct ib_event ibevent;
2920 /* struct iw_cm_event cm_event; */
2921 u32 aeq_info; 3332 u32 aeq_info;
2922 u32 next_iwarp_state = 0; 3333 u32 next_iwarp_state = 0;
2923 u16 async_event_id; 3334 u16 async_event_id;
2924 u8 tcp_state; 3335 u8 tcp_state;
2925 u8 iwarp_state; 3336 u8 iwarp_state;
3337 int must_disconn = 1;
3338 int must_terminate = 0;
3339 struct ib_event ibevent;
2926 3340
2927 nes_debug(NES_DBG_AEQ, "\n"); 3341 nes_debug(NES_DBG_AEQ, "\n");
2928 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); 3342 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
2929 if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { 3343 if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
2930 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); 3344 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2931 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; 3345 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2932 } else { 3346 } else {
2933 aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2934 aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2935 context = (unsigned long)nesadapter->qp_table[le32_to_cpu( 3347 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
2936 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; 3348 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
2937 BUG_ON(!context); 3349 BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2948 3360
2949 switch (async_event_id) { 3361 switch (async_event_id) {
2950 case NES_AEQE_AEID_LLP_FIN_RECEIVED: 3362 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
2951 nesqp = *((struct nes_qp **)&context); 3363 nesqp = (struct nes_qp *)(unsigned long)context;
3364
3365 if (nesqp->term_flags)
3366 return; /* Ignore it, wait for close complete */
3367
2952 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3368 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
2953 nesqp->cm_id->add_ref(nesqp->cm_id); 3369 nesqp->cm_id->add_ref(nesqp->cm_id);
2954 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3370 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2959 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3375 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
2960 async_event_id, nesqp->last_aeq, tcp_state); 3376 async_event_id, nesqp->last_aeq, tcp_state);
2961 } 3377 }
3378
2962 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || 3379 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2963 (nesqp->ibqp_state != IB_QPS_RTS)) { 3380 (nesqp->ibqp_state != IB_QPS_RTS)) {
2964 /* FIN Received but tcp state or IB state moved on, 3381 /* FIN Received but tcp state or IB state moved on,
2965 should expect a close complete */ 3382 should expect a close complete */
2966 return; 3383 return;
2967 } 3384 }
3385
2968 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3386 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3387 nesqp = (struct nes_qp *)(unsigned long)context;
3388 if (nesqp->term_flags) {
3389 nes_terminate_done(nesqp, 0);
3390 return;
3391 }
3392
2969 case NES_AEQE_AEID_LLP_CONNECTION_RESET: 3393 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
2970 case NES_AEQE_AEID_TERMINATE_SENT:
2971 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
2972 case NES_AEQE_AEID_RESET_SENT: 3394 case NES_AEQE_AEID_RESET_SENT:
2973 nesqp = *((struct nes_qp **)&context); 3395 nesqp = (struct nes_qp *)(unsigned long)context;
2974 if (async_event_id == NES_AEQE_AEID_RESET_SENT) { 3396 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
2975 tcp_state = NES_AEQE_TCP_STATE_CLOSED; 3397 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2976 } 3398 }
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2982 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || 3404 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2983 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { 3405 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
2984 nesqp->hte_added = 0; 3406 nesqp->hte_added = 0;
2985 spin_unlock_irqrestore(&nesqp->lock, flags); 3407 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
2986 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
2987 nesqp->hwqp.qp_id);
2988 nes_hw_modify_qp(nesdev, nesqp,
2989 NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
2990 spin_lock_irqsave(&nesqp->lock, flags);
2991 } 3408 }
2992 3409
2993 if ((nesqp->ibqp_state == IB_QPS_RTS) && 3410 if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2999 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; 3416 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3000 break; 3417 break;
3001 case NES_AEQE_IWARP_STATE_TERMINATE: 3418 case NES_AEQE_IWARP_STATE_TERMINATE:
3002 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; 3419 must_disconn = 0; /* terminate path takes care of disconn */
3003 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; 3420 if (nesqp->term_flags == 0)
3004 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { 3421 must_terminate = 1;
3005 next_iwarp_state |= 0x02000000;
3006 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3007 }
3008 break; 3422 break;
3009 default:
3010 next_iwarp_state = 0;
3011 }
3012 spin_unlock_irqrestore(&nesqp->lock, flags);
3013 if (next_iwarp_state) {
3014 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
3015 " also added another reference\n",
3016 nesqp->hwqp.qp_id, next_iwarp_state);
3017 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3018 } 3423 }
3019 nes_cm_disconn(nesqp);
3020 } else { 3424 } else {
3021 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { 3425 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
3022 /* FIN Received but ib state not RTS, 3426 /* FIN Received but ib state not RTS,
3023 close complete will be on its way */ 3427 close complete will be on its way */
3024 spin_unlock_irqrestore(&nesqp->lock, flags); 3428 must_disconn = 0;
3025 return;
3026 }
3027 spin_unlock_irqrestore(&nesqp->lock, flags);
3028 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
3029 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
3030 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3031 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
3032 " also added another reference\n",
3033 nesqp->hwqp.qp_id, next_iwarp_state);
3034 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3035 } 3429 }
3036 nes_cm_disconn(nesqp);
3037 } 3430 }
3038 break;
3039 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3040 nesqp = *((struct nes_qp **)&context);
3041 spin_lock_irqsave(&nesqp->lock, flags);
3042 nesqp->hw_iwarp_state = iwarp_state;
3043 nesqp->hw_tcp_state = tcp_state;
3044 nesqp->last_aeq = async_event_id;
3045 spin_unlock_irqrestore(&nesqp->lock, flags); 3431 spin_unlock_irqrestore(&nesqp->lock, flags);
3046 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED" 3432
3047 " event on QP%u \n Q2 Data:\n", 3433 if (must_terminate)
3048 nesqp->hwqp.qp_id); 3434 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3049 if (nesqp->ibqp.event_handler) { 3435 else if (must_disconn) {
3050 ibevent.device = nesqp->ibqp.device; 3436 if (next_iwarp_state) {
3051 ibevent.element.qp = &nesqp->ibqp; 3437 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
3052 ibevent.event = IB_EVENT_QP_FATAL; 3438 nesqp->hwqp.qp_id, next_iwarp_state);
3053 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3439 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3054 } 3440 }
3055 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
3056 ((nesqp->ibqp_state == IB_QPS_RTS)&&
3057 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
3058 nes_cm_disconn(nesqp); 3441 nes_cm_disconn(nesqp);
3059 } else {
3060 nesqp->in_disconnect = 0;
3061 wake_up(&nesqp->kick_waitq);
3062 } 3442 }
3063 break; 3443 break;
3064 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: 3444
3065 nesqp = *((struct nes_qp **)&context); 3445 case NES_AEQE_AEID_TERMINATE_SENT:
3066 spin_lock_irqsave(&nesqp->lock, flags); 3446 nesqp = (struct nes_qp *)(unsigned long)context;
3067 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; 3447 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3068 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3069 nesqp->last_aeq = async_event_id;
3070 if (nesqp->cm_id) {
3071 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
3072 " event on QP%u, remote IP = 0x%08X \n",
3073 nesqp->hwqp.qp_id,
3074 ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
3075 } else {
3076 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
3077 " event on QP%u \n",
3078 nesqp->hwqp.qp_id);
3079 }
3080 spin_unlock_irqrestore(&nesqp->lock, flags);
3081 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
3082 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3083 if (nesqp->ibqp.event_handler) {
3084 ibevent.device = nesqp->ibqp.device;
3085 ibevent.element.qp = &nesqp->ibqp;
3086 ibevent.event = IB_EVENT_QP_FATAL;
3087 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3088 }
3089 break; 3448 break;
3090 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: 3449
3091 if (NES_AEQE_INBOUND_RDMA&aeq_info) { 3450 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3092 nesqp = nesadapter->qp_table[le32_to_cpu( 3451 nesqp = (struct nes_qp *)(unsigned long)context;
3093 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 3452 nes_terminate_received(nesdev, nesqp, aeqe);
3094 } else {
3095 /* TODO: get the actual WQE and mask off wqe index */
3096 context &= ~((u64)511);
3097 nesqp = *((struct nes_qp **)&context);
3098 }
3099 spin_lock_irqsave(&nesqp->lock, flags);
3100 nesqp->hw_iwarp_state = iwarp_state;
3101 nesqp->hw_tcp_state = tcp_state;
3102 nesqp->last_aeq = async_event_id;
3103 spin_unlock_irqrestore(&nesqp->lock, flags);
3104 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
3105 nesqp->hwqp.qp_id);
3106 if (nesqp->ibqp.event_handler) {
3107 ibevent.device = nesqp->ibqp.device;
3108 ibevent.element.qp = &nesqp->ibqp;
3109 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3110 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3111 }
3112 break; 3453 break;
3454
3455 case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
3456 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
3113 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: 3457 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
3114 nesqp = *((struct nes_qp **)&context); 3458 case NES_AEQE_AEID_AMP_INVALID_STAG:
3115 spin_lock_irqsave(&nesqp->lock, flags); 3459 case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
3116 nesqp->hw_iwarp_state = iwarp_state; 3460 case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
3117 nesqp->hw_tcp_state = tcp_state;
3118 nesqp->last_aeq = async_event_id;
3119 spin_unlock_irqrestore(&nesqp->lock, flags);
3120 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
3121 nesqp->hwqp.qp_id);
3122 if (nesqp->ibqp.event_handler) {
3123 ibevent.device = nesqp->ibqp.device;
3124 ibevent.element.qp = &nesqp->ibqp;
3125 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3126 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3127 }
3128 break;
3129 case NES_AEQE_AEID_PRIV_OPERATION_DENIED: 3461 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
3130 nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words 3462 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3131 [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 3463 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3132 spin_lock_irqsave(&nesqp->lock, flags); 3464 case NES_AEQE_AEID_AMP_TO_WRAP:
3133 nesqp->hw_iwarp_state = iwarp_state; 3465 nesqp = (struct nes_qp *)(unsigned long)context;
3134 nesqp->hw_tcp_state = tcp_state; 3466 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
3135 nesqp->last_aeq = async_event_id; 3467 break;
3136 spin_unlock_irqrestore(&nesqp->lock, flags); 3468
3137 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u," 3469 case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
3138 " nesqp = %p, AE reported %p\n", 3470 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3139 nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context)); 3471 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3140 if (nesqp->ibqp.event_handler) { 3472 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3141 ibevent.device = nesqp->ibqp.device; 3473 nesqp = (struct nes_qp *)(unsigned long)context;
3142 ibevent.element.qp = &nesqp->ibqp; 3474 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
3143 ibevent.event = IB_EVENT_QP_ACCESS_ERR; 3475 aeq_info &= 0xffff0000;
3144 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3476 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
3477 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
3145 } 3478 }
3479
3480 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
3481 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
3482 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3483 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3484 case NES_AEQE_AEID_AMP_BAD_QP:
3485 case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
3486 case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
3487 case NES_AEQE_AEID_DDP_NO_L_BIT:
3488 case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
3489 case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
3490 case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
3491 case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
3492 case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
3493 case NES_AEQE_AEID_AMP_BAD_PD:
3494 case NES_AEQE_AEID_AMP_FASTREG_SHARED:
3495 case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
3496 case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
3497 case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
3498 case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
3499 case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
3500 case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
3501 case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
3502 case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
3503 case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
3504 case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
3505 case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
3506 case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
3507 case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
3508 case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
3509 case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
3510 case NES_AEQE_AEID_BAD_CLOSE:
3511 case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
3512 case NES_AEQE_AEID_STAG_ZERO_INVALID:
3513 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
3514 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
3515 nesqp = (struct nes_qp *)(unsigned long)context;
3516 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3146 break; 3517 break;
3518
3147 case NES_AEQE_AEID_CQ_OPERATION_ERROR: 3519 case NES_AEQE_AEID_CQ_OPERATION_ERROR:
3148 context <<= 1; 3520 context <<= 1;
3149 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", 3521 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3153 if (resource_allocated) { 3525 if (resource_allocated) {
3154 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", 3526 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
3155 __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); 3527 __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
3528 hw_cq = (struct nes_hw_cq *)(unsigned long)context;
3529 if (hw_cq) {
3530 nescq = container_of(hw_cq, struct nes_cq, hw_cq);
3531 if (nescq->ibcq.event_handler) {
3532 ibevent.device = nescq->ibcq.device;
3533 ibevent.event = IB_EVENT_CQ_ERR;
3534 ibevent.element.cq = &nescq->ibcq;
3535 nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
3536 }
3537 }
3156 } 3538 }
3157 break; 3539 break;
3158 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3540
3159 nesqp = nesadapter->qp_table[le32_to_cpu(
3160 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
3161 spin_lock_irqsave(&nesqp->lock, flags);
3162 nesqp->hw_iwarp_state = iwarp_state;
3163 nesqp->hw_tcp_state = tcp_state;
3164 nesqp->last_aeq = async_event_id;
3165 spin_unlock_irqrestore(&nesqp->lock, flags);
3166 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
3167 "_FOR_AVAILABLE_BUFFER event on QP%u\n",
3168 nesqp->hwqp.qp_id);
3169 if (nesqp->ibqp.event_handler) {
3170 ibevent.device = nesqp->ibqp.device;
3171 ibevent.element.qp = &nesqp->ibqp;
3172 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3173 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3174 }
3175 /* tell cm to disconnect, cm will queue work to thread */
3176 nes_cm_disconn(nesqp);
3177 break;
3178 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3179 nesqp = *((struct nes_qp **)&context);
3180 spin_lock_irqsave(&nesqp->lock, flags);
3181 nesqp->hw_iwarp_state = iwarp_state;
3182 nesqp->hw_tcp_state = tcp_state;
3183 nesqp->last_aeq = async_event_id;
3184 spin_unlock_irqrestore(&nesqp->lock, flags);
3185 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
3186 "_NO_BUFFER_AVAILABLE event on QP%u\n",
3187 nesqp->hwqp.qp_id);
3188 if (nesqp->ibqp.event_handler) {
3189 ibevent.device = nesqp->ibqp.device;
3190 ibevent.element.qp = &nesqp->ibqp;
3191 ibevent.event = IB_EVENT_QP_FATAL;
3192 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3193 }
3194 /* tell cm to disconnect, cm will queue work to thread */
3195 nes_cm_disconn(nesqp);
3196 break;
3197 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3198 nesqp = *((struct nes_qp **)&context);
3199 spin_lock_irqsave(&nesqp->lock, flags);
3200 nesqp->hw_iwarp_state = iwarp_state;
3201 nesqp->hw_tcp_state = tcp_state;
3202 nesqp->last_aeq = async_event_id;
3203 spin_unlock_irqrestore(&nesqp->lock, flags);
3204 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
3205 " event on QP%u \n Q2 Data:\n",
3206 nesqp->hwqp.qp_id);
3207 if (nesqp->ibqp.event_handler) {
3208 ibevent.device = nesqp->ibqp.device;
3209 ibevent.element.qp = &nesqp->ibqp;
3210 ibevent.event = IB_EVENT_QP_FATAL;
3211 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3212 }
3213 /* tell cm to disconnect, cm will queue work to thread */
3214 nes_cm_disconn(nesqp);
3215 break;
3216 /* TODO: additional AEs need to be here */
3217 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3218 nesqp = *((struct nes_qp **)&context);
3219 spin_lock_irqsave(&nesqp->lock, flags);
3220 nesqp->hw_iwarp_state = iwarp_state;
3221 nesqp->hw_tcp_state = tcp_state;
3222 nesqp->last_aeq = async_event_id;
3223 spin_unlock_irqrestore(&nesqp->lock, flags);
3224 if (nesqp->ibqp.event_handler) {
3225 ibevent.device = nesqp->ibqp.device;
3226 ibevent.element.qp = &nesqp->ibqp;
3227 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3228 nesqp->ibqp.event_handler(&ibevent,
3229 nesqp->ibqp.qp_context);
3230 }
3231 nes_cm_disconn(nesqp);
3232 break;
3233 default: 3541 default:
3234 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", 3542 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
3235 async_event_id); 3543 async_event_id);
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3238 3546
3239} 3547}
3240 3548
3241
3242/** 3549/**
3243 * nes_iwarp_ce_handler 3550 * nes_iwarp_ce_handler
3244 */ 3551 */
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3373{ 3680{
3374 struct nes_cqp_request *cqp_request; 3681 struct nes_cqp_request *cqp_request;
3375 struct nes_hw_cqp_wqe *cqp_wqe; 3682 struct nes_hw_cqp_wqe *cqp_wqe;
3683 u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
3684 u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
3376 int ret; 3685 int ret;
3377 3686
3378 cqp_request = nes_get_cqp_request(nesdev); 3687 cqp_request = nes_get_cqp_request(nesdev);
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3389 cqp_wqe = &cqp_request->cqp_wqe; 3698 cqp_wqe = &cqp_request->cqp_wqe;
3390 nes_fill_init_cqp_wqe(cqp_wqe, nesdev); 3699 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
3391 3700
3701 /* If wqe in error was identified, set code to be put into cqe */
3702 if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
3703 which_wq |= NES_CQP_FLUSH_MAJ_MIN;
3704 sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
3705 nesqp->term_sq_flush_code = 0;
3706 }
3707
3708 if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
3709 which_wq |= NES_CQP_FLUSH_MAJ_MIN;
3710 rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
3711 nesqp->term_rq_flush_code = 0;
3712 }
3713
3714 if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
3715 cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
3716 cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
3717 }
3718
3392 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = 3719 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
3393 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); 3720 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
3394 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); 3721 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
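
The flush_wqes() change above lets the driver stamp a specific completion status into flushed work requests: when terminate processing picked an error code for the SQ or RQ, the CQP WQE carries it as a 32-bit word with the major code in the upper 16 bits and the minor code (or the saved terminate flush code) in the lower 16. Below is a small standalone sketch of that packing, using only constants introduced by this patch; the helper name is illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Constants as introduced in nes_hw.h by this patch. */
#define NES_IWARP_CQE_MAJOR_FLUSH 1
#define NES_IWARP_CQE_MINOR_FLUSH 1
#define CQE_MAJOR_DRV             0x8000

/* Illustrative helper: build the 32-bit flush code word placed in the CQP WQE. */
static uint32_t build_flush_code(uint16_t term_flush_code)
{
        if (term_flush_code)
                /* A terminate chose a specific status; mark it as driver-generated. */
                return ((uint32_t)CQE_MAJOR_DRV << 16) | term_flush_code;
        /* Otherwise fall back to the generic flush major/minor code. */
        return (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
}

int main(void)
{
        printf("default flush code: 0x%08x\n", (unsigned)build_flush_code(0));
        printf("driver flush code:  0x%08x\n", (unsigned)build_flush_code(0x0005));
        return 0;
}
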
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c3654c6383fe..f28a41ba9fa1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
241}; 241};
242 242
243#define NES_CQP_OP_IWARP_STATE_SHIFT 28 243#define NES_CQP_OP_IWARP_STATE_SHIFT 28
244#define NES_CQP_OP_TERMLEN_SHIFT 28
244 245
245enum nes_cqp_qp_bits { 246enum nes_cqp_qp_bits {
246 NES_CQP_QP_ARP_VALID = (1<<8), 247 NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
265 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT), 266 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
266 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT), 267 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
267 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT), 268 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
269 NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
270 NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
268 NES_CQP_QP_RESET = (1<<31), 271 NES_CQP_QP_RESET = (1<<31),
269}; 272};
270 273
271enum nes_cqp_qp_wqe_word_idx { 274enum nes_cqp_qp_wqe_word_idx {
272 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6, 275 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
273 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7, 276 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
277 NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
278 NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
274 NES_CQP_QP_WQE_NEW_MSS_IDX = 15, 279 NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
275}; 280};
276 281
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
361enum nes_cqp_flush_bits { 366enum nes_cqp_flush_bits {
362 NES_CQP_FLUSH_SQ = (1<<30), 367 NES_CQP_FLUSH_SQ = (1<<30),
363 NES_CQP_FLUSH_RQ = (1<<31), 368 NES_CQP_FLUSH_RQ = (1<<31),
369 NES_CQP_FLUSH_MAJ_MIN = (1<<28),
364}; 370};
365 371
366enum nes_cqe_opcode_bits { 372enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
633 NES_AEQE_INBOUND_RDMA = (1<<19), 639 NES_AEQE_INBOUND_RDMA = (1<<19),
634 NES_AEQE_IWARP_STATE_MASK = (7<<20), 640 NES_AEQE_IWARP_STATE_MASK = (7<<20),
635 NES_AEQE_TCP_STATE_MASK = (0xf<<24), 641 NES_AEQE_TCP_STATE_MASK = (0xf<<24),
642 NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
636 NES_AEQE_VALID = (1<<31), 643 NES_AEQE_VALID = (1<<31),
637}; 644};
638 645
639#define NES_AEQE_IWARP_STATE_SHIFT 20 646#define NES_AEQE_IWARP_STATE_SHIFT 20
640#define NES_AEQE_TCP_STATE_SHIFT 24 647#define NES_AEQE_TCP_STATE_SHIFT 24
648#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
649#define NES_AEQE_Q2_DATA_MPA (1<<29)
641 650
642enum nes_aeqe_iwarp_state { 651enum nes_aeqe_iwarp_state {
643 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0, 652 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
751 NES_IWARP_SQ_OP_NOP = 12, 760 NES_IWARP_SQ_OP_NOP = 12,
752}; 761};
753 762
763enum nes_iwarp_cqe_major_code {
764 NES_IWARP_CQE_MAJOR_FLUSH = 1,
765 NES_IWARP_CQE_MAJOR_DRV = 0x8000
766};
767
768enum nes_iwarp_cqe_minor_code {
769 NES_IWARP_CQE_MINOR_FLUSH = 1
770};
771
754#define NES_EEPROM_READ_REQUEST (1<<16) 772#define NES_EEPROM_READ_REQUEST (1<<16)
755#define NES_MAC_ADDR_VALID (1<<20) 773#define NES_MAC_ADDR_VALID (1<<20)
756 774
@@ -1119,6 +1137,7 @@ struct nes_adapter {
1119 u8 netdev_max; /* from host nic address count in EEPROM */ 1137 u8 netdev_max; /* from host nic address count in EEPROM */
1120 u8 port_count; 1138 u8 port_count;
1121 u8 virtwq; 1139 u8 virtwq;
1140 u8 send_term_ok;
1122 u8 et_use_adaptive_rx_coalesce; 1141 u8 et_use_adaptive_rx_coalesce;
1123 u8 adapter_fcn_count; 1142 u8 adapter_fcn_count;
1124 u8 pft_mcast_map[NES_PFT_SIZE]; 1143 u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
1217 u32 num_pd; 1236 u32 num_pd;
1218}; 1237};
1219 1238
1239enum nes_hdrct_flags {
1240 DDP_LEN_FLAG = 0x80,
1241 DDP_HDR_FLAG = 0x40,
1242 RDMA_HDR_FLAG = 0x20
1243};
1244
1245enum nes_term_layers {
1246 LAYER_RDMA = 0,
1247 LAYER_DDP = 1,
1248 LAYER_MPA = 2
1249};
1250
1251enum nes_term_error_types {
1252 RDMAP_CATASTROPHIC = 0,
1253 RDMAP_REMOTE_PROT = 1,
1254 RDMAP_REMOTE_OP = 2,
1255 DDP_CATASTROPHIC = 0,
1256 DDP_TAGGED_BUFFER = 1,
1257 DDP_UNTAGGED_BUFFER = 2,
1258 DDP_LLP = 3
1259};
1260
1261enum nes_term_rdma_errors {
1262 RDMAP_INV_STAG = 0x00,
1263 RDMAP_INV_BOUNDS = 0x01,
1264 RDMAP_ACCESS = 0x02,
1265 RDMAP_UNASSOC_STAG = 0x03,
1266 RDMAP_TO_WRAP = 0x04,
1267 RDMAP_INV_RDMAP_VER = 0x05,
1268 RDMAP_UNEXPECTED_OP = 0x06,
1269 RDMAP_CATASTROPHIC_LOCAL = 0x07,
1270 RDMAP_CATASTROPHIC_GLOBAL = 0x08,
1271 RDMAP_CANT_INV_STAG = 0x09,
1272 RDMAP_UNSPECIFIED = 0xff
1273};
1274
1275enum nes_term_ddp_errors {
1276 DDP_CATASTROPHIC_LOCAL = 0x00,
1277 DDP_TAGGED_INV_STAG = 0x00,
1278 DDP_TAGGED_BOUNDS = 0x01,
1279 DDP_TAGGED_UNASSOC_STAG = 0x02,
1280 DDP_TAGGED_TO_WRAP = 0x03,
1281 DDP_TAGGED_INV_DDP_VER = 0x04,
1282 DDP_UNTAGGED_INV_QN = 0x01,
1283 DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
1284 DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
1285 DDP_UNTAGGED_INV_MO = 0x04,
1286 DDP_UNTAGGED_INV_TOO_LONG = 0x05,
1287 DDP_UNTAGGED_INV_DDP_VER = 0x06
1288};
1289
1290enum nes_term_mpa_errors {
1291 MPA_CLOSED = 0x01,
1292 MPA_CRC = 0x02,
1293 MPA_MARKER = 0x03,
1294 MPA_REQ_RSP = 0x04,
1295};
1296
1297struct nes_terminate_hdr {
1298 u8 layer_etype;
1299 u8 error_code;
1300 u8 hdrct;
1301 u8 rsvd;
1302};
1303
1304/* Used to determine how to fill in terminate error codes */
1305#define IWARP_OPCODE_WRITE 0
1306#define IWARP_OPCODE_READREQ 1
1307#define IWARP_OPCODE_READRSP 2
1308#define IWARP_OPCODE_SEND 3
1309#define IWARP_OPCODE_SEND_INV 4
1310#define IWARP_OPCODE_SEND_SE 5
1311#define IWARP_OPCODE_SEND_SE_INV 6
1312#define IWARP_OPCODE_TERM 7
1313
1314/* These values are used only during terminate processing */
1315#define TERM_DDP_LEN_TAGGED 14
1316#define TERM_DDP_LEN_UNTAGGED 18
1317#define TERM_RDMA_LEN 28
1318#define RDMA_OPCODE_MASK 0x0f
1319#define RDMA_READ_REQ_OPCODE 1
1320#define BAD_FRAME_OFFSET 64
1321#define CQE_MAJOR_DRV 0x8000
1322
1220#define nes_vlan_rx vlan_hwaccel_receive_skb 1323#define nes_vlan_rx vlan_hwaccel_receive_skb
1221#define nes_netif_rx netif_receive_skb 1324#define nes_netif_rx netif_receive_skb
1222 1325
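
The terminate-related definitions added to nes_hw.h above describe the header that starts an iWARP TERMINATE message: which layer (RDMAP, DDP or MPA) detected the problem, the layer-specific error code, and hdrct flags saying whether the offending DDP/RDMA headers and a length field are appended. A minimal standalone sketch of filling such a header for a DDP untagged-buffer error follows; it assumes the layer sits in the upper nibble of layer_etype and the error type in the lower nibble, which this hunk itself does not spell out.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the definitions added in nes_hw.h. */
enum nes_term_layers { LAYER_RDMA = 0, LAYER_DDP = 1, LAYER_MPA = 2 };
enum nes_term_error_types { DDP_UNTAGGED_BUFFER = 2 };
enum nes_term_ddp_errors { DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02 };
enum nes_hdrct_flags { DDP_LEN_FLAG = 0x80, DDP_HDR_FLAG = 0x40, RDMA_HDR_FLAG = 0x20 };

struct nes_terminate_hdr {
        uint8_t layer_etype;
        uint8_t error_code;
        uint8_t hdrct;
        uint8_t rsvd;
};

int main(void)
{
        struct nes_terminate_hdr hdr = { 0 };

        /* Assumed packing: layer in the high nibble, error type in the low nibble. */
        hdr.layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
        hdr.error_code  = DDP_UNTAGGED_INV_MSN_NO_BUF;  /* invalid MSN, no buffer */
        hdr.hdrct       = DDP_LEN_FLAG | DDP_HDR_FLAG;  /* DDP header and length follow */

        printf("layer_etype=0x%02x error_code=0x%02x hdrct=0x%02x\n",
               hdr.layer_etype, hdr.error_code, hdr.hdrct);
        return 0;
}
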
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a282031d15c7..9687c397ce1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { 183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
184 nesadapter->virtwq = 1; 184 nesadapter->virtwq = 1;
185 } 185 }
186 if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
187 nesadapter->send_term_ok = 1;
188
186 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + 189 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
187 (u32)((u8)eeprom_data); 190 (u32)((u8)eeprom_data);
188 191
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
548 spin_unlock_irqrestore(&nesdev->cqp.lock, flags); 551 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
549 } 552 }
550 if (cqp_request == NULL) { 553 if (cqp_request == NULL) {
551 cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
554 cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
552 if (cqp_request) { 555 if (cqp_request) {
553 cqp_request->dynamic = 1; 556 cqp_request->dynamic = 1;
554 INIT_LIST_HEAD(&cqp_request->list); 557 INIT_LIST_HEAD(&cqp_request->list);
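
The nes_utils.c hunk above also gates the new terminate support on firmware version: send_term_ok is set only for firmware 3.16 and later. A tiny standalone sketch of the same predicate, with made-up version numbers for illustration:

#include <stdio.h>

/* Same test as above: sending terminate messages needs firmware >= 3.16. */
static int send_term_ok(unsigned int major_ver, unsigned int minor_ver)
{
        return ((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3);
}

int main(void)
{
        printf("3.15 -> %d\n", send_term_ok(3, 15));  /* 0: too old */
        printf("3.16 -> %d\n", send_term_ok(3, 16));  /* 1 */
        printf("4.0  -> %d\n", send_term_ok(4, 0));   /* 1 */
        return 0;
}
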
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 21e0fd336cf7..a680c42d6e8c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
667 */ 667 */
668static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) 668static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
669{ 669{
670 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
671 struct net_device *netdev = nesvnic->netdev;
672
670 memset(props, 0, sizeof(*props)); 673 memset(props, 0, sizeof(*props));
671 674
672 props->max_mtu = IB_MTU_2048;
673 props->active_mtu = IB_MTU_2048;
675 props->max_mtu = IB_MTU_4096;
676
677 if (netdev->mtu >= 4096)
678 props->active_mtu = IB_MTU_4096;
679 else if (netdev->mtu >= 2048)
680 props->active_mtu = IB_MTU_2048;
681 else if (netdev->mtu >= 1024)
682 props->active_mtu = IB_MTU_1024;
683 else if (netdev->mtu >= 512)
684 props->active_mtu = IB_MTU_512;
685 else
686 props->active_mtu = IB_MTU_256;
687
674 props->lid = 1; 688 props->lid = 1;
675 props->lmc = 0; 689 props->lmc = 0;
676 props->sm_lid = 0; 690 props->sm_lid = 0;
677 props->sm_sl = 0; 691 props->sm_sl = 0;
678 props->state = IB_PORT_ACTIVE;
692 if (nesvnic->linkup)
693 props->state = IB_PORT_ACTIVE;
694 else
695 props->state = IB_PORT_DOWN;
679 props->phys_state = 0; 696 props->phys_state = 0;
680 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | 697 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
681 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; 698 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
@@ -1506,12 +1523,45 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1506 1523
1507 1524
1508/** 1525/**
1526 * nes_clean_cq
1527 */
1528static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
1529{
1530 u32 cq_head;
1531 u32 lo;
1532 u32 hi;
1533 u64 u64temp;
1534 unsigned long flags = 0;
1535
1536 spin_lock_irqsave(&nescq->lock, flags);
1537
1538 cq_head = nescq->hw_cq.cq_head;
1539 while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
1540 rmb();
1541 lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
1542 hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
1543 u64temp = (((u64)hi) << 32) | ((u64)lo);
1544 u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
1545 if (u64temp == (u64)(unsigned long)nesqp) {
1546 /* Zero the context value so cqe will be ignored */
1547 nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
1548 nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
1549 }
1550
1551 if (++cq_head >= nescq->hw_cq.cq_size)
1552 cq_head = 0;
1553 }
1554
1555 spin_unlock_irqrestore(&nescq->lock, flags);
1556}
1557
1558
1559/**
1509 * nes_destroy_qp 1560 * nes_destroy_qp
1510 */ 1561 */
1511static int nes_destroy_qp(struct ib_qp *ibqp) 1562static int nes_destroy_qp(struct ib_qp *ibqp)
1512{ 1563{
1513 struct nes_qp *nesqp = to_nesqp(ibqp); 1564 struct nes_qp *nesqp = to_nesqp(ibqp);
1514 /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
1515 struct nes_ucontext *nes_ucontext; 1565 struct nes_ucontext *nes_ucontext;
1516 struct ib_qp_attr attr; 1566 struct ib_qp_attr attr;
1517 struct iw_cm_id *cm_id; 1567 struct iw_cm_id *cm_id;
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1548 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); 1598 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
1549 } 1599 }
1550 1600
1551
1552 if (nesqp->user_mode) { 1601 if (nesqp->user_mode) {
1553 if ((ibqp->uobject)&&(ibqp->uobject->context)) { 1602 if ((ibqp->uobject)&&(ibqp->uobject->context)) {
1554 nes_ucontext = to_nesucontext(ibqp->uobject->context); 1603 nes_ucontext = to_nesucontext(ibqp->uobject->context);
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1560 } 1609 }
1561 if (nesqp->pbl_pbase) 1610 if (nesqp->pbl_pbase)
1562 kunmap(nesqp->page); 1611 kunmap(nesqp->page);
1612 } else {
1613 /* Clean any pending completions from the cq(s) */
1614 if (nesqp->nesscq)
1615 nes_clean_cq(nesqp, nesqp->nesscq);
1616
1617 if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
1618 nes_clean_cq(nesqp, nesqp->nesrcq);
1563 } 1619 }
1564 1620
1565 nes_rem_ref(&nesqp->ibqp); 1621 nes_rem_ref(&nesqp->ibqp);
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2884 * nes_hw_modify_qp 2940 * nes_hw_modify_qp
2885 */ 2941 */
2886int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, 2942int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2887 u32 next_iwarp_state, u32 wait_completion)
2943 u32 next_iwarp_state, u32 termlen, u32 wait_completion)
2888{ 2944{
2889 struct nes_hw_cqp_wqe *cqp_wqe; 2945 struct nes_hw_cqp_wqe *cqp_wqe;
2890 /* struct iw_cm_id *cm_id = nesqp->cm_id; */ 2946 /* struct iw_cm_id *cm_id = nesqp->cm_id; */
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2916 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); 2972 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
2917 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); 2973 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
2918 2974
2975 /* If sending a terminate message, fill in the length (in words) */
2976 if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
2977 !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
2978 termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
2979 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
2980 }
2981
2919 atomic_set(&cqp_request->refcount, 2); 2982 atomic_set(&cqp_request->refcount, 2);
2920 nes_post_cqp_request(nesdev, cqp_request); 2983 nes_post_cqp_request(nesdev, cqp_request);
2921 2984
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3086 } 3149 }
3087 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", 3150 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
3088 nesqp->hwqp.qp_id); 3151 nesqp->hwqp.qp_id);
3152 if (nesqp->term_flags)
3153 del_timer(&nesqp->terminate_timer);
3154
3089 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; 3155 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3090 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ 3156 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
3091 if (nesqp->hte_added) { 3157 if (nesqp->hte_added) {
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3163 3229
3164 if (issue_modify_qp) { 3230 if (issue_modify_qp) {
3165 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); 3231 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
3166 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
3232 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
3167 if (ret) 3233 if (ret)
3168 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" 3234 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
3169 " failed for QP%u.\n", 3235 " failed for QP%u.\n",
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3328 head = nesqp->hwqp.sq_head; 3394 head = nesqp->hwqp.sq_head;
3329 3395
3330 while (ib_wr) { 3396 while (ib_wr) {
3397 /* Check for QP error */
3398 if (nesqp->term_flags) {
3399 err = -EINVAL;
3400 break;
3401 }
3402
3331 /* Check for SQ overflow */ 3403 /* Check for SQ overflow */
3332 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 3404 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
3333 err = -EINVAL; 3405 err = -EINVAL;
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3484 head = nesqp->hwqp.rq_head; 3556 head = nesqp->hwqp.rq_head;
3485 3557
3486 while (ib_wr) { 3558 while (ib_wr) {
3559 /* Check for QP error */
3560 if (nesqp->term_flags) {
3561 err = -EINVAL;
3562 break;
3563 }
3564
3487 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { 3565 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3488 err = -EINVAL; 3566 err = -EINVAL;
3489 break; 3567 break;
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3547{ 3625{
3548 u64 u64temp; 3626 u64 u64temp;
3549 u64 wrid; 3627 u64 wrid;
3550 /* u64 u64temp; */
3551 unsigned long flags = 0; 3628 unsigned long flags = 0;
3552 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); 3629 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
3553 struct nes_device *nesdev = nesvnic->nesdev; 3630 struct nes_device *nesdev = nesvnic->nesdev;
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3555 struct nes_qp *nesqp; 3632 struct nes_qp *nesqp;
3556 struct nes_hw_cqe cqe; 3633 struct nes_hw_cqe cqe;
3557 u32 head; 3634 u32 head;
3558 u32 wq_tail;
3635 u32 wq_tail = 0;
3559 u32 cq_size; 3636 u32 cq_size;
3560 u32 cqe_count = 0; 3637 u32 cqe_count = 0;
3561 u32 wqe_index; 3638 u32 wqe_index;
3562 u32 u32temp; 3639 u32 u32temp;
3563 /* u32 counter; */
3640 u32 move_cq_head = 1;
3641 u32 err_code;
3564 3642
3565 nes_debug(NES_DBG_CQ, "\n"); 3643 nes_debug(NES_DBG_CQ, "\n");
3566 3644
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3570 cq_size = nescq->hw_cq.cq_size; 3648 cq_size = nescq->hw_cq.cq_size;
3571 3649
3572 while (cqe_count < num_entries) { 3650 while (cqe_count < num_entries) {
3573 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3574 NES_CQE_VALID) {
3575 /*
3576 * Make sure we read CQ entry contents *after*
3577 * we've checked the valid bit.
3578 */
3579 rmb();
3580
3581 cqe = nescq->hw_cq.cq_vbase[head];
3582 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
3583 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3584 wqe_index = u32temp &
3585 (nesdev->nesadapter->max_qp_wr - 1);
3651 if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3652 NES_CQE_VALID) == 0)
3653 break;
3654
3655 /*
3656 * Make sure we read CQ entry contents *after*
3657 * we've checked the valid bit.
3658 */
3659 rmb();
3660
3661 cqe = nescq->hw_cq.cq_vbase[head];
3662 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3663 wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
3586 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); 3664 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
3587 /* parse CQE, get completion context from WQE (either rq or sq */
3665 /* parse CQE, get completion context from WQE (either rq or sq) */
3588 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | 3666 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
3589 ((u64)u32temp); 3667 ((u64)u32temp);
3590 nesqp = *((struct nes_qp **)&u64temp);
3668
3669 if (u64temp) {
3670 nesqp = (struct nes_qp *)(unsigned long)u64temp;
3591 memset(entry, 0, sizeof *entry); 3671 memset(entry, 0, sizeof *entry);
3592 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { 3672 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
3593 entry->status = IB_WC_SUCCESS; 3673 entry->status = IB_WC_SUCCESS;
3594 } else { 3674 } else {
3595 entry->status = IB_WC_WR_FLUSH_ERR;
3675 err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
3676 if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
3677 entry->status = err_code & 0x0000ffff;
3678
3679 /* The rest of the cqe's will be marked as flushed */
3680 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
3681 cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
3682 NES_IWARP_CQE_MINOR_FLUSH);
3683 } else
3684 entry->status = IB_WC_WR_FLUSH_ERR;
3596 } 3685 }
3597 3686
3598 entry->qp = &nesqp->ibqp; 3687 entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3601 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { 3690 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
3602 if (nesqp->skip_lsmm) { 3691 if (nesqp->skip_lsmm) {
3603 nesqp->skip_lsmm = 0; 3692 nesqp->skip_lsmm = 0;
3604 wq_tail = nesqp->hwqp.sq_tail++;
3693 nesqp->hwqp.sq_tail++;
3605 } 3694 }
3606 3695
3607 /* Working on a SQ Completion*/ 3696 /* Working on a SQ Completion*/
3608 wq_tail = wqe_index;
3697 wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
3609 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
3610 wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
3611 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | 3698 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
3612 ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. 3699 ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
3613 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); 3700 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
3614 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3701 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3615 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); 3702 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
3616 3703
3617 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3704 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3618 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { 3705 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
3619 case NES_IWARP_SQ_OP_RDMAW: 3706 case NES_IWARP_SQ_OP_RDMAW:
3620 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); 3707 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3623 case NES_IWARP_SQ_OP_RDMAR: 3710 case NES_IWARP_SQ_OP_RDMAR:
3624 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); 3711 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
3625 entry->opcode = IB_WC_RDMA_READ; 3712 entry->opcode = IB_WC_RDMA_READ;
3626 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3713 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3627 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); 3714 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
3628 break; 3715 break;
3629 case NES_IWARP_SQ_OP_SENDINV: 3716 case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3634 entry->opcode = IB_WC_SEND; 3721 entry->opcode = IB_WC_SEND;
3635 break; 3722 break;
3636 } 3723 }
3724
3725 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
3726 if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
3727 move_cq_head = 0;
3728 wq_tail = nesqp->hwqp.sq_tail;
3729 }
3637 } else { 3730 } else {
3638 /* Working on a RQ Completion*/ 3731 /* Working on a RQ Completion*/
3639 wq_tail = wqe_index;
3640 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3641 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); 3732 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
3642 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | 3733 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
3643 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); 3734 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
3644 entry->opcode = IB_WC_RECV; 3735 entry->opcode = IB_WC_RECV;
3736
3737 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3738 if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
3739 move_cq_head = 0;
3740 wq_tail = nesqp->hwqp.rq_tail;
3741 }
3645 } 3742 }
3743
3646 entry->wr_id = wrid; 3744 entry->wr_id = wrid;
3745 entry++;
3746 cqe_count++;
3747 }
3647 3748
3749 if (move_cq_head) {
3750 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
3648 if (++head >= cq_size) 3751 if (++head >= cq_size)
3649 head = 0; 3752 head = 0;
3650 cqe_count++;
3651 nescq->polled_completions++; 3753 nescq->polled_completions++;
3754
3652 if ((nescq->polled_completions > (cq_size / 2)) || 3755 if ((nescq->polled_completions > (cq_size / 2)) ||
3653 (nescq->polled_completions == 255)) { 3756 (nescq->polled_completions == 255)) {
3654 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" 3757 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
3655 " are pending %u of %u.\n", 3758 " are pending %u of %u.\n",
3656 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); 3759 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
3657 nes_write32(nesdev->regs+NES_CQE_ALLOC, 3760 nes_write32(nesdev->regs+NES_CQE_ALLOC,
3658 nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); 3761 nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
3659 nescq->polled_completions = 0; 3762 nescq->polled_completions = 0;
3660 } 3763 }
3661 entry++;
3662 } else
3663 break;
3764 } else {
3765 /* Update the wqe index and set status to flush */
3766 wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3767 wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
3768 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
3769 cpu_to_le32(wqe_index);
3770 move_cq_head = 1; /* ready for next pass */
3771 }
3664 } 3772 }
3665 3773
3666 if (nescq->polled_completions) { 3774 if (nescq->polled_completions) {
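
The nes_poll_cq() rework above changes how an error code found in a CQE is reported: if the upper 16 bits equal NES_IWARP_CQE_MAJOR_DRV, the lower 16 bits already hold the work-completion status chosen during terminate handling and are used directly; anything else is reported as a flush error, and the remaining entries on that queue are forced to the flush code. A standalone sketch of that decode; the status values here are illustrative stand-ins, not the kernel's enum ib_wc_status.

#include <stdint.h>
#include <stdio.h>

#define NES_IWARP_CQE_MAJOR_DRV 0x8000   /* as added in nes_hw.h */

/* Stand-in status values for illustration only. */
enum wc_status { WC_SUCCESS = 0, WC_WR_FLUSH_ERR = 100 };

/* Decode a CQE error word the way the new nes_poll_cq() path does. */
static int cqe_to_status(uint32_t err_code)
{
        if (err_code == 0)
                return WC_SUCCESS;
        if ((err_code >> 16) == NES_IWARP_CQE_MAJOR_DRV)
                return (int)(err_code & 0x0000ffff);  /* status set by terminate handling */
        return WC_WR_FLUSH_ERR;                       /* everything else is a flush */
}

int main(void)
{
        printf("%d\n", cqe_to_status(0x00000000));  /* success */
        printf("%d\n", cqe_to_status(0x80000005));  /* driver-chosen status 5 */
        printf("%d\n", cqe_to_status(0x00010001));  /* plain flush */
        return 0;
}
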
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 41c07f29f7c9..89822d75f82e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -40,6 +40,10 @@ struct nes_device;
40#define NES_MAX_USER_DB_REGIONS 4096 40#define NES_MAX_USER_DB_REGIONS 4096
41#define NES_MAX_USER_WQ_REGIONS 4096 41#define NES_MAX_USER_WQ_REGIONS 4096
42 42
43#define NES_TERM_SENT 0x01
44#define NES_TERM_RCVD 0x02
45#define NES_TERM_DONE 0x04
46
43struct nes_ucontext { 47struct nes_ucontext {
44 struct ib_ucontext ibucontext; 48 struct ib_ucontext ibucontext;
45 struct nes_device *nesdev; 49 struct nes_device *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
119 spinlock_t lock; 123 spinlock_t lock;
120}; 124};
121 125
126struct disconn_work {
127 struct work_struct work;
128 struct nes_qp *nesqp;
129};
130
122struct iw_cm_id; 131struct iw_cm_id;
123struct ietf_mpa_frame; 132struct ietf_mpa_frame;
124 133
@@ -127,7 +136,6 @@ struct nes_qp {
127 void *allocated_buffer; 136 void *allocated_buffer;
128 struct iw_cm_id *cm_id; 137 struct iw_cm_id *cm_id;
129 struct workqueue_struct *wq; 138 struct workqueue_struct *wq;
130 struct work_struct disconn_work;
131 struct nes_cq *nesscq; 139 struct nes_cq *nesscq;
132 struct nes_cq *nesrcq; 140 struct nes_cq *nesrcq;
133 struct nes_pd *nespd; 141 struct nes_pd *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
155 void *pbl_vbase; 163 void *pbl_vbase;
156 dma_addr_t pbl_pbase; 164 dma_addr_t pbl_pbase;
157 struct page *page; 165 struct page *page;
166 struct timer_list terminate_timer;
167 enum ib_event_type terminate_eventtype;
158 wait_queue_head_t kick_waitq; 168 wait_queue_head_t kick_waitq;
159 u16 in_disconnect; 169 u16 in_disconnect;
160 u16 private_data_len; 170 u16 private_data_len;
171 u16 term_sq_flush_code;
172 u16 term_rq_flush_code;
161 u8 active_conn; 173 u8 active_conn;
162 u8 skip_lsmm; 174 u8 skip_lsmm;
163 u8 user_mode; 175 u8 user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
165 u8 hw_iwarp_state; 177 u8 hw_iwarp_state;
166 u8 flush_issued; 178 u8 flush_issued;
167 u8 hw_tcp_state; 179 u8 hw_tcp_state;
168 u8 disconn_pending; 180 u8 term_flags;
169 u8 destroyed; 181 u8 destroyed;
170}; 182};
171#endif /* NES_VERBS_H */ 183#endif /* NES_VERBS_H */
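
The new term_flags field above replaces disconn_pending and records terminate progress as independent bits, so a QP can note that a TERMINATE was both sent and received before the exchange is marked done. A trivial standalone sketch of that bookkeeping:

#include <stdio.h>

#define NES_TERM_SENT 0x01
#define NES_TERM_RCVD 0x02
#define NES_TERM_DONE 0x04

int main(void)
{
        unsigned char term_flags = 0;

        term_flags |= NES_TERM_SENT;          /* we issued a TERMINATE */
        term_flags |= NES_TERM_RCVD;          /* the peer sent one as well */

        if ((term_flags & NES_TERM_SENT) && (term_flags & NES_TERM_RCVD))
                term_flags |= NES_TERM_DONE;  /* both directions seen, finish up */

        printf("term_flags = 0x%02x\n", term_flags);
        return 0;
}
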
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 181b1f32325f..8f4b4fca2a1d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -31,7 +31,6 @@
31 */ 31 */
32 32
33#include <rdma/ib_cm.h> 33#include <rdma/ib_cm.h>
34#include <rdma/ib_cache.h>
35#include <net/dst.h> 34#include <net/dst.h>
36#include <net/icmp.h> 35#include <net/icmp.h>
37#include <linux/icmpv6.h> 36#include <linux/icmpv6.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e7e5adf84e84..e35f4a0ea9d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -36,7 +36,6 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38 38
39#include <rdma/ib_cache.h>
40#include <linux/ip.h> 39#include <linux/ip.h>
41#include <linux/tcp.h> 40#include <linux/tcp.h>
42 41
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e319d91f60a6..2bf5116deec4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
604 skb_queue_len(&neigh->queue)); 604 skb_queue_len(&neigh->queue));
605 goto err_drop; 605 goto err_drop;
606 } 606 }
607 } else 607 } else {
608 spin_unlock_irqrestore(&priv->lock, flags);
608 ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); 609 ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
610 return;
611 }
609 } else { 612 } else {
610 neigh->ah = NULL; 613 neigh->ah = NULL;
611 614
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
688 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 691 ipoib_dbg(priv, "Send unicast ARP to %04x\n",
689 be16_to_cpu(path->pathrec.dlid)); 692 be16_to_cpu(path->pathrec.dlid));
690 693
694 spin_unlock_irqrestore(&priv->lock, flags);
691 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); 695 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
696 return;
692 } else if ((path->query || !path_rec_start(dev, path)) && 697 } else if ((path->query || !path_rec_start(dev, path)) &&
693 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 698 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
694 /* put pseudoheader back on for next time */ 699 /* put pseudoheader back on for next time */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a0e97532e714..25874fc680c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -720,7 +720,9 @@ out:
720 } 720 }
721 } 721 }
722 722
723 spin_unlock_irqrestore(&priv->lock, flags);
723 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 724 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
725 return;
724 } 726 }
725 727
726unlock: 728unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
758 } 760 }
759} 761}
760 762
763static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
764 const u8 *broadcast)
765{
766 if (addrlen != INFINIBAND_ALEN)
767 return 0;
768 /* reserved QPN, prefix, scope */
769 if (memcmp(addr, broadcast, 6))
770 return 0;
771 /* signature lower, pkey */
772 if (memcmp(addr + 7, broadcast + 7, 3))
773 return 0;
774 return 1;
775}
776
761void ipoib_mcast_restart_task(struct work_struct *work) 777void ipoib_mcast_restart_task(struct work_struct *work)
762{ 778{
763 struct ipoib_dev_priv *priv = 779 struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
791 for (mclist = dev->mc_list; mclist; mclist = mclist->next) { 807 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
792 union ib_gid mgid; 808 union ib_gid mgid;
793 809
810 if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
811 mclist->dmi_addrlen,
812 dev->broadcast))
813 continue;
814
794 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); 815 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
795 816
796 mcast = __ipoib_mcast_find(dev, &mgid); 817 mcast = __ipoib_mcast_find(dev, &mgid);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 95fe0452dae4..6c6a09b1c0fe 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
880}; 880};
881 881
882/* 882/*
883 * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
884 * release for their volume buttons
885 */
886static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
887 0xae, 0xb0, -1U
888};
889
890/*
883 * Samsung NC10,NC20 with Fn+F? key release not working 891 * Samsung NC10,NC20 with Fn+F? key release not working
884 */ 892 */
885static unsigned int atkbd_samsung_forced_release_keys[] = { 893static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -1537,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1537 .driver_data = atkbd_hp_zv6100_forced_release_keys, 1545 .driver_data = atkbd_hp_zv6100_forced_release_keys,
1538 }, 1546 },
1539 { 1547 {
1548 .ident = "HP Presario R4000",
1549 .matches = {
1550 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1551 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
1552 },
1553 .callback = atkbd_setup_forced_release,
1554 .driver_data = atkbd_hp_r4000_forced_release_keys,
1555 },
1556 {
1557 .ident = "HP Presario R4100",
1558 .matches = {
1559 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1560 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
1561 },
1562 .callback = atkbd_setup_forced_release,
1563 .driver_data = atkbd_hp_r4000_forced_release_keys,
1564 },
1565 {
1566 .ident = "HP Presario R4200",
1567 .matches = {
1568 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1569 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
1570 },
1571 .callback = atkbd_setup_forced_release,
1572 .driver_data = atkbd_hp_r4000_forced_release_keys,
1573 },
1574 {
1540 .ident = "Inventec Symphony", 1575 .ident = "Inventec Symphony",
1541 .matches = { 1576 .matches = {
1542 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), 1577 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ae04d8a494e5..ccbf23ece8e3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
382 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), 382 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
383 }, 383 },
384 }, 384 },
385 {
386 .ident = "Acer Aspire 5536",
387 .matches = {
388 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
389 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
390 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
391 },
392 },
385 { } 393 { }
386}; 394};
387 395
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3710ff88fc10..556acff3952f 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
171 */ 171 */
172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); 172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
173 173
174 return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
175 error);
176}
177
178int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
179 unsigned long chunk_size_ulong,
180 char **error)
181{
174 /* Check chunk_size is a power of 2 */ 182 /* Check chunk_size is a power of 2 */
175 if (!is_power_of_2(chunk_size_ulong)) { 183 if (!is_power_of_2(chunk_size_ulong)) {
176 *error = "Chunk size is not a power of 2"; 184 *error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
183 return -EINVAL; 191 return -EINVAL;
184 } 192 }
185 193
194 if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
195 *error = "Chunk size is too high";
196 return -EINVAL;
197 }
198
186 store->chunk_size = chunk_size_ulong; 199 store->chunk_size = chunk_size_ulong;
187 store->chunk_mask = chunk_size_ulong - 1; 200 store->chunk_mask = chunk_size_ulong - 1;
188 store->chunk_shift = ffs(chunk_size_ulong) - 1; 201 store->chunk_shift = ffs(chunk_size_ulong) - 1;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 2442c8c07898..812c71872ba0 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
168int dm_exception_store_type_register(struct dm_exception_store_type *type); 168int dm_exception_store_type_register(struct dm_exception_store_type *type);
169int dm_exception_store_type_unregister(struct dm_exception_store_type *type); 169int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
170 170
171int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
172 unsigned long chunk_size_ulong,
173 char **error);
174
171int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, 175int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
172 unsigned *args_used, 176 unsigned *args_used,
173 struct dm_exception_store **store); 177 struct dm_exception_store **store);
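
dm_exception_store_set_chunk_size(), exported above, centralizes the chunk-size validation so the persistent snapshot store can re-check a size read from disk. Among the checks visible in this hunk, the size must be a power of two and small enough that the chunk size shifted into bytes still fits in an int. A standalone sketch of those two checks, assuming the usual SECTOR_SHIFT of 9 (512-byte sectors) and open-coding is_power_of_2:

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9   /* assumed: 512-byte sectors */

static int is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* Returns NULL if the chunk size passes the checks shown above, else an error string. */
static const char *check_chunk_size(unsigned long chunk_size)
{
        if (!is_power_of_2(chunk_size))
                return "Chunk size is not a power of 2";
        if (chunk_size > (unsigned long)(INT_MAX >> SECTOR_SHIFT))
                return "Chunk size is too high";
        return NULL;
}

int main(void)
{
        const char *err;

        err = check_chunk_size(16);   /* 8KiB chunks: fine */
        printf("16: %s\n", err ? err : "ok");
        err = check_chunk_size(24);   /* not a power of two */
        printf("24: %s\n", err ? err : "ok");
        return 0;
}
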
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index e69b96560997..652bd33109e3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
21 struct dm_target *ti; 21 struct dm_target *ti;
22 uint32_t region_size; 22 uint32_t region_size;
23 region_t region_count; 23 region_t region_count;
24 uint64_t luid;
24 char uuid[DM_UUID_LEN]; 25 char uuid[DM_UUID_LEN];
25 26
26 char *usr_argv_str; 27 char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
63 * restored. 64 * restored.
64 */ 65 */
65retry: 66retry:
66 r = dm_consult_userspace(uuid, request_type, data, 67 r = dm_consult_userspace(uuid, lc->luid, request_type, data,
67 data_size, rdata, rdata_size); 68 data_size, rdata, rdata_size);
68 69
69 if (r != -ESRCH) 70 if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
74 set_current_state(TASK_INTERRUPTIBLE); 75 set_current_state(TASK_INTERRUPTIBLE);
75 schedule_timeout(2*HZ); 76 schedule_timeout(2*HZ);
76 DMWARN("Attempting to contact userspace log server..."); 77 DMWARN("Attempting to contact userspace log server...");
77 r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, 78 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
79 lc->usr_argv_str,
78 strlen(lc->usr_argv_str) + 1, 80 strlen(lc->usr_argv_str) + 1,
79 NULL, NULL); 81 NULL, NULL);
80 if (!r) 82 if (!r)
81 break; 83 break;
82 } 84 }
83 DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); 85 DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
84 r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, 86 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
85 0, NULL, NULL); 87 0, NULL, NULL);
86 if (!r) 88 if (!r)
87 goto retry; 89 goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
111 return -ENOMEM; 113 return -ENOMEM;
112 } 114 }
113 115
114 for (i = 0, str_size = 0; i < argc; i++)
115 str_size += sprintf(str + str_size, "%s ", argv[i]);
116 str_size += sprintf(str + str_size, "%llu",
117 (unsigned long long)ti->len);
116 str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
117 for (i = 0; i < argc; i++)
118 str_size += sprintf(str + str_size, " %s", argv[i]);
118 119
119 *ctr_str = str; 120 *ctr_str = str;
120 return str_size; 121 return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
154 return -ENOMEM; 155 return -ENOMEM;
155 } 156 }
156 157
158 /* The ptr value is sufficient for local unique id */
159 lc->luid = (uint64_t)lc;
160
157 lc->ti = ti; 161 lc->ti = ti;
158 162
159 if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { 163 if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
173 } 177 }
174 178
175 /* Send table string */ 179 /* Send table string */
176 r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, 180 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
177 ctr_str, str_size, NULL, NULL); 181 ctr_str, str_size, NULL, NULL);
178 182
179 if (r == -ESRCH) { 183 if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
183 187
184 /* Since the region size does not change, get it now */ 188 /* Since the region size does not change, get it now */
185 rdata_size = sizeof(rdata); 189 rdata_size = sizeof(rdata);
186 r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, 190 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
187 NULL, 0, (char *)&rdata, &rdata_size); 191 NULL, 0, (char *)&rdata, &rdata_size);
188 192
189 if (r) { 193 if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
212 int r; 216 int r;
213 struct log_c *lc = log->context; 217 struct log_c *lc = log->context;
214 218
215 r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, 219 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
216 NULL, 0, 220 NULL, 0,
217 NULL, NULL); 221 NULL, NULL);
218 222
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
227 int r; 231 int r;
228 struct log_c *lc = log->context; 232 struct log_c *lc = log->context;
229 233
230 r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, 234 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
231 NULL, 0, 235 NULL, 0,
232 NULL, NULL); 236 NULL, NULL);
233 237
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
239 int r; 243 int r;
240 struct log_c *lc = log->context; 244 struct log_c *lc = log->context;
241 245
242 r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, 246 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
243 NULL, 0, 247 NULL, 0,
244 NULL, NULL); 248 NULL, NULL);
245 249
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
252 struct log_c *lc = log->context; 256 struct log_c *lc = log->context;
253 257
254 lc->in_sync_hint = 0; 258 lc->in_sync_hint = 0;
255 r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, 259 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
256 NULL, 0, 260 NULL, 0,
257 NULL, NULL); 261 NULL, NULL);
258 262
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
561 char *result, unsigned maxlen) 565 char *result, unsigned maxlen)
562{ 566{
563 int r = 0; 567 int r = 0;
568 char *table_args;
564 size_t sz = (size_t)maxlen; 569 size_t sz = (size_t)maxlen;
565 struct log_c *lc = log->context; 570 struct log_c *lc = log->context;
566 571
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
577 break; 582 break;
578 case STATUSTYPE_TABLE: 583 case STATUSTYPE_TABLE:
579 sz = 0; 584 sz = 0;
580 DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
581 lc->uuid, lc->usr_argv_str);
585 table_args = strchr(lc->usr_argv_str, ' ');
586 BUG_ON(!table_args); /* There will always be a ' ' */
587 table_args++;
588
589 DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
590 lc->uuid, table_args);
582 break; 591 break;
583 } 592 }
584 return (r) ? 0 : (int)sz; 593 return (r) ? 0 : (int)sz;
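
The userspace-log hunks above add a second identifier alongside the uuid: a local unique id, and the constructor simply uses the kernel address of its log_c as that id ("The ptr value is sufficient for local unique id"), so two tables sharing a uuid can still be told apart by the daemon. A standalone sketch of the same trick, casting an object's address through uintptr_t to a 64-bit id that stays unique for the object's lifetime; the type and function names here are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct log_instance {
        uint64_t luid;     /* local unique id, derived from the object address */
        char uuid[64];     /* external identifier shared with the daemon */
};

static struct log_instance *log_create(const char *uuid)
{
        struct log_instance *lc = calloc(1, sizeof(*lc));

        if (!lc)
                return NULL;
        /* The pointer value is unique for as long as this object exists. */
        lc->luid = (uint64_t)(uintptr_t)lc;
        strncpy(lc->uuid, uuid, sizeof(lc->uuid) - 1);
        return lc;
}

int main(void)
{
        struct log_instance *a = log_create("1111-aaaa");
        struct log_instance *b = log_create("1111-aaaa");  /* same uuid, different luid */

        if (!a || !b)
                return 1;
        printf("a: luid=%llu\n", (unsigned long long)a->luid);
        printf("b: luid=%llu\n", (unsigned long long)b->luid);
        free(a);
        free(b);
        return 0;
}
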
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 8ce74d95ae4d..ba0edad2d048 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
147 147
148/** 148/**
149 * dm_consult_userspace 149 * dm_consult_userspace
150 * @uuid: log's uuid (must be DM_UUID_LEN in size) 150 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
151 * @luid: log's local unique identifier
151 * @request_type: found in include/linux/dm-log-userspace.h 152 * @request_type: found in include/linux/dm-log-userspace.h
152 * @data: data to tx to the server 153 * @data: data to tx to the server
153 * @data_size: size of data in bytes 154 * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
163 * 164 *
164 * Returns: 0 on success, -EXXX on failure 165 * Returns: 0 on success, -EXXX on failure
165 **/ 166 **/
166int dm_consult_userspace(const char *uuid, int request_type, 167int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
167 char *data, size_t data_size, 168 char *data, size_t data_size,
168 char *rdata, size_t *rdata_size) 169 char *rdata, size_t *rdata_size)
169{ 170{
@@ -190,6 +191,7 @@ resend:
190 191
191 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 192 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
192 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 193 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
194 tfr->luid = luid;
193 tfr->seq = dm_ulog_seq++; 195 tfr->seq = dm_ulog_seq++;
194 196
195 /* 197 /*
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index c26d8e4e2710..04ee874f9153 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
11 11
12int dm_ulog_tfr_init(void); 12int dm_ulog_tfr_init(void);
13void dm_ulog_tfr_exit(void); 13void dm_ulog_tfr_exit(void);
14int dm_consult_userspace(const char *uuid, int request_type, 14int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
15 char *data, size_t data_size, 15 char *data, size_t data_size,
16 char *rdata, size_t *rdata_size); 16 char *rdata, size_t *rdata_size);
17 17
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9726577cde49..33f179e66bf5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
648 */ 648 */
649 dm_rh_inc_pending(ms->rh, &sync); 649 dm_rh_inc_pending(ms->rh, &sync);
650 dm_rh_inc_pending(ms->rh, &nosync); 650 dm_rh_inc_pending(ms->rh, &nosync);
651 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; 651
652 /*
653 * If the flush fails on a previous call and succeeds here,
654 * we must not reset the log_failure variable. We need
655 * userspace interaction to do that.
656 */
657 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
652 658
653 /* 659 /*
654 * Dispatch io. 660 * Dispatch io.
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 6e3fe4f14934..d5b2e08750d5 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
106 void *zero_area; 106 void *zero_area;
107 107
108 /* 108 /*
109 * An area used for header. The header can be written
110 * concurrently with metadata (when invalidating the snapshot),
111 * so it needs a separate buffer.
112 */
113 void *header_area;
114
115 /*
109 * Used to keep track of which metadata area the data in 116 * Used to keep track of which metadata area the data in
110 * 'chunk' refers to. 117 * 'chunk' refers to.
111 */ 118 */
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
148 */ 155 */
149 ps->area = vmalloc(len); 156 ps->area = vmalloc(len);
150 if (!ps->area) 157 if (!ps->area)
151 return r;
158 goto err_area;
152 159
153 ps->zero_area = vmalloc(len); 160 ps->zero_area = vmalloc(len);
154 if (!ps->zero_area) {
155 vfree(ps->area);
156 return r;
157 }
161 if (!ps->zero_area)
162 goto err_zero_area;
158 memset(ps->zero_area, 0, len); 163 memset(ps->zero_area, 0, len);
159 164
165 ps->header_area = vmalloc(len);
166 if (!ps->header_area)
167 goto err_header_area;
168
160 return 0; 169 return 0;
170
171err_header_area:
172 vfree(ps->zero_area);
173
174err_zero_area:
175 vfree(ps->area);
176
177err_area:
178 return r;
161} 179}
162 180
163static void free_area(struct pstore *ps) 181static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
169 if (ps->zero_area) 187 if (ps->zero_area)
170 vfree(ps->zero_area); 188 vfree(ps->zero_area);
171 ps->zero_area = NULL; 189 ps->zero_area = NULL;
190
191 if (ps->header_area)
192 vfree(ps->header_area);
193 ps->header_area = NULL;
172} 194}
173 195
174struct mdata_req { 196struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
188/* 210/*
189 * Read or write a chunk aligned and sized block of data from a device. 211 * Read or write a chunk aligned and sized block of data from a device.
190 */ 212 */
191static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) 213static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
214 int metadata)
192{ 215{
193 struct dm_io_region where = { 216 struct dm_io_region where = {
194 .bdev = ps->store->cow->bdev, 217 .bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
198 struct dm_io_request io_req = { 221 struct dm_io_request io_req = {
199 .bi_rw = rw, 222 .bi_rw = rw,
200 .mem.type = DM_IO_VMA, 223 .mem.type = DM_IO_VMA,
201 .mem.ptr.vma = ps->area, 224 .mem.ptr.vma = area,
202 .client = ps->io_client, 225 .client = ps->io_client,
203 .notify.fn = NULL, 226 .notify.fn = NULL,
204 }; 227 };
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
240 263
241 chunk = area_location(ps, ps->current_area); 264 chunk = area_location(ps, ps->current_area);
242 265
243 r = chunk_io(ps, chunk, rw, 0); 266 r = chunk_io(ps, ps->area, chunk, rw, 0);
244 if (r) 267 if (r)
245 return r; 268 return r;
246 269
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
254 277
255static int zero_disk_area(struct pstore *ps, chunk_t area) 278static int zero_disk_area(struct pstore *ps, chunk_t area)
256{ 279{
257 struct dm_io_region where = {
280 return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
258 .bdev = ps->store->cow->bdev,
259 .sector = ps->store->chunk_size * area_location(ps, area),
260 .count = ps->store->chunk_size,
261 };
262 struct dm_io_request io_req = {
263 .bi_rw = WRITE,
264 .mem.type = DM_IO_VMA,
265 .mem.ptr.vma = ps->zero_area,
266 .client = ps->io_client,
267 .notify.fn = NULL,
268 };
269
270 return dm_io(&io_req, 1, &where, NULL);
271} 281}
272 282
273static int read_header(struct pstore *ps, int *new_snapshot) 283static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
276 struct disk_header *dh; 286 struct disk_header *dh;
277 chunk_t chunk_size; 287 chunk_t chunk_size;
278 int chunk_size_supplied = 1; 288 int chunk_size_supplied = 1;
289 char *chunk_err;
279 290
280 /* 291 /*
281 * Use default chunk size (or hardsect_size, if larger) if none supplied 292 * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
297 if (r) 308 if (r)
298 return r; 309 return r;
299 310
300 r = chunk_io(ps, 0, READ, 1); 311 r = chunk_io(ps, ps->header_area, 0, READ, 1);
301 if (r) 312 if (r)
302 goto bad; 313 goto bad;
303 314
304 dh = (struct disk_header *) ps->area; 315 dh = ps->header_area;
305 316
306 if (le32_to_cpu(dh->magic) == 0) { 317 if (le32_to_cpu(dh->magic) == 0) {
307 *new_snapshot = 1; 318 *new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
319 ps->version = le32_to_cpu(dh->version); 330 ps->version = le32_to_cpu(dh->version);
320 chunk_size = le32_to_cpu(dh->chunk_size); 331 chunk_size = le32_to_cpu(dh->chunk_size);
321 332
322 if (!chunk_size_supplied || ps->store->chunk_size == chunk_size) 333 if (ps->store->chunk_size == chunk_size)
323 return 0; 334 return 0;
324 335
325 DMWARN("chunk size %llu in device metadata overrides " 336 if (chunk_size_supplied)
326 "table chunk size of %llu.", 337 DMWARN("chunk size %llu in device metadata overrides "
327 (unsigned long long)chunk_size, 338 "table chunk size of %llu.",
328 (unsigned long long)ps->store->chunk_size); 339 (unsigned long long)chunk_size,
340 (unsigned long long)ps->store->chunk_size);
329 341
330 /* We had a bogus chunk_size. Fix stuff up. */ 342 /* We had a bogus chunk_size. Fix stuff up. */
331 free_area(ps); 343 free_area(ps);
332 344
333 ps->store->chunk_size = chunk_size; 345 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
334 ps->store->chunk_mask = chunk_size - 1; 346 &chunk_err);
335 ps->store->chunk_shift = ffs(chunk_size) - 1; 347 if (r) {
348 DMERR("invalid on-disk chunk size %llu: %s.",
349 (unsigned long long)chunk_size, chunk_err);
350 return r;
351 }
336 352
337 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), 353 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
338 ps->io_client); 354 ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
351{ 367{
352 struct disk_header *dh; 368 struct disk_header *dh;
353 369
354 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); 370 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
355 371
356 dh = (struct disk_header *) ps->area; 372 dh = ps->header_area;
357 dh->magic = cpu_to_le32(SNAP_MAGIC); 373 dh->magic = cpu_to_le32(SNAP_MAGIC);
358 dh->valid = cpu_to_le32(ps->valid); 374 dh->valid = cpu_to_le32(ps->valid);
359 dh->version = cpu_to_le32(ps->version); 375 dh->version = cpu_to_le32(ps->version);
360 dh->chunk_size = cpu_to_le32(ps->store->chunk_size); 376 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
361 377
362 return chunk_io(ps, 0, WRITE, 1); 378 return chunk_io(ps, ps->header_area, 0, WRITE, 1);
363} 379}
364 380
365/* 381/*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
679 ps->valid = 1; 695 ps->valid = 1;
680 ps->version = SNAPSHOT_DISK_VERSION; 696 ps->version = SNAPSHOT_DISK_VERSION;
681 ps->area = NULL; 697 ps->area = NULL;
698 ps->zero_area = NULL;
699 ps->header_area = NULL;
682 ps->next_free = 2; /* skipping the header and first area */ 700 ps->next_free = 2; /* skipping the header and first area */
683 ps->current_committed = 0; 701 ps->current_committed = 0;
684 702
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d573165cd2b7..57f1bf7f3b7a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
1176 return 0; 1176 return 0;
1177} 1177}
1178 1178
1179static int snapshot_iterate_devices(struct dm_target *ti,
1180 iterate_devices_callout_fn fn, void *data)
1181{
1182 struct dm_snapshot *snap = ti->private;
1183
1184 return fn(ti, snap->origin, 0, ti->len, data);
1185}
1186
1187
1179/*----------------------------------------------------------------- 1188/*-----------------------------------------------------------------
1180 * Origin methods 1189 * Origin methods
1181 *---------------------------------------------------------------*/ 1190 *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1410 return 0; 1419 return 0;
1411} 1420}
1412 1421
1422static int origin_iterate_devices(struct dm_target *ti,
1423 iterate_devices_callout_fn fn, void *data)
1424{
1425 struct dm_dev *dev = ti->private;
1426
1427 return fn(ti, dev, 0, ti->len, data);
1428}
1429
1413static struct target_type origin_target = { 1430static struct target_type origin_target = {
1414 .name = "snapshot-origin", 1431 .name = "snapshot-origin",
1415 .version = {1, 6, 0}, 1432 .version = {1, 7, 0},
1416 .module = THIS_MODULE, 1433 .module = THIS_MODULE,
1417 .ctr = origin_ctr, 1434 .ctr = origin_ctr,
1418 .dtr = origin_dtr, 1435 .dtr = origin_dtr,
1419 .map = origin_map, 1436 .map = origin_map,
1420 .resume = origin_resume, 1437 .resume = origin_resume,
1421 .status = origin_status, 1438 .status = origin_status,
1439 .iterate_devices = origin_iterate_devices,
1422}; 1440};
1423 1441
1424static struct target_type snapshot_target = { 1442static struct target_type snapshot_target = {
1425 .name = "snapshot", 1443 .name = "snapshot",
1426 .version = {1, 6, 0}, 1444 .version = {1, 7, 0},
1427 .module = THIS_MODULE, 1445 .module = THIS_MODULE,
1428 .ctr = snapshot_ctr, 1446 .ctr = snapshot_ctr,
1429 .dtr = snapshot_dtr, 1447 .dtr = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
1431 .end_io = snapshot_end_io, 1449 .end_io = snapshot_end_io,
1432 .resume = snapshot_resume, 1450 .resume = snapshot_resume,
1433 .status = snapshot_status, 1451 .status = snapshot_status,
1452 .iterate_devices = snapshot_iterate_devices,
1434}; 1453};
1435 1454
1436static int __init dm_snapshot_init(void) 1455static int __init dm_snapshot_init(void)
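The snapshot and origin targets sit on a single underlying device, so their iterate_devices hooks simply invoke the callout once over the whole target length. A target with several devices walks each of them with the same callout, conventionally stopping at the first non-zero return; a hedged sketch for a hypothetical two-device target:

/* Sketch only; my_ctx and its members are hypothetical. */
static int my_iterate_devices(struct dm_target *ti,
			      iterate_devices_callout_fn fn, void *data)
{
	struct my_ctx *ctx = ti->private;
	int r;

	r = fn(ti, ctx->dev_a, 0, ti->len, data);
	if (!r)
		r = fn(ti, ctx->dev_b, 0, ti->len, data);
	return r;	/* first non-zero result ends the walk */
}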
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4e0e5937e42a..3e563d251733 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
329 return ret; 329 return ret;
330} 330}
331 331
332static void stripe_io_hints(struct dm_target *ti,
333 struct queue_limits *limits)
334{
335 struct stripe_c *sc = ti->private;
336 unsigned chunk_size = (sc->chunk_mask + 1) << 9;
337
338 blk_limits_io_min(limits, chunk_size);
339 limits->io_opt = chunk_size * sc->stripes;
340}
341
332static struct target_type stripe_target = { 342static struct target_type stripe_target = {
333 .name = "striped", 343 .name = "striped",
334 .version = {1, 2, 0}, 344 .version = {1, 3, 0},
335 .module = THIS_MODULE, 345 .module = THIS_MODULE,
336 .ctr = stripe_ctr, 346 .ctr = stripe_ctr,
337 .dtr = stripe_dtr, 347 .dtr = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
339 .end_io = stripe_end_io, 349 .end_io = stripe_end_io,
340 .status = stripe_status, 350 .status = stripe_status,
341 .iterate_devices = stripe_iterate_devices, 351 .iterate_devices = stripe_iterate_devices,
352 .io_hints = stripe_io_hints,
342}; 353};
343 354
344int __init dm_stripe_init(void) 355int __init dm_stripe_init(void)
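The new io_hints hook converts the chunk size from sectors to bytes, (chunk_mask + 1) << 9, reports it as the minimum I/O size, and reports one full stripe (chunk size times the number of members) as the optimal I/O size. Worked numbers, assuming 128-sector chunks across 4 stripes:

/* Illustrative values only. */
unsigned chunk_mask = 127;			/* 128 sectors per chunk */
unsigned stripes    = 4;
unsigned chunk_size = (chunk_mask + 1) << 9;	/* 128 * 512 = 65536 bytes (io_min) */
unsigned io_opt     = chunk_size * stripes;	/* 65536 * 4 = 262144 bytes */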
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d952b3441913..1a6cb3c7822e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
343} 343}
344 344
345/* 345/*
346 * If possible, this checks an area of a destination device is valid. 346 * If possible, this checks an area of a destination device is invalid.
347 */ 347 */
348static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, 348static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
349 sector_t start, sector_t len, void *data) 349 sector_t start, sector_t len, void *data)
350{ 350{
351 struct queue_limits *limits = data; 351 struct queue_limits *limits = data;
352 struct block_device *bdev = dev->bdev; 352 struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
357 char b[BDEVNAME_SIZE]; 357 char b[BDEVNAME_SIZE];
358 358
359 if (!dev_size) 359 if (!dev_size)
360 return 1; 360 return 0;
361 361
362 if ((start >= dev_size) || (start + len > dev_size)) { 362 if ((start >= dev_size) || (start + len > dev_size)) {
363 DMWARN("%s: %s too small for target", 363 DMWARN("%s: %s too small for target: "
364 dm_device_name(ti->table->md), bdevname(bdev, b)); 364 "start=%llu, len=%llu, dev_size=%llu",
365 return 0; 365 dm_device_name(ti->table->md), bdevname(bdev, b),
366 (unsigned long long)start,
367 (unsigned long long)len,
368 (unsigned long long)dev_size);
369 return 1;
366 } 370 }
367 371
368 if (logical_block_size_sectors <= 1) 372 if (logical_block_size_sectors <= 1)
369 return 1; 373 return 0;
370 374
371 if (start & (logical_block_size_sectors - 1)) { 375 if (start & (logical_block_size_sectors - 1)) {
372 DMWARN("%s: start=%llu not aligned to h/w " 376 DMWARN("%s: start=%llu not aligned to h/w "
373 "logical block size %hu of %s", 377 "logical block size %u of %s",
374 dm_device_name(ti->table->md), 378 dm_device_name(ti->table->md),
375 (unsigned long long)start, 379 (unsigned long long)start,
376 limits->logical_block_size, bdevname(bdev, b)); 380 limits->logical_block_size, bdevname(bdev, b));
377 return 0; 381 return 1;
378 } 382 }
379 383
380 if (len & (logical_block_size_sectors - 1)) { 384 if (len & (logical_block_size_sectors - 1)) {
381 DMWARN("%s: len=%llu not aligned to h/w " 385 DMWARN("%s: len=%llu not aligned to h/w "
382 "logical block size %hu of %s", 386 "logical block size %u of %s",
383 dm_device_name(ti->table->md), 387 dm_device_name(ti->table->md),
384 (unsigned long long)len, 388 (unsigned long long)len,
385 limits->logical_block_size, bdevname(bdev, b)); 389 limits->logical_block_size, bdevname(bdev, b));
386 return 0; 390 return 1;
387 } 391 }
388 392
389 return 1; 393 return 0;
390} 394}
391 395
392/* 396/*
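The alignment tests above depend on logical_block_size_sectors being a power of two, so start & (size_in_sectors - 1) is the remainder of start divided by the block size. With the inverted return convention, any non-zero remainder now makes the iterate_devices walk flag the area as invalid. A small worked check, assuming 4096-byte logical blocks (8 sectors):

/* Illustrative only. */
unsigned long long start = 1001;		/* in sectors */
unsigned lbs_sectors = 8;			/* 4096-byte logical blocks */
int bad = start & (lbs_sectors - 1);		/* 1001 % 8 == 1 -> area reported invalid */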
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
496 } 500 }
497 501
498 if (blk_stack_limits(limits, &q->limits, start << 9) < 0) 502 if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
499 DMWARN("%s: target device %s is misaligned", 503 DMWARN("%s: target device %s is misaligned: "
500 dm_device_name(ti->table->md), bdevname(bdev, b)); 504 "physical_block_size=%u, logical_block_size=%u, "
505 "alignment_offset=%u, start=%llu",
506 dm_device_name(ti->table->md), bdevname(bdev, b),
507 q->limits.physical_block_size,
508 q->limits.logical_block_size,
509 q->limits.alignment_offset,
510 (unsigned long long) start << 9);
511
501 512
502 /* 513 /*
503 * Check if merge fn is supported. 514 * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
698 709
699 if (remaining) { 710 if (remaining) {
700 DMWARN("%s: table line %u (start sect %llu len %llu) " 711 DMWARN("%s: table line %u (start sect %llu len %llu) "
701 "not aligned to h/w logical block size %hu", 712 "not aligned to h/w logical block size %u",
702 dm_device_name(table->md), i, 713 dm_device_name(table->md), i,
703 (unsigned long long) ti->begin, 714 (unsigned long long) ti->begin,
704 (unsigned long long) ti->len, 715 (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
996 ti->type->iterate_devices(ti, dm_set_device_limits, 1007 ti->type->iterate_devices(ti, dm_set_device_limits,
997 &ti_limits); 1008 &ti_limits);
998 1009
1010 /* Set I/O hints portion of queue limits */
1011 if (ti->type->io_hints)
1012 ti->type->io_hints(ti, &ti_limits);
1013
999 /* 1014 /*
1000 * Check each device area is consistent with the target's 1015 * Check each device area is consistent with the target's
1001 * overall queue limits. 1016 * overall queue limits.
1002 */ 1017 */
1003 if (!ti->type->iterate_devices(ti, device_area_is_valid, 1018 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1004 &ti_limits)) 1019 &ti_limits))
1005 return -EINVAL; 1020 return -EINVAL;
1006 1021
1007combine_limits: 1022combine_limits:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a311ea0d441..b4845b14740d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
738 dm_put(md); 738 dm_put(md);
739} 739}
740 740
741static void free_rq_clone(struct request *clone)
742{
743 struct dm_rq_target_io *tio = clone->end_io_data;
744
745 blk_rq_unprep_clone(clone);
746 free_rq_tio(tio);
747}
748
741static void dm_unprep_request(struct request *rq) 749static void dm_unprep_request(struct request *rq)
742{ 750{
743 struct request *clone = rq->special; 751 struct request *clone = rq->special;
744 struct dm_rq_target_io *tio = clone->end_io_data;
745 752
746 rq->special = NULL; 753 rq->special = NULL;
747 rq->cmd_flags &= ~REQ_DONTPREP; 754 rq->cmd_flags &= ~REQ_DONTPREP;
748 755
749 blk_rq_unprep_clone(clone); 756 free_rq_clone(clone);
750 free_rq_tio(tio);
751} 757}
752 758
753/* 759/*
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error)
825 rq->sense_len = clone->sense_len; 831 rq->sense_len = clone->sense_len;
826 } 832 }
827 833
828 BUG_ON(clone->bio); 834 free_rq_clone(clone);
829 free_rq_tio(tio);
830 835
831 blk_end_request_all(rq, error); 836 blk_end_request_all(rq, error);
832 837
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ae5fe91867e1..10ed195c0c1c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
736 flash->partitioned = 1; 736 flash->partitioned = 1;
737 return add_mtd_partitions(&flash->mtd, parts, nr_parts); 737 return add_mtd_partitions(&flash->mtd, parts, nr_parts);
738 } 738 }
739 } else if (data->nr_parts) 739 } else if (data && data->nr_parts)
740 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 740 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
741 data->nr_parts, data->name); 741 data->nr_parts, data->name);
742 742
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index fb86cacd5bdb..1002e1882996 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
135int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, 135int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
136 size_t *retlen, uint8_t *buf) 136 size_t *retlen, uint8_t *buf)
137{ 137{
138 loff_t mask = mtd->writesize - 1;
138 struct mtd_oob_ops ops; 139 struct mtd_oob_ops ops;
139 int res; 140 int res;
140 141
141 ops.mode = MTD_OOB_PLACE; 142 ops.mode = MTD_OOB_PLACE;
142 ops.ooboffs = offs & (mtd->writesize - 1); 143 ops.ooboffs = offs & mask;
143 ops.ooblen = len; 144 ops.ooblen = len;
144 ops.oobbuf = buf; 145 ops.oobbuf = buf;
145 ops.datbuf = NULL; 146 ops.datbuf = NULL;
146 147
147 res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 148 res = mtd->read_oob(mtd, offs & ~mask, &ops);
148 *retlen = ops.oobretlen; 149 *retlen = ops.oobretlen;
149 return res; 150 return res;
150} 151}
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
155int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, 156int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
156 size_t *retlen, uint8_t *buf) 157 size_t *retlen, uint8_t *buf)
157{ 158{
159 loff_t mask = mtd->writesize - 1;
158 struct mtd_oob_ops ops; 160 struct mtd_oob_ops ops;
159 int res; 161 int res;
160 162
161 ops.mode = MTD_OOB_PLACE; 163 ops.mode = MTD_OOB_PLACE;
162 ops.ooboffs = offs & (mtd->writesize - 1); 164 ops.ooboffs = offs & mask;
163 ops.ooblen = len; 165 ops.ooblen = len;
164 ops.oobbuf = buf; 166 ops.oobbuf = buf;
165 ops.datbuf = NULL; 167 ops.datbuf = NULL;
166 168
167 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 169 res = mtd->write_oob(mtd, offs & ~mask, &ops);
168 *retlen = ops.oobretlen; 170 *retlen = ops.oobretlen;
169 return res; 171 return res;
170} 172}
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
177static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, 179static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
178 size_t *retlen, uint8_t *buf, uint8_t *oob) 180 size_t *retlen, uint8_t *buf, uint8_t *oob)
179{ 181{
182 loff_t mask = mtd->writesize - 1;
180 struct mtd_oob_ops ops; 183 struct mtd_oob_ops ops;
181 int res; 184 int res;
182 185
183 ops.mode = MTD_OOB_PLACE; 186 ops.mode = MTD_OOB_PLACE;
184 ops.ooboffs = offs; 187 ops.ooboffs = offs & mask;
185 ops.ooblen = mtd->oobsize; 188 ops.ooblen = mtd->oobsize;
186 ops.oobbuf = oob; 189 ops.oobbuf = oob;
187 ops.datbuf = buf; 190 ops.datbuf = buf;
188 ops.len = len; 191 ops.len = len;
189 192
190 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 193 res = mtd->write_oob(mtd, offs & ~mask, &ops);
191 *retlen = ops.retlen; 194 *retlen = ops.retlen;
192 return res; 195 return res;
193} 196}
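Hoisting mask = mtd->writesize - 1 makes the split explicit: offs & mask is the offset within the page (the OOB offset) and offs & ~mask is the page-aligned address handed to read_oob/write_oob. It also fixes nftl_write(), which previously passed the raw offset as ooboffs. For example, with a 2048-byte writesize and offs = 5000:

/* Illustrative only, not the nftl code itself. */
loff_t writesize = 2048;
loff_t mask = writesize - 1;		/* 0x7ff */
loff_t offs = 5000;

loff_t ooboffs  = offs & mask;		/* 5000 - 4096 = 904  */
loff_t page_off = offs & ~mask;		/* 2 * 2048    = 4096 */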
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fb5df5c6203e..c97ab82ec743 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,6 +1286,7 @@ static int cxgb_open(struct net_device *dev)
1286 if (!other_ports) 1286 if (!other_ports)
1287 schedule_chk_task(adapter); 1287 schedule_chk_task(adapter);
1288 1288
1289 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1289 return 0; 1290 return 0;
1290} 1291}
1291 1292
@@ -1318,6 +1319,7 @@ static int cxgb_close(struct net_device *dev)
1318 if (!adapter->open_device_map) 1319 if (!adapter->open_device_map)
1319 cxgb_down(adapter); 1320 cxgb_down(adapter);
1320 1321
1322 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1321 return 0; 1323 return 0;
1322} 1324}
1323 1325
@@ -2717,7 +2719,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
2717 2719
2718 if (is_offload(adapter) && 2720 if (is_offload(adapter) &&
2719 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { 2721 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2720 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); 2722 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2721 offload_close(&adapter->tdev); 2723 offload_close(&adapter->tdev);
2722 } 2724 }
2723 2725
@@ -2782,7 +2784,7 @@ static void t3_resume_ports(struct adapter *adapter)
2782 } 2784 }
2783 2785
2784 if (is_offload(adapter) && !ofld_disable) 2786 if (is_offload(adapter) && !ofld_disable)
2785 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); 2787 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2786} 2788}
2787 2789
2788/* 2790/*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index f9f54b57b28c..75064eea1d87 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
153 mutex_unlock(&cxgb3_db_lock); 153 mutex_unlock(&cxgb3_db_lock);
154} 154}
155 155
156void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error) 156void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
157{ 157{
158 struct cxgb3_client *client; 158 struct cxgb3_client *client;
159 159
160 mutex_lock(&cxgb3_db_lock); 160 mutex_lock(&cxgb3_db_lock);
161 list_for_each_entry(client, &client_list, client_list) { 161 list_for_each_entry(client, &client_list, client_list) {
162 if (client->err_handler) 162 if (client->event_handler)
163 client->err_handler(tdev, status, error); 163 client->event_handler(tdev, event, port);
164 } 164 }
165 mutex_unlock(&cxgb3_db_lock); 165 mutex_unlock(&cxgb3_db_lock);
166} 166}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 55945f422aec..670aa62042da 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
64void cxgb3_unregister_client(struct cxgb3_client *client); 64void cxgb3_unregister_client(struct cxgb3_client *client);
65void cxgb3_add_clients(struct t3cdev *tdev); 65void cxgb3_add_clients(struct t3cdev *tdev);
66void cxgb3_remove_clients(struct t3cdev *tdev); 66void cxgb3_remove_clients(struct t3cdev *tdev);
67void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error); 67void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
68 68
69typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, 69typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
70 struct sk_buff *skb, void *ctx); 70 struct sk_buff *skb, void *ctx);
71 71
72enum { 72enum {
73 OFFLOAD_STATUS_UP, 73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN 74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP
75}; 77};
76 78
77struct cxgb3_client { 79struct cxgb3_client {
@@ -82,7 +84,7 @@ struct cxgb3_client {
82 int (*redirect)(void *ctx, struct dst_entry *old, 84 int (*redirect)(void *ctx, struct dst_entry *old,
83 struct dst_entry *new, struct l2t_entry *l2t); 85 struct dst_entry *new, struct l2t_entry *l2t);
84 struct list_head client_list; 86 struct list_head client_list;
85 void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error); 87 void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
86}; 88};
87 89
88/* 90/*
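With the rename, clients receive per-port up/down transitions as well as adapter-wide status changes through the same callback. A rough sketch of a consumer, using only the names visible above (real clients such as the iWARP driver fill in the remaining cxgb3_client fields):

/* Sketch only; other cxgb3_client fields are omitted here. */
static void my_event_handler(struct t3cdev *tdev, u32 event, u32 port)
{
	switch (event) {
	case OFFLOAD_PORT_UP:
	case OFFLOAD_PORT_DOWN:
		/* a single port changed state */
		break;
	case OFFLOAD_STATUS_UP:
	case OFFLOAD_STATUS_DOWN:
		/* adapter-wide offload status changed */
		break;
	}
}

static struct cxgb3_client my_client = {
	.event_handler = my_event_handler,
};

/* registered and removed via cxgb3_register_client()/cxgb3_unregister_client() */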
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e212f2c5448b..a00ec639c380 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -491,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev)
491 491
492 dev_set_drvdata(&ofdev->dev, NULL); 492 dev_set_drvdata(&ofdev->dev, NULL);
493 493
494 unregister_netdev(priv->ndev);
494 iounmap(priv->regs); 495 iounmap(priv->regs);
495 free_netdev(priv->ndev); 496 free_netdev(priv->ndev);
496 497
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index ac57b6a42c6e..ccfe276943f0 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -34,7 +34,6 @@
34 * SOFTWARE. 34 * SOFTWARE.
35 */ 35 */
36 36
37#include <linux/init.h>
38#include <linux/hardirq.h> 37#include <linux/hardirq.h>
39 38
40#include <linux/mlx4/cmd.h> 39#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index b9ceddde46c0..bffb7995cb70 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/interrupt.h> 34#include <linux/interrupt.h>
36#include <linux/mm.h> 35#include <linux/mm.h>
37#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
@@ -42,6 +41,10 @@
42#include "fw.h" 41#include "fw.h"
43 42
44enum { 43enum {
44 MLX4_IRQNAME_SIZE = 64
45};
46
47enum {
45 MLX4_NUM_ASYNC_EQE = 0x100, 48 MLX4_NUM_ASYNC_EQE = 0x100,
46 MLX4_NUM_SPARE_EQE = 0x80, 49 MLX4_NUM_SPARE_EQE = 0x80,
47 MLX4_EQ_ENTRY_SIZE = 0x20 50 MLX4_EQ_ENTRY_SIZE = 0x20
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
526 iounmap(priv->clr_base); 529 iounmap(priv->clr_base);
527} 530}
528 531
529int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
530{
531 struct mlx4_priv *priv = mlx4_priv(dev);
532 int ret;
533
534 /*
535 * We assume that mapping one page is enough for the whole EQ
536 * context table. This is fine with all current HCAs, because
537 * we only use 32 EQs and each EQ uses 64 bytes of context
538 * memory, or 1 KB total.
539 */
540 priv->eq_table.icm_virt = icm_virt;
541 priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
542 if (!priv->eq_table.icm_page)
543 return -ENOMEM;
544 priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
545 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
546 if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
547 __free_page(priv->eq_table.icm_page);
548 return -ENOMEM;
549 }
550
551 ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
552 if (ret) {
553 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
554 PCI_DMA_BIDIRECTIONAL);
555 __free_page(priv->eq_table.icm_page);
556 }
557
558 return ret;
559}
560
561void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
562{
563 struct mlx4_priv *priv = mlx4_priv(dev);
564
565 mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
566 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
567 PCI_DMA_BIDIRECTIONAL);
568 __free_page(priv->eq_table.icm_page);
569}
570
571int mlx4_alloc_eq_table(struct mlx4_dev *dev) 532int mlx4_alloc_eq_table(struct mlx4_dev *dev)
572{ 533{
573 struct mlx4_priv *priv = mlx4_priv(dev); 534 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
615 priv->eq_table.clr_int = priv->clr_base + 576 priv->eq_table.clr_int = priv->clr_base +
616 (priv->eq_table.inta_pin < 32 ? 4 : 0); 577 (priv->eq_table.inta_pin < 32 ? 4 : 0);
617 578
618 priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL); 579 priv->eq_table.irq_names =
580 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
581 GFP_KERNEL);
619 if (!priv->eq_table.irq_names) { 582 if (!priv->eq_table.irq_names) {
620 err = -ENOMEM; 583 err = -ENOMEM;
621 goto err_out_bitmap; 584 goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
638 goto err_out_comp; 601 goto err_out_comp;
639 602
640 if (dev->flags & MLX4_FLAG_MSI_X) { 603 if (dev->flags & MLX4_FLAG_MSI_X) {
641 static const char async_eq_name[] = "mlx4-async";
642 const char *eq_name; 604 const char *eq_name;
643 605
644 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { 606 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
645 if (i < dev->caps.num_comp_vectors) { 607 if (i < dev->caps.num_comp_vectors) {
646 snprintf(priv->eq_table.irq_names + i * 16, 16, 608 snprintf(priv->eq_table.irq_names +
647 "mlx4-comp-%d", i); 609 i * MLX4_IRQNAME_SIZE,
648 eq_name = priv->eq_table.irq_names + i * 16; 610 MLX4_IRQNAME_SIZE,
649 } else 611 "mlx4-comp-%d@pci:%s", i,
650 eq_name = async_eq_name; 612 pci_name(dev->pdev));
613 } else {
614 snprintf(priv->eq_table.irq_names +
615 i * MLX4_IRQNAME_SIZE,
616 MLX4_IRQNAME_SIZE,
617 "mlx4-async@pci:%s",
618 pci_name(dev->pdev));
619 }
651 620
621 eq_name = priv->eq_table.irq_names +
622 i * MLX4_IRQNAME_SIZE;
652 err = request_irq(priv->eq_table.eq[i].irq, 623 err = request_irq(priv->eq_table.eq[i].irq,
653 mlx4_msi_x_interrupt, 0, eq_name, 624 mlx4_msi_x_interrupt, 0, eq_name,
654 priv->eq_table.eq + i); 625 priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
658 priv->eq_table.eq[i].have_irq = 1; 629 priv->eq_table.eq[i].have_irq = 1;
659 } 630 }
660 } else { 631 } else {
632 snprintf(priv->eq_table.irq_names,
633 MLX4_IRQNAME_SIZE,
634 DRV_NAME "@pci:%s",
635 pci_name(dev->pdev));
661 err = request_irq(dev->pdev->irq, mlx4_interrupt, 636 err = request_irq(dev->pdev->irq, mlx4_interrupt,
662 IRQF_SHARED, DRV_NAME, dev); 637 IRQF_SHARED, priv->eq_table.irq_names, dev);
663 if (err) 638 if (err)
664 goto err_out_async; 639 goto err_out_async;
665 640
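Sizing the name buffer as MLX4_IRQNAME_SIZE bytes per vector (plus one slot for the async EQ) and formatting the PCI device name into each slot lets /proc/interrupts tell multiple adapters apart; the old fixed 16-byte slots could not hold the longer strings. A simplified sketch of the slot layout (helper name and constant are illustrative, not the driver's):

#include <linux/pci.h>
#include <linux/slab.h>

#define NAME_SLOT 64	/* stand-in for MLX4_IRQNAME_SIZE */

static char *build_irq_names(struct pci_dev *pdev, int nvec)
{
	char *names = kmalloc(NAME_SLOT * (nvec + 1), GFP_KERNEL);
	int i;

	if (!names)
		return NULL;

	for (i = 0; i <= nvec; i++) {
		char *slot = names + i * NAME_SLOT;

		if (i < nvec)
			snprintf(slot, NAME_SLOT, "mlx4-comp-%d@pci:%s",
				 i, pci_name(pdev));
		else
			snprintf(slot, NAME_SLOT, "mlx4-async@pci:%s",
				 pci_name(pdev));
	}
	return names;	/* each slot is later passed to request_irq() as its name */
}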
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index baf4bf66062c..04b382fcb8c8 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36#include <linux/mm.h> 35#include <linux/mm.h>
37#include <linux/scatterlist.h> 36#include <linux/scatterlist.h>
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index dac621b1e9fc..3dd481e77f92 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
525 goto err_unmap_aux; 525 goto err_unmap_aux;
526 } 526 }
527 527
528 err = mlx4_map_eq_icm(dev, init_hca->eqc_base); 528 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
529 init_hca->eqc_base, dev_cap->eqc_entry_sz,
530 dev->caps.num_eqs, dev->caps.num_eqs,
531 0, 0);
529 if (err) { 532 if (err) {
530 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 533 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
531 goto err_unmap_cmpt; 534 goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
668 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 671 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
669 672
670err_unmap_eq: 673err_unmap_eq:
671 mlx4_unmap_eq_icm(dev); 674 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
672 675
673err_unmap_cmpt: 676err_unmap_cmpt:
674 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 677 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
698 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 701 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
699 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 702 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
700 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 703 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
704 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
701 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 705 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
702 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 706 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
703 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 707 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
704 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 708 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
705 mlx4_unmap_eq_icm(dev);
706 709
707 mlx4_UNMAP_ICM_AUX(dev); 710 mlx4_UNMAP_ICM_AUX(dev);
708 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 711 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
786 return 0; 789 return 0;
787 790
788err_close: 791err_close:
789 mlx4_close_hca(dev); 792 mlx4_CLOSE_HCA(dev, 0);
790 793
791err_free_icm: 794err_free_icm:
792 mlx4_free_icms(dev); 795 mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1070 goto err_disable_pdev; 1073 goto err_disable_pdev;
1071 } 1074 }
1072 1075
1073 err = pci_request_region(pdev, 0, DRV_NAME); 1076 err = pci_request_regions(pdev, DRV_NAME);
1074 if (err) { 1077 if (err) {
1075 dev_err(&pdev->dev, "Cannot request control region, aborting.\n"); 1078 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
1076 goto err_disable_pdev; 1079 goto err_disable_pdev;
1077 } 1080 }
1078 1081
1079 err = pci_request_region(pdev, 2, DRV_NAME);
1080 if (err) {
1081 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
1082 goto err_release_bar0;
1083 }
1084
1085 pci_set_master(pdev); 1082 pci_set_master(pdev);
1086 1083
1087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1084 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1090 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1091 if (err) { 1088 if (err) {
1092 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1089 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1093 goto err_release_bar2; 1090 goto err_release_regions;
1094 } 1091 }
1095 } 1092 }
1096 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1093 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1101 if (err) { 1098 if (err) {
1102 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1099 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1103 "aborting.\n"); 1100 "aborting.\n");
1104 goto err_release_bar2; 1101 goto err_release_regions;
1105 } 1102 }
1106 } 1103 }
1107 1104
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1110 dev_err(&pdev->dev, "Device struct alloc failed, " 1107 dev_err(&pdev->dev, "Device struct alloc failed, "
1111 "aborting.\n"); 1108 "aborting.\n");
1112 err = -ENOMEM; 1109 err = -ENOMEM;
1113 goto err_release_bar2; 1110 goto err_release_regions;
1114 } 1111 }
1115 1112
1116 dev = &priv->dev; 1113 dev = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
1205err_free_dev: 1202err_free_dev:
1206 kfree(priv); 1203 kfree(priv);
1207 1204
1208err_release_bar2: 1205err_release_regions:
1209 pci_release_region(pdev, 2); 1206 pci_release_regions(pdev);
1210
1211err_release_bar0:
1212 pci_release_region(pdev, 0);
1213 1207
1214err_disable_pdev: 1208err_disable_pdev:
1215 pci_disable_device(pdev); 1209 pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1265 pci_disable_msix(pdev); 1259 pci_disable_msix(pdev);
1266 1260
1267 kfree(priv); 1261 kfree(priv);
1268 pci_release_region(pdev, 2); 1262 pci_release_regions(pdev);
1269 pci_release_region(pdev, 0);
1270 pci_disable_device(pdev); 1263 pci_disable_device(pdev);
1271 pci_set_drvdata(pdev, NULL); 1264 pci_set_drvdata(pdev, NULL);
1272 } 1265 }
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 6053c357a470..5ccbce9866fe 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/string.h> 34#include <linux/string.h>
36#include <linux/slab.h> 35#include <linux/slab.h>
37 36
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5bd79c2b184f..bc72d6e4919b 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
205 void __iomem **uar_map; 205 void __iomem **uar_map;
206 u32 clr_mask; 206 u32 clr_mask;
207 struct mlx4_eq *eq; 207 struct mlx4_eq *eq;
208 u64 icm_virt; 208 struct mlx4_icm_table table;
209 struct page *icm_page;
210 dma_addr_t icm_dma;
211 struct mlx4_icm_table cmpt_table; 209 struct mlx4_icm_table cmpt_table;
212 int have_irq; 210 int have_irq;
213 u8 inta_pin; 211 u8 inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
373 struct mlx4_dev_cap *dev_cap, 371 struct mlx4_dev_cap *dev_cap,
374 struct mlx4_init_hca_param *init_hca); 372 struct mlx4_init_hca_param *init_hca);
375 373
376int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
377void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
378
379int mlx4_cmd_init(struct mlx4_dev *dev); 374int mlx4_cmd_init(struct mlx4_dev *dev);
380void mlx4_cmd_cleanup(struct mlx4_dev *dev); 375void mlx4_cmd_cleanup(struct mlx4_dev *dev);
381void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); 376void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index f96948be0a44..ca7ab8e7b4cc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -32,7 +32,6 @@
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35#include <linux/init.h>
36#include <linux/errno.h> 35#include <linux/errno.h>
37 36
38#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 26d1a7a9e375..c4988d6bd5b2 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36 35
37#include <asm/page.h> 36#include <asm/page.h>
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index bd22df95adf9..ca25b9dc8378 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -32,8 +32,6 @@
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35#include <linux/init.h>
36
37#include "mlx4.h" 35#include "mlx4.h"
38#include "fw.h" 36#include "fw.h"
39 37
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 1c565ef8d179..42ab9fc01d3e 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -33,8 +33,6 @@
33 * SOFTWARE. 33 * SOFTWARE.
34 */ 34 */
35 35
36#include <linux/init.h>
37
38#include <linux/mlx4/cmd.h> 36#include <linux/mlx4/cmd.h>
39#include <linux/mlx4/qp.h> 37#include <linux/mlx4/qp.h>
40 38
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index 3951b884c0fb..e5741dab3825 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36#include <linux/pci.h> 35#include <linux/pci.h>
37#include <linux/delay.h> 36#include <linux/delay.h>
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index fe9f218691f5..1377d0dc8f1f 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -31,8 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35
36#include <linux/mlx4/cmd.h> 34#include <linux/mlx4/cmd.h>
37 35
38#include "mlx4.h" 36#include "mlx4.h"
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42b6c6319bc2..87214a257d2a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -130,17 +130,10 @@ static inline struct tun_sock *tun_sk(struct sock *sk)
130static int tun_attach(struct tun_struct *tun, struct file *file) 130static int tun_attach(struct tun_struct *tun, struct file *file)
131{ 131{
132 struct tun_file *tfile = file->private_data; 132 struct tun_file *tfile = file->private_data;
133 const struct cred *cred = current_cred();
134 int err; 133 int err;
135 134
136 ASSERT_RTNL(); 135 ASSERT_RTNL();
137 136
138 /* Check permissions */
139 if (((tun->owner != -1 && cred->euid != tun->owner) ||
140 (tun->group != -1 && !in_egroup_p(tun->group))) &&
141 !capable(CAP_NET_ADMIN))
142 return -EPERM;
143
144 netif_tx_lock_bh(tun->dev); 137 netif_tx_lock_bh(tun->dev);
145 138
146 err = -EINVAL; 139 err = -EINVAL;
@@ -926,6 +919,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
926 919
927 dev = __dev_get_by_name(net, ifr->ifr_name); 920 dev = __dev_get_by_name(net, ifr->ifr_name);
928 if (dev) { 921 if (dev) {
922 const struct cred *cred = current_cred();
923
929 if (ifr->ifr_flags & IFF_TUN_EXCL) 924 if (ifr->ifr_flags & IFF_TUN_EXCL)
930 return -EBUSY; 925 return -EBUSY;
931 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 926 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -935,6 +930,14 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
935 else 930 else
936 return -EINVAL; 931 return -EINVAL;
937 932
933 if (((tun->owner != -1 && cred->euid != tun->owner) ||
934 (tun->group != -1 && !in_egroup_p(tun->group))) &&
935 !capable(CAP_NET_ADMIN))
936 return -EPERM;
937 err = security_tun_dev_attach(tun->sk);
938 if (err < 0)
939 return err;
940
938 err = tun_attach(tun, file); 941 err = tun_attach(tun, file);
939 if (err < 0) 942 if (err < 0)
940 return err; 943 return err;
@@ -947,6 +950,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
947 950
948 if (!capable(CAP_NET_ADMIN)) 951 if (!capable(CAP_NET_ADMIN))
949 return -EPERM; 952 return -EPERM;
953 err = security_tun_dev_create();
954 if (err < 0)
955 return err;
950 956
951 /* Set dev type */ 957 /* Set dev type */
952 if (ifr->ifr_flags & IFF_TUN) { 958 if (ifr->ifr_flags & IFF_TUN) {
@@ -989,6 +995,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
989 tun->sk = sk; 995 tun->sk = sk;
990 container_of(sk, struct tun_sock, sk)->tun = tun; 996 container_of(sk, struct tun_sock, sk)->tun = tun;
991 997
998 security_tun_dev_post_create(sk);
999
992 tun_net_init(dev); 1000 tun_net_init(dev);
993 1001
994 if (strchr(dev->name, '%')) { 1002 if (strchr(dev->name, '%')) {
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6dcac73b4d29..f593fbbb4e52 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2874 return 0; 2874 return 0;
2875} 2875}
2876 2876
2877static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, 2877static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2878 u32 src_phys, u32 dest_address, u32 length) 2878 int nr, u32 dest_address, u32 len)
2879{ 2879{
2880 u32 bytes_left = length; 2880 int ret, i;
2881 u32 src_offset = 0; 2881 u32 size;
2882 u32 dest_offset = 0; 2882
2883 int status = 0;
2884 IPW_DEBUG_FW(">> \n"); 2883 IPW_DEBUG_FW(">> \n");
2885 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n", 2884 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2886 src_phys, dest_address, length); 2885 nr, dest_address, len);
2887 while (bytes_left > CB_MAX_LENGTH) { 2886
2888 status = ipw_fw_dma_add_command_block(priv, 2887 for (i = 0; i < nr; i++) {
2889 src_phys + src_offset, 2888 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2890 dest_address + 2889 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2891 dest_offset, 2890 dest_address +
2892 CB_MAX_LENGTH, 0, 0); 2891 i * CB_MAX_LENGTH, size,
2893 if (status) { 2892 0, 0);
2893 if (ret) {
2894 IPW_DEBUG_FW_INFO(": Failed\n"); 2894 IPW_DEBUG_FW_INFO(": Failed\n");
2895 return -1; 2895 return -1;
2896 } else 2896 } else
2897 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2897 IPW_DEBUG_FW_INFO(": Added new cb\n");
2898
2899 src_offset += CB_MAX_LENGTH;
2900 dest_offset += CB_MAX_LENGTH;
2901 bytes_left -= CB_MAX_LENGTH;
2902 }
2903
2904 /* add the buffer tail */
2905 if (bytes_left > 0) {
2906 status =
2907 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2908 dest_address + dest_offset,
2909 bytes_left, 0, 0);
2910 if (status) {
2911 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2912 return -1;
2913 } else
2914 IPW_DEBUG_FW_INFO
2915 (": Adding new cb - the buffer tail\n");
2916 } 2898 }
2917 2899
2918 IPW_DEBUG_FW("<< \n"); 2900 IPW_DEBUG_FW("<< \n");
@@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3160 3142
3161static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) 3143static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3162{ 3144{
3163 int rc = -1; 3145 int ret = -1;
3164 int offset = 0; 3146 int offset = 0;
3165 struct fw_chunk *chunk; 3147 struct fw_chunk *chunk;
3166 dma_addr_t shared_phys; 3148 int total_nr = 0;
3167 u8 *shared_virt; 3149 int i;
3150 struct pci_pool *pool;
3151 u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
3152 dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
3168 3153
3169 IPW_DEBUG_TRACE("<< : \n"); 3154 IPW_DEBUG_TRACE("<< : \n");
3170 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3171 3155
3172 if (!shared_virt) 3156 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3157 if (!pool) {
3158 IPW_ERROR("pci_pool_create failed\n");
3173 return -ENOMEM; 3159 return -ENOMEM;
3174 3160 }
3175 memmove(shared_virt, data, len);
3176 3161
3177 /* Start the Dma */ 3162 /* Start the Dma */
3178 rc = ipw_fw_dma_enable(priv); 3163 ret = ipw_fw_dma_enable(priv);
3179 3164
3180 /* the DMA is already ready this would be a bug. */ 3165 /* the DMA is already ready this would be a bug. */
3181 BUG_ON(priv->sram_desc.last_cb_index > 0); 3166 BUG_ON(priv->sram_desc.last_cb_index > 0);
3182 3167
3183 do { 3168 do {
3169 u32 chunk_len;
3170 u8 *start;
3171 int size;
3172 int nr = 0;
3173
3184 chunk = (struct fw_chunk *)(data + offset); 3174 chunk = (struct fw_chunk *)(data + offset);
3185 offset += sizeof(struct fw_chunk); 3175 offset += sizeof(struct fw_chunk);
3176 chunk_len = le32_to_cpu(chunk->length);
3177 start = data + offset;
3178
3179 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3180 for (i = 0; i < nr; i++) {
3181 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3182 &phys[total_nr]);
3183 if (!virts[total_nr]) {
3184 ret = -ENOMEM;
3185 goto out;
3186 }
3187 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3188 CB_MAX_LENGTH);
3189 memcpy(virts[total_nr], start, size);
3190 start += size;
3191 total_nr++;
3192 /* We don't support fw chunk larger than 64*8K */
3193 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3194 }
3195
3186 /* build DMA packet and queue up for sending */ 3196 /* build DMA packet and queue up for sending */
3187 /* dma to chunk->address, the chunk->length bytes from data + 3197 /* dma to chunk->address, the chunk->length bytes from data +
3188 * offeset*/ 3198 * offeset*/
3189 /* Dma loading */ 3199 /* Dma loading */
3190 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset, 3200 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3191 le32_to_cpu(chunk->address), 3201 nr, le32_to_cpu(chunk->address),
3192 le32_to_cpu(chunk->length)); 3202 chunk_len);
3193 if (rc) { 3203 if (ret) {
3194 IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); 3204 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3195 goto out; 3205 goto out;
3196 } 3206 }
3197 3207
3198 offset += le32_to_cpu(chunk->length); 3208 offset += chunk_len;
3199 } while (offset < len); 3209 } while (offset < len);
3200 3210
3201 /* Run the DMA and wait for the answer */ 3211 /* Run the DMA and wait for the answer */
3202 rc = ipw_fw_dma_kick(priv); 3212 ret = ipw_fw_dma_kick(priv);
3203 if (rc) { 3213 if (ret) {
3204 IPW_ERROR("dmaKick Failed\n"); 3214 IPW_ERROR("dmaKick Failed\n");
3205 goto out; 3215 goto out;
3206 } 3216 }
3207 3217
3208 rc = ipw_fw_dma_wait(priv); 3218 ret = ipw_fw_dma_wait(priv);
3209 if (rc) { 3219 if (ret) {
3210 IPW_ERROR("dmaWaitSync Failed\n"); 3220 IPW_ERROR("dmaWaitSync Failed\n");
3211 goto out; 3221 goto out;
3212 } 3222 }
3213 out: 3223 out:
3214 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys); 3224 for (i = 0; i < total_nr; i++)
3215 return rc; 3225 pci_pool_free(pool, virts[i], phys[i]);
3226
3227 pci_pool_destroy(pool);
3228
3229 return ret;
3216} 3230}
3217 3231
3218/* stop nic */ 3232/* stop nic */
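Instead of one large consistent allocation, each firmware chunk is now copied into CB_MAX_LENGTH-sized pci_pool buffers; the buffer count comes from a ceiling division and the last piece carries the remainder. The arithmetic, standalone and with small numbers (CB_MAX_LENGTH assumed to be 8 KiB, per the "64*8K" comment above):

#define CB_MAX_LENGTH 0x2000			/* assumed 8 KiB */

u32 chunk_len = 0x4800;				/* an 18 KiB chunk */
int nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;	/* ceiling -> 3 buffers */
int i;

for (i = 0; i < nr; i++) {
	u32 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
	/* sizes come out as 0x2000, 0x2000, 0x800 -- the tail is the remainder */
}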
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 242257b19441..a7aae24f2889 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -21,7 +21,6 @@
21 21
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/oprofile.h> 23#include <linux/oprofile.h>
24#include <linux/vmalloc.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26 25
27#include "event_buffer.h" 26#include "event_buffer.h"
@@ -407,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val)
407 return op_cpu_buffer_add_data(entry, val); 406 return op_cpu_buffer_add_data(entry, val);
408} 407}
409 408
409int oprofile_add_data64(struct op_entry *entry, u64 val)
410{
411 if (!entry->event)
412 return 0;
413 if (op_cpu_buffer_get_size(entry) < 2)
414 /*
415 * the function returns 0 to indicate a too small
416 * buffer, even if there is some space left
417 */
418 return 0;
419 if (!op_cpu_buffer_add_data(entry, (u32)val))
420 return 0;
421 return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
422}
423
410int oprofile_write_commit(struct op_entry *entry) 424int oprofile_write_commit(struct op_entry *entry)
411{ 425{
412 if (!entry->event) 426 if (!entry->event)
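oprofile_add_data64() stores a 64-bit value as two consecutive 32-bit buffer entries, low word first, after checking that two slots are actually available. The consumer reassembles it the obvious way; a quick standalone round-trip check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;

	uint32_t lo = (uint32_t)val;		/* 0x55667788, written first  */
	uint32_t hi = (uint32_t)(val >> 32);	/* 0x11223344, written second */

	uint64_t back = ((uint64_t)hi << 32) | lo;
	assert(back == val);
	return 0;
}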
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82a..dc8a0428260d 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -12,6 +12,8 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/workqueue.h>
16#include <linux/time.h>
15#include <asm/mutex.h> 17#include <asm/mutex.h>
16 18
17#include "oprof.h" 19#include "oprof.h"
@@ -87,6 +89,69 @@ out:
87 return err; 89 return err;
88} 90}
89 91
92#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
93
94static void switch_worker(struct work_struct *work);
95static DECLARE_DELAYED_WORK(switch_work, switch_worker);
96
97static void start_switch_worker(void)
98{
99 if (oprofile_ops.switch_events)
100 schedule_delayed_work(&switch_work, oprofile_time_slice);
101}
102
103static void stop_switch_worker(void)
104{
105 cancel_delayed_work_sync(&switch_work);
106}
107
108static void switch_worker(struct work_struct *work)
109{
110 if (oprofile_ops.switch_events())
111 return;
112
113 atomic_inc(&oprofile_stats.multiplex_counter);
114 start_switch_worker();
115}
116
117/* User inputs in ms, converts to jiffies */
118int oprofile_set_timeout(unsigned long val_msec)
119{
120 int err = 0;
121 unsigned long time_slice;
122
123 mutex_lock(&start_mutex);
124
125 if (oprofile_started) {
126 err = -EBUSY;
127 goto out;
128 }
129
130 if (!oprofile_ops.switch_events) {
131 err = -EINVAL;
132 goto out;
133 }
134
135 time_slice = msecs_to_jiffies(val_msec);
136 if (time_slice == MAX_JIFFY_OFFSET) {
137 err = -EINVAL;
138 goto out;
139 }
140
141 oprofile_time_slice = time_slice;
142
143out:
144 mutex_unlock(&start_mutex);
145 return err;
146
147}
148
149#else
150
151static inline void start_switch_worker(void) { }
152static inline void stop_switch_worker(void) { }
153
154#endif
90 155
91/* Actually start profiling (echo 1>/dev/oprofile/enable) */ 156/* Actually start profiling (echo 1>/dev/oprofile/enable) */
92int oprofile_start(void) 157int oprofile_start(void)
@@ -108,6 +173,8 @@ int oprofile_start(void)
108 if ((err = oprofile_ops.start())) 173 if ((err = oprofile_ops.start()))
109 goto out; 174 goto out;
110 175
176 start_switch_worker();
177
111 oprofile_started = 1; 178 oprofile_started = 1;
112out: 179out:
113 mutex_unlock(&start_mutex); 180 mutex_unlock(&start_mutex);
@@ -123,6 +190,9 @@ void oprofile_stop(void)
123 goto out; 190 goto out;
124 oprofile_ops.stop(); 191 oprofile_ops.stop();
125 oprofile_started = 0; 192 oprofile_started = 0;
193
194 stop_switch_worker();
195
126 /* wake up the daemon to read what remains */ 196 /* wake up the daemon to read what remains */
127 wake_up_buffer_waiter(); 197 wake_up_buffer_waiter();
128out: 198out:
@@ -155,7 +225,6 @@ post_sync:
155 mutex_unlock(&start_mutex); 225 mutex_unlock(&start_mutex);
156} 226}
157 227
158
159int oprofile_set_backtrace(unsigned long val) 228int oprofile_set_backtrace(unsigned long val)
160{ 229{
161 int err = 0; 230 int err = 0;
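The event-multiplexing machinery added above is a self-rearming delayed work item: switch_worker() calls the arch switch_events() hook, bumps the multiplex counter, and reschedules itself after oprofile_time_slice jiffies, while oprofile_stop() tears it down with cancel_delayed_work_sync(). The same pattern in isolation (do_switch() is a hypothetical stand-in for the real hook):

#include <linux/workqueue.h>

static unsigned long my_slice = 10;		/* jiffies between switches */

static int do_switch(void);			/* hypothetical; non-zero means error */

static void my_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_work, my_worker);

static void my_worker(struct work_struct *work)
{
	if (do_switch())
		return;				/* stop rearming on failure */
	schedule_delayed_work(&my_work, my_slice);
}

static void my_start(void) { schedule_delayed_work(&my_work, my_slice); }
static void my_stop(void)  { cancel_delayed_work_sync(&my_work); }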
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index c288d3c24b50..cb92f5c98c1a 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -24,6 +24,8 @@ struct oprofile_operations;
24extern unsigned long oprofile_buffer_size; 24extern unsigned long oprofile_buffer_size;
25extern unsigned long oprofile_cpu_buffer_size; 25extern unsigned long oprofile_cpu_buffer_size;
26extern unsigned long oprofile_buffer_watershed; 26extern unsigned long oprofile_buffer_watershed;
27extern unsigned long oprofile_time_slice;
28
27extern struct oprofile_operations oprofile_ops; 29extern struct oprofile_operations oprofile_ops;
28extern unsigned long oprofile_started; 30extern unsigned long oprofile_started;
29extern unsigned long oprofile_backtrace_depth; 31extern unsigned long oprofile_backtrace_depth;
@@ -35,5 +37,6 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
35void oprofile_timer_init(struct oprofile_operations *ops); 37void oprofile_timer_init(struct oprofile_operations *ops);
36 38
37int oprofile_set_backtrace(unsigned long depth); 39int oprofile_set_backtrace(unsigned long depth);
40int oprofile_set_timeout(unsigned long time);
38 41
39#endif /* OPROF_H */ 42#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 5d36ffc30dd5..bbd7516e0869 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/jiffies.h>
12 13
13#include "event_buffer.h" 14#include "event_buffer.h"
14#include "oprofile_stats.h" 15#include "oprofile_stats.h"
@@ -17,10 +18,51 @@
17#define BUFFER_SIZE_DEFAULT 131072 18#define BUFFER_SIZE_DEFAULT 131072
18#define CPU_BUFFER_SIZE_DEFAULT 8192 19#define CPU_BUFFER_SIZE_DEFAULT 8192
19#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ 20#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
21#define TIME_SLICE_DEFAULT 1
20 22
21unsigned long oprofile_buffer_size; 23unsigned long oprofile_buffer_size;
22unsigned long oprofile_cpu_buffer_size; 24unsigned long oprofile_cpu_buffer_size;
23unsigned long oprofile_buffer_watershed; 25unsigned long oprofile_buffer_watershed;
26unsigned long oprofile_time_slice;
27
28#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
29
30static ssize_t timeout_read(struct file *file, char __user *buf,
31 size_t count, loff_t *offset)
32{
33 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
34 buf, count, offset);
35}
36
37
38static ssize_t timeout_write(struct file *file, char const __user *buf,
39 size_t count, loff_t *offset)
40{
41 unsigned long val;
42 int retval;
43
44 if (*offset)
45 return -EINVAL;
46
47 retval = oprofilefs_ulong_from_user(&val, buf, count);
48 if (retval)
49 return retval;
50
51 retval = oprofile_set_timeout(val);
52
53 if (retval)
54 return retval;
55 return count;
56}
57
58
59static const struct file_operations timeout_fops = {
60 .read = timeout_read,
61 .write = timeout_write,
62};
63
64#endif
65
24 66
25static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 67static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
26{ 68{
@@ -129,6 +171,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
129 oprofile_buffer_size = BUFFER_SIZE_DEFAULT; 171 oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
130 oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; 172 oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
131 oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; 173 oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
174 oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
132 175
133 oprofilefs_create_file(sb, root, "enable", &enable_fops); 176 oprofilefs_create_file(sb, root, "enable", &enable_fops);
134 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); 177 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -139,6 +182,9 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
139 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 182 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
140 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); 183 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
141 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); 184 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
185#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
186 oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
187#endif
142 oprofile_create_stats_files(sb, root); 188 oprofile_create_stats_files(sb, root);
143 if (oprofile_ops.create_files) 189 if (oprofile_ops.create_files)
144 oprofile_ops.create_files(sb, root); 190 oprofile_ops.create_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 3c2270a8300c..61689e814d46 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -34,6 +34,7 @@ void oprofile_reset_stats(void)
34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); 34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35 atomic_set(&oprofile_stats.event_lost_overflow, 0); 35 atomic_set(&oprofile_stats.event_lost_overflow, 0);
36 atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); 36 atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
37 atomic_set(&oprofile_stats.multiplex_counter, 0);
37} 38}
38 39
39 40
@@ -76,4 +77,8 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
76 &oprofile_stats.event_lost_overflow); 77 &oprofile_stats.event_lost_overflow);
77 oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping", 78 oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
78 &oprofile_stats.bt_lost_no_mapping); 79 &oprofile_stats.bt_lost_no_mapping);
80#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
81 oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
82 &oprofile_stats.multiplex_counter);
83#endif
79} 84}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 3da0d08dc1f9..0b54e46c3c14 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -17,6 +17,7 @@ struct oprofile_stat_struct {
17 atomic_t sample_lost_no_mapping; 17 atomic_t sample_lost_no_mapping;
18 atomic_t bt_lost_no_mapping; 18 atomic_t bt_lost_no_mapping;
19 atomic_t event_lost_overflow; 19 atomic_t event_lost_overflow;
20 atomic_t multiplex_counter;
20}; 21};
21 22
22extern struct oprofile_stat_struct oprofile_stats; 23extern struct oprofile_stat_struct oprofile_stats;
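[Note on the hunks above] The new multiplex_counter statistic is reset together with the other counters and exported read-only under stats/ only when CONFIG_OPROFILE_EVENT_MULTIPLEX is set. The call site that increments it is not shown in this series excerpt; presumably the event-switching code bumps it once per rotation, roughly:

	/* assumption: executed each time the active hardware event set is rotated */
	atomic_inc(&oprofile_stats.multiplex_counter);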
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 4f5b8712931f..44803644ca05 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -55,15 +55,12 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
55 return desc->irq_2_iommu; 55 return desc->irq_2_iommu;
56} 56}
57 57
58static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) 58static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
59{ 59{
60 struct irq_desc *desc; 60 struct irq_desc *desc;
61 struct irq_2_iommu *irq_iommu; 61 struct irq_2_iommu *irq_iommu;
62 62
63 /* 63 desc = irq_to_desc(irq);
64 * alloc irq desc if not allocated already.
65 */
66 desc = irq_to_desc_alloc_node(irq, node);
67 if (!desc) { 64 if (!desc) {
68 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 65 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
69 return NULL; 66 return NULL;
@@ -72,16 +69,11 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
72 irq_iommu = desc->irq_2_iommu; 69 irq_iommu = desc->irq_2_iommu;
73 70
74 if (!irq_iommu) 71 if (!irq_iommu)
75 desc->irq_2_iommu = get_one_free_irq_2_iommu(node); 72 desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
76 73
77 return desc->irq_2_iommu; 74 return desc->irq_2_iommu;
78} 75}
79 76
80static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
81{
82 return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
83}
84
85#else /* !CONFIG_SPARSE_IRQ */ 77#else /* !CONFIG_SPARSE_IRQ */
86 78
87static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 79static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e3a87210e947..e03fe98f0619 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
598} 598}
599 599
600/** 600/**
601 * pci_sriov_resource_alignment - get resource alignment for VF BAR
602 * @dev: the PCI device
603 * @resno: the resource number
604 *
605 * Returns the alignment of the VF BAR found in the SR-IOV capability.
606 * This is not the same as the resource size which is defined as
607 * the VF BAR size multiplied by the number of VFs. The alignment
608 * is just the VF BAR size.
609 */
610int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
611{
612 struct resource tmp;
613 enum pci_bar_type type;
614 int reg = pci_iov_resource_bar(dev, resno, &type);
615
616 if (!reg)
617 return 0;
618
619 __pci_read_base(dev, type, &tmp, reg);
620 return resource_alignment(&tmp);
621}
622
623/**
601 * pci_restore_iov_state - restore the state of the IOV capability 624 * pci_restore_iov_state - restore the state of the IOV capability
602 * @dev: the PCI device 625 * @dev: the PCI device
603 */ 626 */
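[Note on the kernel-doc above] A worked example of the size-versus-alignment distinction pci_sriov_resource_alignment() is about (the numbers are illustrative only, not taken from the patch): with an 8 KiB VF BAR and 64 VFs, the reserved resource is 512 KiB, yet placing it only requires the per-VF BAR alignment of 8 KiB.

	resource_size_t vf_bar_sz = 8 * 1024;              /* single VF BAR size     */
	u16 total_vfs             = 64;                     /* from the SR-IOV cap    */
	resource_size_t size      = vf_bar_sz * total_vfs;  /* 512 KiB resource       */
	resource_size_t align     = vf_bar_sz;              /* only 8 KiB alignment   */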
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f73bcbedf37c..5ff4d25bf0e9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev);
243extern void pci_iov_release(struct pci_dev *dev); 243extern void pci_iov_release(struct pci_dev *dev);
244extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 244extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
245 enum pci_bar_type *type); 245 enum pci_bar_type *type);
246extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
246extern void pci_restore_iov_state(struct pci_dev *dev); 247extern void pci_restore_iov_state(struct pci_dev *dev);
247extern int pci_iov_bus_range(struct pci_bus *bus); 248extern int pci_iov_bus_range(struct pci_bus *bus);
248 249
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
298} 299}
299#endif /* CONFIG_PCI_IOV */ 300#endif /* CONFIG_PCI_IOV */
300 301
302static inline int pci_resource_alignment(struct pci_dev *dev,
303 struct resource *res)
304{
305#ifdef CONFIG_PCI_IOV
306 int resno = res - dev->resource;
307
308 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
309 return pci_sriov_resource_alignment(dev, resno);
310#endif
311 return resource_alignment(res);
312}
313
301#endif /* DRIVERS_PCI_H */ 314#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index b636e245445d..7c443b4583ab 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,7 +25,7 @@
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28#include "pci.h"
29 29
30static void pbus_assign_resources_sorted(const struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
384 continue; 384 continue;
385 r_size = resource_size(r); 385 r_size = resource_size(r);
386 /* For bridges size != alignment */ 386 /* For bridges size != alignment */
387 align = resource_alignment(r); 387 align = pci_resource_alignment(dev, r);
388 order = __ffs(align) - 20; 388 order = __ffs(align) - 20;
389 if (order > 11) { 389 if (order > 11) {
390 dev_warn(&dev->dev, "BAR %d bad alignment %llx: " 390 dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1898c7b47907..88cdd1a937d6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
144 144
145 size = resource_size(res); 145 size = resource_size(res);
146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
147 align = resource_alignment(res); 147 align = pci_resource_alignment(dev, res);
148 148
149 /* First, try exact prefetching match.. */ 149 /* First, try exact prefetching match.. */
150 ret = pci_bus_alloc_resource(bus, res, size, align, min, 150 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
178 struct pci_bus *bus; 178 struct pci_bus *bus;
179 int ret; 179 int ret;
180 180
181 align = resource_alignment(res); 181 align = pci_resource_alignment(dev, res);
182 if (!align) { 182 if (!align) {
183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " 183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
184 "alignment) %pR flags %#lx\n", 184 "alignment) %pR flags %#lx\n",
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
259 if (!(r->flags) || r->parent) 259 if (!(r->flags) || r->parent)
260 continue; 260 continue;
261 261
262 r_align = resource_alignment(r); 262 r_align = pci_resource_alignment(dev, r);
263 if (!r_align) { 263 if (!r_align) {
264 dev_warn(&dev->dev, "BAR %d: bogus alignment " 264 dev_warn(&dev->dev, "BAR %d: bogus alignment "
265 "%pR flags %#lx\n", 265 "%pR flags %#lx\n",
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
271 struct resource_list *ln = list->next; 271 struct resource_list *ln = list->next;
272 272
273 if (ln) 273 if (ln)
274 align = resource_alignment(ln->res); 274 align = pci_resource_alignment(ln->dev, ln->res);
275 275
276 if (r_align > align) { 276 if (r_align > align) {
277 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); 277 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
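[Note on the setup-bus.c / setup-res.c hunks above] All generic alignment queries now go through the new pci_resource_alignment() wrapper; note that pdev_sort_resources() also passes the owning device of the neighbouring list entry rather than the device being sorted. Condensed, with the CONFIG_PCI_IOV #ifdef dropped for readability, the wrapper decides:

	static inline int pci_resource_alignment(struct pci_dev *dev,
						 struct resource *res)
	{
		int resno = res - dev->resource;	/* index of this BAR */

		if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
			return pci_sriov_resource_alignment(dev, resno); /* VF BAR size */
		return resource_alignment(res);				 /* default     */
	}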
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 3f62dd50bbbe..e109da4583a8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -669,14 +669,14 @@ static void dasd_profile_end(struct dasd_block *block,
669 * memory and 2) dasd_smalloc_request uses the static ccw memory 669 * memory and 2) dasd_smalloc_request uses the static ccw memory
670 * that gets allocated for each device. 670 * that gets allocated for each device.
671 */ 671 */
672struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, 672struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
673 int datasize, 673 int datasize,
674 struct dasd_device *device) 674 struct dasd_device *device)
675{ 675{
676 struct dasd_ccw_req *cqr; 676 struct dasd_ccw_req *cqr;
677 677
678 /* Sanity checks */ 678 /* Sanity checks */
679 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 679 BUG_ON(datasize > PAGE_SIZE ||
680 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 680 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
681 681
682 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 682 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
@@ -700,14 +700,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
700 return ERR_PTR(-ENOMEM); 700 return ERR_PTR(-ENOMEM);
701 } 701 }
702 } 702 }
703 strncpy((char *) &cqr->magic, magic, 4); 703 cqr->magic = magic;
704 ASCEBC((char *) &cqr->magic, 4);
705 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 704 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
706 dasd_get_device(device); 705 dasd_get_device(device);
707 return cqr; 706 return cqr;
708} 707}
709 708
710struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, 709struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
711 int datasize, 710 int datasize,
712 struct dasd_device *device) 711 struct dasd_device *device)
713{ 712{
@@ -717,7 +716,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
717 int size; 716 int size;
718 717
719 /* Sanity checks */ 718 /* Sanity checks */
720 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 719 BUG_ON(datasize > PAGE_SIZE ||
721 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 720 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
722 721
723 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 722 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
@@ -744,8 +743,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
744 cqr->data = data; 743 cqr->data = data;
745 memset(cqr->data, 0, datasize); 744 memset(cqr->data, 0, datasize);
746 } 745 }
747 strncpy((char *) &cqr->magic, magic, 4); 746 cqr->magic = magic;
748 ASCEBC((char *) &cqr->magic, 4);
749 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 747 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
750 dasd_get_device(device); 748 dasd_get_device(device);
751 return cqr; 749 return cqr;
@@ -899,9 +897,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
899 switch (rc) { 897 switch (rc) {
900 case 0: 898 case 0:
901 cqr->status = DASD_CQR_IN_IO; 899 cqr->status = DASD_CQR_IN_IO;
902 DBF_DEV_EVENT(DBF_DEBUG, device,
903 "start_IO: request %p started successful",
904 cqr);
905 break; 900 break;
906 case -EBUSY: 901 case -EBUSY:
907 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 902 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
@@ -1699,8 +1694,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1699 * for that. State DASD_STATE_ONLINE is normal block device 1694 * for that. State DASD_STATE_ONLINE is normal block device
1700 * operation. 1695 * operation.
1701 */ 1696 */
1702 if (basedev->state < DASD_STATE_READY) 1697 if (basedev->state < DASD_STATE_READY) {
1698 while ((req = blk_fetch_request(block->request_queue)))
1699 __blk_end_request_all(req, -EIO);
1703 return; 1700 return;
1701 }
1704 /* Now we try to fetch requests from the request queue */ 1702 /* Now we try to fetch requests from the request queue */
1705 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1703 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1706 if (basedev->features & DASD_FEATURE_READONLY && 1704 if (basedev->features & DASD_FEATURE_READONLY &&
@@ -2530,7 +2528,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2530static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 2528static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2531 void *rdc_buffer, 2529 void *rdc_buffer,
2532 int rdc_buffer_size, 2530 int rdc_buffer_size,
2533 char *magic) 2531 int magic)
2534{ 2532{
2535 struct dasd_ccw_req *cqr; 2533 struct dasd_ccw_req *cqr;
2536 struct ccw1 *ccw; 2534 struct ccw1 *ccw;
@@ -2561,7 +2559,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2561} 2559}
2562 2560
2563 2561
2564int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, 2562int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2565 void *rdc_buffer, int rdc_buffer_size) 2563 void *rdc_buffer, int rdc_buffer_size)
2566{ 2564{
2567 int ret; 2565 int ret;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 27991b692056..e8ff7b0c961d 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,7 +7,7 @@
7 * 7 *
8 */ 8 */
9 9
10#define KMSG_COMPONENT "dasd" 10#define KMSG_COMPONENT "dasd-eckd"
11 11
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5b7bbc87593b..70a008c00522 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -5,7 +5,7 @@
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */ 6 */
7 7
8#define KMSG_COMPONENT "dasd" 8#define KMSG_COMPONENT "dasd-eckd"
9 9
10#include <linux/list.h> 10#include <linux/list.h>
11#include <asm/ebcdic.h> 11#include <asm/ebcdic.h>
@@ -379,8 +379,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
379 int rc; 379 int rc;
380 unsigned long flags; 380 unsigned long flags;
381 381
382 cqr = dasd_kmalloc_request("ECKD", 382 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
383 1 /* PSF */ + 1 /* RSSD */ ,
384 (sizeof(struct dasd_psf_prssd_data)), 383 (sizeof(struct dasd_psf_prssd_data)),
385 device); 384 device);
386 if (IS_ERR(cqr)) 385 if (IS_ERR(cqr))
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 644086ba2ede..4e49b4a6c880 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#define KMSG_COMPONENT "dasd" 11#define KMSG_COMPONENT "dasd-diag"
12 12
13#include <linux/stddef.h> 13#include <linux/stddef.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
@@ -523,8 +523,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
523 /* Build the request */ 523 /* Build the request */
524 datasize = sizeof(struct dasd_diag_req) + 524 datasize = sizeof(struct dasd_diag_req) +
525 count*sizeof(struct dasd_diag_bio); 525 count*sizeof(struct dasd_diag_bio);
526 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, 526 cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
527 datasize, memdev);
528 if (IS_ERR(cqr)) 527 if (IS_ERR(cqr))
529 return cqr; 528 return cqr;
530 529
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c11770f5b368..a1ce573648a2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,7 +10,7 @@
10 * Author.........: Nigel Hislop <hislop_nigel@emc.com> 10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11 */ 11 */
12 12
13#define KMSG_COMPONENT "dasd" 13#define KMSG_COMPONENT "dasd-eckd"
14 14
15#include <linux/stddef.h> 15#include <linux/stddef.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -730,7 +730,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
730 struct dasd_ccw_req *cqr; 730 struct dasd_ccw_req *cqr;
731 struct ccw1 *ccw; 731 struct ccw1 *ccw;
732 732
733 cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device); 733 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
734 device);
734 735
735 if (IS_ERR(cqr)) { 736 if (IS_ERR(cqr)) {
736 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 737 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -934,8 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
934 struct dasd_eckd_private *private; 935 struct dasd_eckd_private *private;
935 936
936 private = (struct dasd_eckd_private *) device->private; 937 private = (struct dasd_eckd_private *) device->private;
937 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 938 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
938 1 /* PSF */ + 1 /* RSSD */ ,
939 (sizeof(struct dasd_psf_prssd_data) + 939 (sizeof(struct dasd_psf_prssd_data) +
940 sizeof(struct dasd_rssd_features)), 940 sizeof(struct dasd_rssd_features)),
941 device); 941 device);
@@ -998,7 +998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
998 struct dasd_psf_ssc_data *psf_ssc_data; 998 struct dasd_psf_ssc_data *psf_ssc_data;
999 struct ccw1 *ccw; 999 struct ccw1 *ccw;
1000 1000
1001 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , 1001 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1002 sizeof(struct dasd_psf_ssc_data), 1002 sizeof(struct dasd_psf_ssc_data),
1003 device); 1003 device);
1004 1004
@@ -1149,8 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1149 goto out_err3; 1149 goto out_err3;
1150 1150
1151 /* Read Device Characteristics */ 1151 /* Read Device Characteristics */
1152 rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data, 1152 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1153 64); 1153 &private->rdc_data, 64);
1154 if (rc) { 1154 if (rc) {
1155 DBF_EVENT(DBF_WARNING, 1155 DBF_EVENT(DBF_WARNING,
1156 "Read device characteristics failed, rc=%d for " 1156 "Read device characteristics failed, rc=%d for "
@@ -1217,8 +1217,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1217 1217
1218 cplength = 8; 1218 cplength = 8;
1219 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 1219 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1220 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1220 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1221 cplength, datasize, device);
1222 if (IS_ERR(cqr)) 1221 if (IS_ERR(cqr))
1223 return cqr; 1222 return cqr;
1224 ccw = cqr->cpaddr; 1223 ccw = cqr->cpaddr;
@@ -1499,8 +1498,7 @@ dasd_eckd_format_device(struct dasd_device * device,
1499 return ERR_PTR(-EINVAL); 1498 return ERR_PTR(-EINVAL);
1500 } 1499 }
1501 /* Allocate the format ccw request. */ 1500 /* Allocate the format ccw request. */
1502 fcp = dasd_smalloc_request(dasd_eckd_discipline.name, 1501 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1503 cplength, datasize, device);
1504 if (IS_ERR(fcp)) 1502 if (IS_ERR(fcp))
1505 return fcp; 1503 return fcp;
1506 1504
@@ -1783,8 +1781,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1783 datasize += count*sizeof(struct LO_eckd_data); 1781 datasize += count*sizeof(struct LO_eckd_data);
1784 } 1782 }
1785 /* Allocate the ccw request. */ 1783 /* Allocate the ccw request. */
1786 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1784 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1787 cplength, datasize, startdev); 1785 startdev);
1788 if (IS_ERR(cqr)) 1786 if (IS_ERR(cqr))
1789 return cqr; 1787 return cqr;
1790 ccw = cqr->cpaddr; 1788 ccw = cqr->cpaddr;
@@ -1948,8 +1946,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1948 cidaw * sizeof(unsigned long long); 1946 cidaw * sizeof(unsigned long long);
1949 1947
1950 /* Allocate the ccw request. */ 1948 /* Allocate the ccw request. */
1951 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1949 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1952 cplength, datasize, startdev); 1950 startdev);
1953 if (IS_ERR(cqr)) 1951 if (IS_ERR(cqr))
1954 return cqr; 1952 return cqr;
1955 ccw = cqr->cpaddr; 1953 ccw = cqr->cpaddr;
@@ -2249,8 +2247,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2249 2247
2250 /* Allocate the ccw request. */ 2248 /* Allocate the ccw request. */
2251 itcw_size = itcw_calc_size(0, ctidaw, 0); 2249 itcw_size = itcw_calc_size(0, ctidaw, 0);
2252 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2250 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2253 0, itcw_size, startdev);
2254 if (IS_ERR(cqr)) 2251 if (IS_ERR(cqr))
2255 return cqr; 2252 return cqr;
2256 2253
@@ -2557,8 +2554,7 @@ dasd_eckd_release(struct dasd_device *device)
2557 if (!capable(CAP_SYS_ADMIN)) 2554 if (!capable(CAP_SYS_ADMIN))
2558 return -EACCES; 2555 return -EACCES;
2559 2556
2560 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2557 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2561 1, 32, device);
2562 if (IS_ERR(cqr)) { 2558 if (IS_ERR(cqr)) {
2563 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2559 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2564 "Could not allocate initialization request"); 2560 "Could not allocate initialization request");
@@ -2600,8 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
2600 if (!capable(CAP_SYS_ADMIN)) 2596 if (!capable(CAP_SYS_ADMIN))
2601 return -EACCES; 2597 return -EACCES;
2602 2598
2603 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2599 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2604 1, 32, device);
2605 if (IS_ERR(cqr)) { 2600 if (IS_ERR(cqr)) {
2606 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2601 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2607 "Could not allocate initialization request"); 2602 "Could not allocate initialization request");
@@ -2642,8 +2637,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
2642 if (!capable(CAP_SYS_ADMIN)) 2637 if (!capable(CAP_SYS_ADMIN))
2643 return -EACCES; 2638 return -EACCES;
2644 2639
2645 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2640 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2646 1, 32, device);
2647 if (IS_ERR(cqr)) { 2641 if (IS_ERR(cqr)) {
2648 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2642 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2649 "Could not allocate initialization request"); 2643 "Could not allocate initialization request");
@@ -2681,8 +2675,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2681 struct ccw1 *ccw; 2675 struct ccw1 *ccw;
2682 int rc; 2676 int rc;
2683 2677
2684 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2678 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
2685 1 /* PSF */ + 1 /* RSSD */ ,
2686 (sizeof(struct dasd_psf_prssd_data) + 2679 (sizeof(struct dasd_psf_prssd_data) +
2687 sizeof(struct dasd_rssd_perf_stats_t)), 2680 sizeof(struct dasd_rssd_perf_stats_t)),
2688 device); 2681 device);
@@ -2828,7 +2821,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2828 } 2821 }
2829 2822
2830 /* setup CCWs for PSF + RSSD */ 2823 /* setup CCWs for PSF + RSSD */
2831 cqr = dasd_smalloc_request("ECKD", 2 , 0, device); 2824 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
2832 if (IS_ERR(cqr)) { 2825 if (IS_ERR(cqr)) {
2833 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2826 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2834 "Could not allocate initialization request"); 2827 "Could not allocate initialization request");
@@ -3254,7 +3247,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3254 3247
3255 /* Read Device Characteristics */ 3248 /* Read Device Characteristics */
3256 memset(&private->rdc_data, 0, sizeof(private->rdc_data)); 3249 memset(&private->rdc_data, 0, sizeof(private->rdc_data));
3257 rc = dasd_generic_read_dev_chars(device, "ECKD", 3250 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3258 &private->rdc_data, 64); 3251 &private->rdc_data, 64);
3259 if (rc) { 3252 if (rc) {
3260 DBF_EVENT(DBF_WARNING, 3253 DBF_EVENT(DBF_WARNING,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c24c8c30380d..d96039eae59b 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -6,7 +6,7 @@
6 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 6 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7 */ 7 */
8 8
9#define KMSG_COMPONENT "dasd" 9#define KMSG_COMPONENT "dasd-eckd"
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
@@ -464,7 +464,7 @@ int dasd_eer_enable(struct dasd_device *device)
464 if (!device->discipline || strcmp(device->discipline->name, "ECKD")) 464 if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
465 return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ 465 return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
466 466
467 cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, 467 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
468 SNSS_DATA_SIZE, device); 468 SNSS_DATA_SIZE, device);
469 if (IS_ERR(cqr)) 469 if (IS_ERR(cqr))
470 return -ENOMEM; 470 return -ENOMEM;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index cb8f9cef7429..7656384a811d 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -99,8 +99,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
99 cqr->lpm = LPM_ANYPATH; 99 cqr->lpm = LPM_ANYPATH;
100 cqr->status = DASD_CQR_FILLED; 100 cqr->status = DASD_CQR_FILLED;
101 } else { 101 } else {
102 dev_err(&device->cdev->dev, 102 pr_err("%s: default ERP has run out of retries and failed\n",
103 "default ERP has run out of retries and failed\n"); 103 dev_name(&device->cdev->dev));
104 cqr->status = DASD_CQR_FAILED; 104 cqr->status = DASD_CQR_FAILED;
105 cqr->stopclk = get_clock(); 105 cqr->stopclk = get_clock();
106 } 106 }
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 31849ad5e59f..f245377e8e27 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -5,7 +5,7 @@
5 * Copyright IBM Corp. 1999, 2009 5 * Copyright IBM Corp. 1999, 2009
6 */ 6 */
7 7
8#define KMSG_COMPONENT "dasd" 8#define KMSG_COMPONENT "dasd-fba"
9 9
10#include <linux/stddef.h> 10#include <linux/stddef.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
@@ -152,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
152 block->base = device; 152 block->base = device;
153 153
154 /* Read Device Characteristics */ 154 /* Read Device Characteristics */
155 rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data, 155 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
156 32); 156 &private->rdc_data, 32);
157 if (rc) { 157 if (rc) {
158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
159 "error %d for device: %s", 159 "error %d for device: %s",
@@ -305,8 +305,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
305 datasize += (count - 1)*sizeof(struct LO_fba_data); 305 datasize += (count - 1)*sizeof(struct LO_fba_data);
306 } 306 }
307 /* Allocate the ccw request. */ 307 /* Allocate the ccw request. */
308 cqr = dasd_smalloc_request(dasd_fba_discipline.name, 308 cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
309 cplength, datasize, memdev);
310 if (IS_ERR(cqr)) 309 if (IS_ERR(cqr))
311 return cqr; 310 return cqr;
312 ccw = cqr->cpaddr; 311 ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index b699ca356ac5..5e47a1ee52b9 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -59,6 +59,11 @@
59#include <asm/dasd.h> 59#include <asm/dasd.h>
60#include <asm/idals.h> 60#include <asm/idals.h>
61 61
62/* DASD discipline magic */
63#define DASD_ECKD_MAGIC 0xC5C3D2C4
64#define DASD_DIAG_MAGIC 0xC4C9C1C7
65#define DASD_FBA_MAGIC 0xC6C2C140
66
62/* 67/*
63 * SECTION: Type definitions 68 * SECTION: Type definitions
64 */ 69 */
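[Note on the hunk above] The three new constants are the old four-character discipline strings pre-encoded in EBCDIC ('E' = 0xC5, 'C' = 0xC3, 'K' = 0xD2, 'D' = 0xC4, space = 0x40), which is why the runtime strncpy()/ASCEBC() pair in dasd.c could be dropped. Before and after at a request-allocation call site, taken from the removed and added lines earlier in this series:

	/* old: encode the magic at run time */
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);

	/* new: the EBCDIC value is already baked into the constant */
	cqr->magic = DASD_ECKD_MAGIC;	/* 0xC5C3D2C4 == "ECKD" in EBCDIC */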
@@ -540,9 +545,9 @@ extern struct block_device_operations dasd_device_operations;
540extern struct kmem_cache *dasd_page_cache; 545extern struct kmem_cache *dasd_page_cache;
541 546
542struct dasd_ccw_req * 547struct dasd_ccw_req *
543dasd_kmalloc_request(char *, int, int, struct dasd_device *); 548dasd_kmalloc_request(int , int, int, struct dasd_device *);
544struct dasd_ccw_req * 549struct dasd_ccw_req *
545dasd_smalloc_request(char *, int, int, struct dasd_device *); 550dasd_smalloc_request(int , int, int, struct dasd_device *);
546void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); 551void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
547void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); 552void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
548 553
@@ -587,7 +592,7 @@ void dasd_generic_handle_state_change(struct dasd_device *);
587int dasd_generic_pm_freeze(struct ccw_device *); 592int dasd_generic_pm_freeze(struct ccw_device *);
588int dasd_generic_restore_device(struct ccw_device *); 593int dasd_generic_restore_device(struct ccw_device *);
589 594
590int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); 595int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
591char *dasd_get_sense(struct irb *); 596char *dasd_get_sense(struct irb *);
592 597
593/* externals in dasd_devmap.c */ 598/* externals in dasd_devmap.c */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index df918ef27965..f756a1b0c57a 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -98,8 +98,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
98 if (!capable (CAP_SYS_ADMIN)) 98 if (!capable (CAP_SYS_ADMIN))
99 return -EACCES; 99 return -EACCES;
100 100
101 dev_info(&base->cdev->dev, "The DASD has been put in the quiesce " 101 pr_info("%s: The DASD has been put in the quiesce "
102 "state\n"); 102 "state\n", dev_name(&base->cdev->dev));
103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
104 base->stopped |= DASD_STOPPED_QUIESCE; 104 base->stopped |= DASD_STOPPED_QUIESCE;
105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -119,8 +119,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
119 if (!capable (CAP_SYS_ADMIN)) 119 if (!capable (CAP_SYS_ADMIN))
120 return -EACCES; 120 return -EACCES;
121 121
122 dev_info(&base->cdev->dev, "I/O operations have been resumed " 122 pr_info("%s: I/O operations have been resumed "
123 "on the DASD\n"); 123 "on the DASD\n", dev_name(&base->cdev->dev));
124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
125 base->stopped &= ~DASD_STOPPED_QUIESCE; 125 base->stopped &= ~DASD_STOPPED_QUIESCE;
126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -146,8 +146,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
146 return -EPERM; 146 return -EPERM;
147 147
148 if (base->state != DASD_STATE_BASIC) { 148 if (base->state != DASD_STATE_BASIC) {
149 dev_warn(&base->cdev->dev, 149 pr_warning("%s: The DASD cannot be formatted while it is "
150 "The DASD cannot be formatted while it is enabled\n"); 150 "enabled\n", dev_name(&base->cdev->dev));
151 return -EBUSY; 151 return -EBUSY;
152 } 152 }
153 153
@@ -175,9 +175,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
175 dasd_sfree_request(cqr, cqr->memdev); 175 dasd_sfree_request(cqr, cqr->memdev);
176 if (rc) { 176 if (rc) {
177 if (rc != -ERESTARTSYS) 177 if (rc != -ERESTARTSYS)
178 dev_err(&base->cdev->dev, 178 pr_err("%s: Formatting unit %d failed with "
179 "Formatting unit %d failed with " 179 "rc=%d\n", dev_name(&base->cdev->dev),
180 "rc=%d\n", fdata->start_unit, rc); 180 fdata->start_unit, rc);
181 return rc; 181 return rc;
182 } 182 }
183 fdata->start_unit++; 183 fdata->start_unit++;
@@ -204,9 +204,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
204 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 204 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
205 return -EFAULT; 205 return -EFAULT;
206 if (bdev != bdev->bd_contains) { 206 if (bdev != bdev->bd_contains) {
207 dev_warn(&block->base->cdev->dev, 207 pr_warning("%s: The specified DASD is a partition and cannot "
208 "The specified DASD is a partition and cannot be " 208 "be formatted\n",
209 "formatted\n"); 209 dev_name(&block->base->cdev->dev));
210 return -EINVAL; 210 return -EINVAL;
211 } 211 }
212 return dasd_format(block, &fdata); 212 return dasd_format(block, &fdata);
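[Note on the DASD message hunks above] Several messages change from dev_* helpers to pr_* with an explicit dev_name(), so that together with the per-discipline KMSG_COMPONENT values introduced above ("dasd-eckd", "dasd-diag", "dasd-fba") each line is tagged with the emitting discipline rather than a generic "dasd" prefix. A sketch of the resulting pattern; the pr_fmt definition is assumed to be the usual s390 pairing and is not shown in these hunks:

	#define KMSG_COMPONENT "dasd-eckd"
	#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt	/* assumption: defined as usual */

	pr_warning("%s: The DASD cannot be formatted while it is enabled\n",
		   dev_name(&base->cdev->dev));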
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index db442cd6621e..ee604e92a5fa 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -42,7 +42,6 @@
42#include <linux/suspend.h> 42#include <linux/suspend.h>
43#include <linux/platform_device.h> 43#include <linux/platform_device.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45#include <asm/checksum.h>
46 45
47#define XPRAM_NAME "xpram" 46#define XPRAM_NAME "xpram"
48#define XPRAM_DEVS 1 /* one partition */ 47#define XPRAM_DEVS 1 /* one partition */
@@ -51,7 +50,6 @@
51typedef struct { 50typedef struct {
52 unsigned int size; /* size of xpram segment in pages */ 51 unsigned int size; /* size of xpram segment in pages */
53 unsigned int offset; /* start page of xpram segment */ 52 unsigned int offset; /* start page of xpram segment */
54 unsigned int csum; /* partition checksum for suspend */
55} xpram_device_t; 53} xpram_device_t;
56 54
57static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; 55static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@@ -387,58 +385,6 @@ out:
387} 385}
388 386
389/* 387/*
390 * Save checksums for all partitions.
391 */
392static int xpram_save_checksums(void)
393{
394 unsigned long mem_page;
395 int rc, i;
396
397 rc = 0;
398 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
399 if (!mem_page)
400 return -ENOMEM;
401 for (i = 0; i < xpram_devs; i++) {
402 rc = xpram_page_in(mem_page, xpram_devices[i].offset);
403 if (rc)
404 goto fail;
405 xpram_devices[i].csum = csum_partial((const void *) mem_page,
406 PAGE_SIZE, 0);
407 }
408fail:
409 free_page(mem_page);
410 return rc ? -ENXIO : 0;
411}
412
413/*
414 * Verify checksums for all partitions.
415 */
416static int xpram_validate_checksums(void)
417{
418 unsigned long mem_page;
419 unsigned int csum;
420 int rc, i;
421
422 rc = 0;
423 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
424 if (!mem_page)
425 return -ENOMEM;
426 for (i = 0; i < xpram_devs; i++) {
427 rc = xpram_page_in(mem_page, xpram_devices[i].offset);
428 if (rc)
429 goto fail;
430 csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
431 if (xpram_devices[i].csum != csum) {
432 rc = -EINVAL;
433 goto fail;
434 }
435 }
436fail:
437 free_page(mem_page);
438 return rc ? -ENXIO : 0;
439}
440
441/*
442 * Resume failed: Print error message and call panic. 388 * Resume failed: Print error message and call panic.
443 */ 389 */
444static void xpram_resume_error(const char *message) 390static void xpram_resume_error(const char *message)
@@ -458,21 +404,10 @@ static int xpram_restore(struct device *dev)
458 xpram_resume_error("xpram disappeared"); 404 xpram_resume_error("xpram disappeared");
459 if (xpram_pages != xpram_highest_page_index() + 1) 405 if (xpram_pages != xpram_highest_page_index() + 1)
460 xpram_resume_error("Size of xpram changed"); 406 xpram_resume_error("Size of xpram changed");
461 if (xpram_validate_checksums())
462 xpram_resume_error("Data of xpram changed");
463 return 0; 407 return 0;
464} 408}
465 409
466/*
467 * Save necessary state in suspend.
468 */
469static int xpram_freeze(struct device *dev)
470{
471 return xpram_save_checksums();
472}
473
474static struct dev_pm_ops xpram_pm_ops = { 410static struct dev_pm_ops xpram_pm_ops = {
475 .freeze = xpram_freeze,
476 .restore = xpram_restore, 411 .restore = xpram_restore,
477}; 412};
478 413
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 0769ced52dbd..4e34d3686c23 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -82,6 +82,16 @@ config SCLP_CPI
82 You should only select this option if you know what you are doing, 82 You should only select this option if you know what you are doing,
83 need this feature and intend to run your kernel in LPAR. 83 need this feature and intend to run your kernel in LPAR.
84 84
85config SCLP_ASYNC
86 tristate "Support for Call Home via Asynchronous SCLP Records"
87 depends on S390
88 help
89 This option enables the call home function, which is able to inform
90 the service element and connected organisations about a kernel panic.
91 You should only select this option if you know what you are doing,
 92	  want to inform other people about your kernel panics,
93 need this feature and intend to run your kernel in LPAR.
94
85config S390_TAPE 95config S390_TAPE
86 tristate "S/390 tape device support" 96 tristate "S/390 tape device support"
87 depends on CCW 97 depends on CCW
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 7e73e39a1741..efb500ab66c0 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o 16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o 17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
18obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o 18obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
19obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
19 20
20obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o 21obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
21obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o 22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 3234e90bd7f9..89ece1c235aa 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -581,7 +581,7 @@ static int __init mon_init(void)
581 monreader_device->release = (void (*)(struct device *))kfree; 581 monreader_device->release = (void (*)(struct device *))kfree;
582 rc = device_register(monreader_device); 582 rc = device_register(monreader_device);
583 if (rc) { 583 if (rc) {
584 kfree(monreader_device); 584 put_device(monreader_device);
585 goto out_driver; 585 goto out_driver;
586 } 586 }
587 587
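[Note on the monreader hunk above] This applies a general driver-core rule, and the same change recurs in the vmlogrdr and chp hunks further down: once device_register() has been called, the embedded kobject owns the structure, so a registration failure must be unwound with put_device() (which invokes the release callback), never with kfree(). A minimal sketch, assuming a dev whose release callback frees it:

	rc = device_register(dev);
	if (rc) {
		put_device(dev);	/* drops the reference; ->release() frees dev   */
		return rc;		/* not kfree(dev): the kobject core may still
					 * hold state that has to be torn down first   */
	}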
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 60e7cb07095b..6bb5a6bdfab5 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -27,6 +27,7 @@
27#define EVTYP_VT220MSG 0x1A 27#define EVTYP_VT220MSG 0x1A
28#define EVTYP_CONFMGMDATA 0x04 28#define EVTYP_CONFMGMDATA 0x04
29#define EVTYP_SDIAS 0x1C 29#define EVTYP_SDIAS 0x1C
30#define EVTYP_ASYNC 0x0A
30 31
31#define EVTYP_OPCMD_MASK 0x80000000 32#define EVTYP_OPCMD_MASK 0x80000000
32#define EVTYP_MSG_MASK 0x40000000 33#define EVTYP_MSG_MASK 0x40000000
@@ -38,6 +39,7 @@
38#define EVTYP_VT220MSG_MASK 0x00000040 39#define EVTYP_VT220MSG_MASK 0x00000040
39#define EVTYP_CONFMGMDATA_MASK 0x10000000 40#define EVTYP_CONFMGMDATA_MASK 0x10000000
40#define EVTYP_SDIAS_MASK 0x00000010 41#define EVTYP_SDIAS_MASK 0x00000010
42#define EVTYP_ASYNC_MASK 0x00400000
41 43
42#define GNRLMSGFLGS_DOM 0x8000 44#define GNRLMSGFLGS_DOM 0x8000
43#define GNRLMSGFLGS_SNDALRM 0x4000 45#define GNRLMSGFLGS_SNDALRM 0x4000
@@ -85,12 +87,12 @@ struct sccb_header {
85} __attribute__((packed)); 87} __attribute__((packed));
86 88
87extern u64 sclp_facilities; 89extern u64 sclp_facilities;
88
89#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) 90#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
90#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) 91#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
91#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) 92#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
92#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) 93#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
93 94
95
94struct gds_subvector { 96struct gds_subvector {
95 u8 length; 97 u8 length;
96 u8 key; 98 u8 key;
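[Note on the sclp.h hunks above] The new EVTYP_ASYNC pair is consistent with the existing defines: each *_MASK is a single bit counted from the most significant bit of a 32-bit word, i.e. mask = 1 << (32 - type). Illustration only (not how the header actually spells it):

	#define EVTYP_ASYNC      0x0A
	#define EVTYP_ASYNC_MASK (1U << (32 - EVTYP_ASYNC))	/* == 0x00400000 */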
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
new file mode 100644
index 000000000000..daaec185ed36
--- /dev/null
+++ b/drivers/s390/char/sclp_async.c
@@ -0,0 +1,224 @@
1/*
2 * Enable Asynchronous Notification via SCLP.
3 *
4 * Copyright IBM Corp. 2009
5 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
6 *
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/stat.h>
13#include <linux/string.h>
14#include <linux/ctype.h>
15#include <linux/kmod.h>
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/proc_fs.h>
19#include <linux/sysctl.h>
20#include <linux/utsname.h>
21#include "sclp.h"
22
23static int callhome_enabled;
24static struct sclp_req *request;
25static struct sclp_async_sccb *sccb;
26static int sclp_async_send_wait(char *message);
27static struct ctl_table_header *callhome_sysctl_header;
28static DEFINE_SPINLOCK(sclp_async_lock);
29static char nodename[64];
30#define SCLP_NORMAL_WRITE 0x00
31
32struct async_evbuf {
33 struct evbuf_header header;
34 u64 reserved;
35 u8 rflags;
36 u8 empty;
37 u8 rtype;
38 u8 otype;
39 char comp_id[12];
40 char data[3000]; /* there is still some space left */
41} __attribute__((packed));
42
43struct sclp_async_sccb {
44 struct sccb_header header;
45 struct async_evbuf evbuf;
46} __attribute__((packed));
47
48static struct sclp_register sclp_async_register = {
49 .send_mask = EVTYP_ASYNC_MASK,
50};
51
52static int call_home_on_panic(struct notifier_block *self,
53 unsigned long event, void *data)
54{
55 strncat(data, nodename, strlen(nodename));
56 sclp_async_send_wait(data);
57 return NOTIFY_DONE;
58}
59
60static struct notifier_block call_home_panic_nb = {
61 .notifier_call = call_home_on_panic,
62 .priority = INT_MAX,
63};
64
65static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
66 void __user *buffer, size_t *count,
67 loff_t *ppos)
68{
69 unsigned long val;
70 int len, rc;
71 char buf[2];
72
73 if (!*count | (*ppos && !write)) {
74 *count = 0;
75 return 0;
76 }
77 if (!write) {
78 len = sprintf(buf, "%d\n", callhome_enabled);
79 buf[len] = '\0';
80 rc = copy_to_user(buffer, buf, sizeof(buf));
81 if (rc != 0)
82 return -EFAULT;
83 } else {
84 len = *count;
85 rc = copy_from_user(buf, buffer, sizeof(buf));
86 if (rc != 0)
87 return -EFAULT;
88 if (strict_strtoul(buf, 0, &val) != 0)
89 return -EINVAL;
90 if (val != 0 && val != 1)
91 return -EINVAL;
92 callhome_enabled = val;
93 }
94 *count = len;
95 *ppos += len;
96 return 0;
97}
98
99static struct ctl_table callhome_table[] = {
100 {
101 .procname = "callhome",
102 .mode = 0644,
103 .proc_handler = &proc_handler_callhome,
104 },
105 { .ctl_name = 0 }
106};
107
108static struct ctl_table kern_dir_table[] = {
109 {
110 .ctl_name = CTL_KERN,
111 .procname = "kernel",
112 .maxlen = 0,
113 .mode = 0555,
114 .child = callhome_table,
115 },
116 { .ctl_name = 0 }
117};
118
119/*
120 * Function used to transfer asynchronous notification
121 * records which waits for send completion
122 */
123static int sclp_async_send_wait(char *message)
124{
125 struct async_evbuf *evb;
126 int rc;
127 unsigned long flags;
128
129 if (!callhome_enabled)
130 return 0;
131 sccb->evbuf.header.type = EVTYP_ASYNC;
132 sccb->evbuf.rtype = 0xA5;
133 sccb->evbuf.otype = 0x00;
134 evb = &sccb->evbuf;
135 request->command = SCLP_CMDW_WRITE_EVENT_DATA;
136 request->sccb = sccb;
137 request->status = SCLP_REQ_FILLED;
138 strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
139 /*
140 * Retain Queue
141 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
142 */
143 strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
144 sccb->evbuf.header.length = sizeof(sccb->evbuf);
145 sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
146 sccb->header.function_code = SCLP_NORMAL_WRITE;
147 rc = sclp_add_request(request);
148 if (rc)
149 return rc;
150 spin_lock_irqsave(&sclp_async_lock, flags);
151 while (request->status != SCLP_REQ_DONE &&
152 request->status != SCLP_REQ_FAILED) {
153 sclp_sync_wait();
154 }
155 spin_unlock_irqrestore(&sclp_async_lock, flags);
156 if (request->status != SCLP_REQ_DONE)
157 return -EIO;
158 rc = ((struct sclp_async_sccb *)
159 request->sccb)->header.response_code;
160 if (rc != 0x0020)
161 return -EIO;
162 if (evb->header.flags != 0x80)
163 return -EIO;
164 return rc;
165}
166
167static int __init sclp_async_init(void)
168{
169 int rc;
170
171 rc = sclp_register(&sclp_async_register);
172 if (rc)
173 return rc;
174 callhome_sysctl_header = register_sysctl_table(kern_dir_table);
175 if (!callhome_sysctl_header) {
176 rc = -ENOMEM;
177 goto out_sclp;
178 }
179 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
180 rc = -EOPNOTSUPP;
181 goto out_sclp;
182 }
183 rc = -ENOMEM;
184 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
185 if (!request)
186 goto out_sys;
187 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
188 if (!sccb)
189 goto out_mem;
190 rc = atomic_notifier_chain_register(&panic_notifier_list,
191 &call_home_panic_nb);
192 if (rc)
193 goto out_mem;
194
195 strncpy(nodename, init_utsname()->nodename, 64);
196 return 0;
197
198out_mem:
199 kfree(request);
200 free_page((unsigned long) sccb);
201out_sys:
202 unregister_sysctl_table(callhome_sysctl_header);
203out_sclp:
204 sclp_unregister(&sclp_async_register);
205 return rc;
206
207}
208module_init(sclp_async_init);
209
210static void __exit sclp_async_exit(void)
211{
212 atomic_notifier_chain_unregister(&panic_notifier_list,
213 &call_home_panic_nb);
214 unregister_sysctl_table(callhome_sysctl_header);
215 sclp_unregister(&sclp_async_register);
216 free_page((unsigned long) sccb);
217 kfree(request);
218}
219module_exit(sclp_async_exit);
220
221MODULE_AUTHOR("Copyright IBM Corp. 2009");
222MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
223MODULE_LICENSE("GPL");
224MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
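[Note on the new file above] sclp_async.c wires a panic notifier to an SCLP asynchronous event record and gates it behind a sysctl. From user space the knob should appear as /proc/sys/kernel/callhome (the path is implied by the CTL_KERN/"kernel" directory table plus the "callhome" entry, not stated explicitly), and proc_handler_callhome() accepts only 0 or 1. A hedged user-space sketch for enabling it:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/callhome", "w");

		if (!f)
			return 1;
		fputs("1", f);	/* the handler rejects anything other than 0 or 1 */
		return fclose(f) ? 1 : 0;
	}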
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5a519fac37b7..2fe45ff77b75 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -8,7 +8,7 @@
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape" 11#define KMSG_COMPONENT "tape_34xx"
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 418f72dd39b4..e4cc3aae9162 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -8,7 +8,7 @@
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape" 11#define KMSG_COMPONENT "tape_3590"
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
@@ -39,8 +39,6 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
39 * - Read Alternate: implemented 39 * - Read Alternate: implemented
40 *******************************************************************/ 40 *******************************************************************/
41 41
42#define KMSG_COMPONENT "tape"
43
44static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { 42static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
45 [0x00] = "", 43 [0x00] = "",
46 [0x10] = "Lost Sense", 44 [0x10] = "Lost Sense",
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 47ff695255ea..4cb9e70507ab 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -302,8 +302,6 @@ tapeblock_revalidate_disk(struct gendisk *disk)
302 if (!device->blk_data.medium_changed) 302 if (!device->blk_data.medium_changed)
303 return 0; 303 return 0;
304 304
305 dev_info(&device->cdev->dev, "Determining the size of the recorded "
306 "area...\n");
307 rc = tape_mtop(device, MTFSFM, 1); 305 rc = tape_mtop(device, MTFSFM, 1);
308 if (rc) 306 if (rc)
309 return rc; 307 return rc;
@@ -312,6 +310,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
312 if (rc < 0) 310 if (rc < 0)
313 return rc; 311 return rc;
314 312
313 pr_info("%s: Determining the size of the recorded area...\n",
314 dev_name(&device->cdev->dev));
315 DBF_LH(3, "Image file ends at %d\n", rc); 315 DBF_LH(3, "Image file ends at %d\n", rc);
316 nr_of_blks = rc; 316 nr_of_blks = rc;
317 317
@@ -330,8 +330,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
330 device->bof = rc; 330 device->bof = rc;
331 nr_of_blks -= rc; 331 nr_of_blks -= rc;
332 332
333 dev_info(&device->cdev->dev, "The size of the recorded area is %i " 333 pr_info("%s: The size of the recorded area is %i blocks\n",
334 "blocks\n", nr_of_blks); 334 dev_name(&device->cdev->dev), nr_of_blks);
335 set_capacity(device->blk_data.disk, 335 set_capacity(device->blk_data.disk,
336 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); 336 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
337 337
@@ -366,8 +366,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
366 366
367 if (device->required_tapemarks) { 367 if (device->required_tapemarks) {
368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); 368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
369 dev_warn(&device->cdev->dev, "Opening the tape failed because" 369 pr_warning("%s: Opening the tape failed because of missing "
370 " of missing end-of-file marks\n"); 370 "end-of-file marks\n", dev_name(&device->cdev->dev));
371 rc = -EPERM; 371 rc = -EPERM;
372 goto put_device; 372 goto put_device;
373 } 373 }
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 1d420d947596..5cd31e071647 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -214,13 +214,15 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
214 switch(newstate){ 214 switch(newstate){
215 case MS_UNLOADED: 215 case MS_UNLOADED:
216 device->tape_generic_status |= GMT_DR_OPEN(~0); 216 device->tape_generic_status |= GMT_DR_OPEN(~0);
217 dev_info(&device->cdev->dev, "The tape cartridge has been " 217 if (device->medium_state == MS_LOADED)
218 "successfully unloaded\n"); 218 pr_info("%s: The tape cartridge has been successfully "
219 "unloaded\n", dev_name(&device->cdev->dev));
219 break; 220 break;
220 case MS_LOADED: 221 case MS_LOADED:
221 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 222 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
222 dev_info(&device->cdev->dev, "A tape cartridge has been " 223 if (device->medium_state == MS_UNLOADED)
223 "mounted\n"); 224 pr_info("%s: A tape cartridge has been mounted\n",
225 dev_name(&device->cdev->dev));
224 break; 226 break;
225 default: 227 default:
226 // print nothing 228 // print nothing
@@ -358,11 +360,11 @@ tape_generic_online(struct tape_device *device,
358 360
359out_char: 361out_char:
360 tapechar_cleanup_device(device); 362 tapechar_cleanup_device(device);
363out_minor:
364 tape_remove_minor(device);
361out_discipline: 365out_discipline:
362 device->discipline->cleanup_device(device); 366 device->discipline->cleanup_device(device);
363 device->discipline = NULL; 367 device->discipline = NULL;
364out_minor:
365 tape_remove_minor(device);
366out: 368out:
367 module_put(discipline->owner); 369 module_put(discipline->owner);
368 return rc; 370 return rc;
@@ -654,8 +656,8 @@ tape_generic_remove(struct ccw_device *cdev)
654 */ 656 */
655 DBF_EVENT(3, "(%08x): Drive in use vanished!\n", 657 DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
656 device->cdev_id); 658 device->cdev_id);
657 dev_warn(&device->cdev->dev, "A tape unit was detached" 659 pr_warning("%s: A tape unit was detached while in "
658 " while in use\n"); 660 "use\n", dev_name(&device->cdev->dev));
659 tape_state_set(device, TS_NOT_OPER); 661 tape_state_set(device, TS_NOT_OPER);
660 __tape_discard_requests(device); 662 __tape_discard_requests(device);
661 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 663 spin_unlock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 1a9420ba518d..750354ad16e5 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -68,7 +68,7 @@ tape_std_assign(struct tape_device *device)
68 * to another host (actually this shouldn't happen but it does). 68 * to another host (actually this shouldn't happen but it does).
69 * So we set up a timeout for this call. 69 * So we set up a timeout for this call.
70 */ 70 */
71 init_timer(&timeout); 71 init_timer_on_stack(&timeout);
72 timeout.function = tape_std_assign_timeout; 72 timeout.function = tape_std_assign_timeout;
73 timeout.data = (unsigned long) request; 73 timeout.data = (unsigned long) request;
74 timeout.expires = jiffies + 2 * HZ; 74 timeout.expires = jiffies + 2 * HZ;
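[Note on the hunk above] tape_std_assign() keeps its timer on the kernel stack, so it has to be initialised with init_timer_on_stack() for the timer/object debugging code to track it correctly; plain init_timer() is only appropriate for timers in static or heap storage. A sketch of the full on-stack pattern; whether the surrounding function already pairs this with destroy_timer_on_stack() is not visible in this hunk, so that part is an assumption:

	struct timer_list timeout;

	init_timer_on_stack(&timeout);
	timeout.function = tape_std_assign_timeout;
	timeout.data     = (unsigned long) request;
	timeout.expires  = jiffies + 2 * HZ;
	add_timer(&timeout);
	/* ... wait for the assign request to complete ... */
	del_timer_sync(&timeout);
	destroy_timer_on_stack(&timeout);	/* assumption: needed before return */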
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c20a4fe6da51..d1a142fa3eb4 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -765,8 +765,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
765 } else 765 } else
766 return -ENOMEM; 766 return -ENOMEM;
767 ret = device_register(dev); 767 ret = device_register(dev);
768 if (ret) 768 if (ret) {
769 put_device(dev);
769 return ret; 770 return ret;
771 }
770 772
771 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group); 773 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
772 if (ret) { 774 if (ret) {
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 31b902e94f7b..77571b68539a 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -1026,9 +1026,15 @@ static int __init ur_init(void)
1026 1026
1027 debug_set_level(vmur_dbf, 6); 1027 debug_set_level(vmur_dbf, 6);
1028 1028
1029 vmur_class = class_create(THIS_MODULE, "vmur");
1030 if (IS_ERR(vmur_class)) {
1031 rc = PTR_ERR(vmur_class);
1032 goto fail_free_dbf;
1033 }
1034
1029 rc = ccw_driver_register(&ur_driver); 1035 rc = ccw_driver_register(&ur_driver);
1030 if (rc) 1036 if (rc)
1031 goto fail_free_dbf; 1037 goto fail_class_destroy;
1032 1038
1033 rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); 1039 rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
1034 if (rc) { 1040 if (rc) {
@@ -1038,18 +1044,13 @@ static int __init ur_init(void)
1038 } 1044 }
1039 ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); 1045 ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
1040 1046
1041 vmur_class = class_create(THIS_MODULE, "vmur");
1042 if (IS_ERR(vmur_class)) {
1043 rc = PTR_ERR(vmur_class);
1044 goto fail_unregister_region;
1045 }
1046 pr_info("%s loaded.\n", ur_banner); 1047 pr_info("%s loaded.\n", ur_banner);
1047 return 0; 1048 return 0;
1048 1049
1049fail_unregister_region:
1050 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
1051fail_unregister_driver: 1050fail_unregister_driver:
1052 ccw_driver_unregister(&ur_driver); 1051 ccw_driver_unregister(&ur_driver);
1052fail_class_destroy:
1053 class_destroy(vmur_class);
1053fail_free_dbf: 1054fail_free_dbf:
1054 debug_unregister(vmur_dbf); 1055 debug_unregister(vmur_dbf);
1055 return rc; 1056 return rc;
@@ -1057,9 +1058,9 @@ fail_free_dbf:
1057 1058
1058static void __exit ur_exit(void) 1059static void __exit ur_exit(void)
1059{ 1060{
1060 class_destroy(vmur_class);
1061 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); 1061 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
1062 ccw_driver_unregister(&ur_driver); 1062 ccw_driver_unregister(&ur_driver);
1063 class_destroy(vmur_class);
1063 debug_unregister(vmur_dbf); 1064 debug_unregister(vmur_dbf);
1064 pr_info("%s unloaded.\n", ur_banner); 1065 pr_info("%s unloaded.\n", ur_banner);
1065} 1066}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1bbae433fbd8..c431198bdbc4 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -275,7 +275,7 @@ struct zcore_header {
275 u32 num_pages; 275 u32 num_pages;
276 u32 pad1; 276 u32 pad1;
277 u64 tod; 277 u64 tod;
278 cpuid_t cpu_id; 278 struct cpuid cpu_id;
279 u32 arch_id; 279 u32 arch_id;
280 u32 volnr; 280 u32 volnr;
281 u32 build_arch; 281 u32 build_arch;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index adb3dd301528..fa4c9662f65e 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 common i/o drivers 2# Makefile for the S/390 common i/o drivers
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
6 fcx.o itcw.o crw.o 6 fcx.o itcw.o crw.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 3e5f304ad88f..40002830d48a 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -417,7 +417,8 @@ int chp_new(struct chp_id chpid)
417 if (ret) { 417 if (ret) {
418 CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n", 418 CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
419 chpid.cssid, chpid.id, ret); 419 chpid.cssid, chpid.id, ret);
420 goto out_free; 420 put_device(&chp->dev);
421 goto out;
421 } 422 }
422 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); 423 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
423 if (ret) { 424 if (ret) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 425e8f89a6c5..37aa611d4ac5 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -37,29 +37,6 @@ struct channel_path_desc {
37 37
38struct channel_path; 38struct channel_path;
39 39
40struct css_general_char {
41 u64 : 12;
42 u32 dynio : 1; /* bit 12 */
43 u32 : 28;
44 u32 aif : 1; /* bit 41 */
45 u32 : 3;
46 u32 mcss : 1; /* bit 45 */
47 u32 fcs : 1; /* bit 46 */
48 u32 : 1;
49 u32 ext_mb : 1; /* bit 48 */
50 u32 : 7;
51 u32 aif_tdd : 1; /* bit 56 */
52 u32 : 1;
53 u32 qebsm : 1; /* bit 58 */
54 u32 : 8;
55 u32 aif_osa : 1; /* bit 67 */
56 u32 : 14;
57 u32 cib : 1; /* bit 82 */
58 u32 : 5;
59 u32 fcx : 1; /* bit 88 */
60 u32 : 7;
61}__attribute__((packed));
62
63struct css_chsc_char { 40struct css_chsc_char {
64 u64 res; 41 u64 res;
65 u64 : 20; 42 u64 : 20;
@@ -72,7 +49,6 @@ struct css_chsc_char {
72 u32 : 19; 49 u32 : 19;
73}__attribute__((packed)); 50}__attribute__((packed));
74 51
75extern struct css_general_char css_general_characteristics;
76extern struct css_chsc_char css_chsc_characteristics; 52extern struct css_chsc_char css_chsc_characteristics;
77 53
78struct chsc_ssd_info { 54struct chsc_ssd_info {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5ec7789bd9d8..138124fcfcad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -139,12 +139,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
139 __u8 lpm, /* logical path mask */ 139 __u8 lpm, /* logical path mask */
140 __u8 key) /* storage key */ 140 __u8 key) /* storage key */
141{ 141{
142 char dbf_txt[15];
143 int ccode; 142 int ccode;
144 union orb *orb; 143 union orb *orb;
145 144
146 CIO_TRACE_EVENT(4, "stIO"); 145 CIO_TRACE_EVENT(5, "stIO");
147 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 146 CIO_TRACE_EVENT(5, dev_name(&sch->dev));
148 147
149 orb = &to_io_private(sch)->orb; 148 orb = &to_io_private(sch)->orb;
150 memset(orb, 0, sizeof(union orb)); 149 memset(orb, 0, sizeof(union orb));
@@ -169,8 +168,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
169 ccode = ssch(sch->schid, orb); 168 ccode = ssch(sch->schid, orb);
170 169
171 /* process condition code */ 170 /* process condition code */
172 sprintf(dbf_txt, "ccode:%d", ccode); 171 CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
173 CIO_TRACE_EVENT(4, dbf_txt);
174 172
175 switch (ccode) { 173 switch (ccode) {
176 case 0: 174 case 0:
@@ -201,16 +199,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
201int 199int
202cio_resume (struct subchannel *sch) 200cio_resume (struct subchannel *sch)
203{ 201{
204 char dbf_txt[15];
205 int ccode; 202 int ccode;
206 203
207 CIO_TRACE_EVENT (4, "resIO"); 204 CIO_TRACE_EVENT(4, "resIO");
208 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 205 CIO_TRACE_EVENT(4, dev_name(&sch->dev));
209 206
210 ccode = rsch (sch->schid); 207 ccode = rsch (sch->schid);
211 208
212 sprintf (dbf_txt, "ccode:%d", ccode); 209 CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
213 CIO_TRACE_EVENT (4, dbf_txt);
214 210
215 switch (ccode) { 211 switch (ccode) {
216 case 0: 212 case 0:
@@ -235,13 +231,12 @@ cio_resume (struct subchannel *sch)
235int 231int
236cio_halt(struct subchannel *sch) 232cio_halt(struct subchannel *sch)
237{ 233{
238 char dbf_txt[15];
239 int ccode; 234 int ccode;
240 235
241 if (!sch) 236 if (!sch)
242 return -ENODEV; 237 return -ENODEV;
243 238
244 CIO_TRACE_EVENT (2, "haltIO"); 239 CIO_TRACE_EVENT(2, "haltIO");
245 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 240 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
246 241
247 /* 242 /*
@@ -249,8 +244,7 @@ cio_halt(struct subchannel *sch)
249 */ 244 */
250 ccode = hsch (sch->schid); 245 ccode = hsch (sch->schid);
251 246
252 sprintf (dbf_txt, "ccode:%d", ccode); 247 CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
253 CIO_TRACE_EVENT (2, dbf_txt);
254 248
255 switch (ccode) { 249 switch (ccode) {
256 case 0: 250 case 0:
@@ -270,13 +264,12 @@ cio_halt(struct subchannel *sch)
270int 264int
271cio_clear(struct subchannel *sch) 265cio_clear(struct subchannel *sch)
272{ 266{
273 char dbf_txt[15];
274 int ccode; 267 int ccode;
275 268
276 if (!sch) 269 if (!sch)
277 return -ENODEV; 270 return -ENODEV;
278 271
279 CIO_TRACE_EVENT (2, "clearIO"); 272 CIO_TRACE_EVENT(2, "clearIO");
280 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 273 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
281 274
282 /* 275 /*
@@ -284,8 +277,7 @@ cio_clear(struct subchannel *sch)
284 */ 277 */
285 ccode = csch (sch->schid); 278 ccode = csch (sch->schid);
286 279
287 sprintf (dbf_txt, "ccode:%d", ccode); 280 CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
288 CIO_TRACE_EVENT (2, dbf_txt);
289 281
290 switch (ccode) { 282 switch (ccode) {
291 case 0: 283 case 0:
@@ -306,19 +298,17 @@ cio_clear(struct subchannel *sch)
306int 298int
307cio_cancel (struct subchannel *sch) 299cio_cancel (struct subchannel *sch)
308{ 300{
309 char dbf_txt[15];
310 int ccode; 301 int ccode;
311 302
312 if (!sch) 303 if (!sch)
313 return -ENODEV; 304 return -ENODEV;
314 305
315 CIO_TRACE_EVENT (2, "cancelIO"); 306 CIO_TRACE_EVENT(2, "cancelIO");
316 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 307 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
317 308
318 ccode = xsch (sch->schid); 309 ccode = xsch (sch->schid);
319 310
320 sprintf (dbf_txt, "ccode:%d", ccode); 311 CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
321 CIO_TRACE_EVENT (2, dbf_txt);
322 312
323 switch (ccode) { 313 switch (ccode) {
324 case 0: /* success */ 314 case 0: /* success */
@@ -429,11 +419,10 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
429 */ 419 */
430int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 420int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
431{ 421{
432 char dbf_txt[15];
433 int retry; 422 int retry;
434 int ret; 423 int ret;
435 424
436 CIO_TRACE_EVENT (2, "ensch"); 425 CIO_TRACE_EVENT(2, "ensch");
437 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 426 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
438 427
439 if (sch_is_pseudo_sch(sch)) 428 if (sch_is_pseudo_sch(sch))
@@ -460,8 +449,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
460 } else 449 } else
461 break; 450 break;
462 } 451 }
463 sprintf (dbf_txt, "ret:%d", ret); 452 CIO_HEX_EVENT(2, &ret, sizeof(ret));
464 CIO_TRACE_EVENT (2, dbf_txt);
465 return ret; 453 return ret;
466} 454}
467EXPORT_SYMBOL_GPL(cio_enable_subchannel); 455EXPORT_SYMBOL_GPL(cio_enable_subchannel);
@@ -472,11 +460,10 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
472 */ 460 */
473int cio_disable_subchannel(struct subchannel *sch) 461int cio_disable_subchannel(struct subchannel *sch)
474{ 462{
475 char dbf_txt[15];
476 int retry; 463 int retry;
477 int ret; 464 int ret;
478 465
479 CIO_TRACE_EVENT (2, "dissch"); 466 CIO_TRACE_EVENT(2, "dissch");
480 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 467 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
481 468
482 if (sch_is_pseudo_sch(sch)) 469 if (sch_is_pseudo_sch(sch))
@@ -495,8 +482,7 @@ int cio_disable_subchannel(struct subchannel *sch)
495 } else 482 } else
496 break; 483 break;
497 } 484 }
498 sprintf (dbf_txt, "ret:%d", ret); 485 CIO_HEX_EVENT(2, &ret, sizeof(ret));
499 CIO_TRACE_EVENT (2, dbf_txt);
500 return ret; 486 return ret;
501} 487}
502EXPORT_SYMBOL_GPL(cio_disable_subchannel); 488EXPORT_SYMBOL_GPL(cio_disable_subchannel);
@@ -578,11 +564,6 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
578 goto out; 564 goto out;
579 } 565 }
580 mutex_init(&sch->reg_mutex); 566 mutex_init(&sch->reg_mutex);
581 /* Set a name for the subchannel */
582 if (cio_is_console(schid))
583 sch->dev.init_name = cio_get_console_sch_name(schid);
584 else
585 dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
586 567
587 /* 568 /*
588 * The first subchannel that is not-operational (ccode==3) 569 * The first subchannel that is not-operational (ccode==3)
@@ -686,7 +667,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
686 667
687#ifdef CONFIG_CCW_CONSOLE 668#ifdef CONFIG_CCW_CONSOLE
688static struct subchannel console_subchannel; 669static struct subchannel console_subchannel;
689static char console_sch_name[10] = "0.x.xxxx";
690static struct io_subchannel_private console_priv; 670static struct io_subchannel_private console_priv;
691static int console_subchannel_in_use; 671static int console_subchannel_in_use;
692 672
@@ -873,12 +853,6 @@ cio_get_console_subchannel(void)
873 return &console_subchannel; 853 return &console_subchannel;
874} 854}
875 855
876const char *cio_get_console_sch_name(struct subchannel_id schid)
877{
878 snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
879 return (const char *)console_sch_name;
880}
881
882#endif 856#endif
883static int 857static int
884__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 858__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
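In the cio.c hunks above, the sprintf()-into-a-stack-buffer plus text trace is replaced by logging the condition code directly as hex data, and the start-I/O events are demoted to a quieter debug level. CIO_HEX_EVENT() wraps the s390 debug feature; a rough sketch of the underlying call, with a hypothetical debug area:

#include <asm/debug.h>

static debug_info_t *example_dbf;	/* assumed to be set up elsewhere via debug_register() */

static void example_log_ccode(int ccode)
{
	/* before: char buf[15]; sprintf(buf, "ccode:%d", ccode);
	 *         debug_text_event(example_dbf, 2, buf);          */
	debug_event(example_dbf, 2, &ccode, sizeof(ccode));
}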
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 5150fba742ac..2e43558c704b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -133,15 +133,11 @@ extern int cio_is_console(struct subchannel_id);
133extern struct subchannel *cio_get_console_subchannel(void); 133extern struct subchannel *cio_get_console_subchannel(void);
134extern spinlock_t * cio_get_console_lock(void); 134extern spinlock_t * cio_get_console_lock(void);
135extern void *cio_get_console_priv(void); 135extern void *cio_get_console_priv(void);
136extern const char *cio_get_console_sch_name(struct subchannel_id schid);
137extern const char *cio_get_console_cdev_name(struct subchannel *sch);
138#else 136#else
139#define cio_is_console(schid) 0 137#define cio_is_console(schid) 0
140#define cio_get_console_subchannel() NULL 138#define cio_get_console_subchannel() NULL
141#define cio_get_console_lock() NULL 139#define cio_get_console_lock() NULL
142#define cio_get_console_priv() NULL 140#define cio_get_console_priv() NULL
143#define cio_get_console_sch_name(schid) NULL
144#define cio_get_console_cdev_name(sch) NULL
145#endif 141#endif
146 142
147#endif 143#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 85d43c6bcb66..e995123fd805 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -152,24 +152,15 @@ css_alloc_subchannel(struct subchannel_id schid)
152} 152}
153 153
154static void 154static void
155css_free_subchannel(struct subchannel *sch)
156{
157 if (sch) {
158 /* Reset intparm to zeroes. */
159 sch->config.intparm = 0;
160 cio_commit_config(sch);
161 kfree(sch->lock);
162 kfree(sch);
163 }
164}
165
166static void
167css_subchannel_release(struct device *dev) 155css_subchannel_release(struct device *dev)
168{ 156{
169 struct subchannel *sch; 157 struct subchannel *sch;
170 158
171 sch = to_subchannel(dev); 159 sch = to_subchannel(dev);
172 if (!cio_is_console(sch->schid)) { 160 if (!cio_is_console(sch->schid)) {
161 /* Reset intparm to zeroes. */
162 sch->config.intparm = 0;
163 cio_commit_config(sch);
173 kfree(sch->lock); 164 kfree(sch->lock);
174 kfree(sch); 165 kfree(sch);
175 } 166 }
@@ -180,6 +171,8 @@ static int css_sch_device_register(struct subchannel *sch)
180 int ret; 171 int ret;
181 172
182 mutex_lock(&sch->reg_mutex); 173 mutex_lock(&sch->reg_mutex);
174 dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
175 sch->schid.sch_no);
183 ret = device_register(&sch->dev); 176 ret = device_register(&sch->dev);
184 mutex_unlock(&sch->reg_mutex); 177 mutex_unlock(&sch->reg_mutex);
185 return ret; 178 return ret;
@@ -327,7 +320,7 @@ int css_probe_device(struct subchannel_id schid)
327 return PTR_ERR(sch); 320 return PTR_ERR(sch);
328 ret = css_register_subchannel(sch); 321 ret = css_register_subchannel(sch);
329 if (ret) 322 if (ret)
330 css_free_subchannel(sch); 323 put_device(&sch->dev);
331 return ret; 324 return ret;
332} 325}
333 326
@@ -644,7 +637,10 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
644 * not working) so we do it now. This is true e.g. for the 637 * not working) so we do it now. This is true e.g. for the
645 * console subchannel. 638 * console subchannel.
646 */ 639 */
647 css_register_subchannel(sch); 640 if (css_register_subchannel(sch)) {
641 if (!cio_is_console(schid))
642 put_device(&sch->dev);
643 }
648 return 0; 644 return 0;
649} 645}
650 646
@@ -661,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
661 css->global_pgid.pgid_high.cpu_addr = 0; 657 css->global_pgid.pgid_high.cpu_addr = 0;
662#endif 658#endif
663 } 659 }
664 css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; 660 css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
665 css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; 661 css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
666 css->global_pgid.tod_high = tod_high; 662 css->global_pgid.tod_high = tod_high;
667 663
668} 664}
@@ -920,8 +916,10 @@ init_channel_subsystem (void)
920 goto out_device; 916 goto out_device;
921 } 917 }
922 ret = device_register(&css->pseudo_subchannel->dev); 918 ret = device_register(&css->pseudo_subchannel->dev);
923 if (ret) 919 if (ret) {
920 put_device(&css->pseudo_subchannel->dev);
924 goto out_file; 921 goto out_file;
922 }
925 } 923 }
926 ret = register_reboot_notifier(&css_reboot_notifier); 924 ret = register_reboot_notifier(&css_reboot_notifier);
927 if (ret) 925 if (ret)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d593bc76afe3..0f95405c2c5e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -307,8 +307,11 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
307 307
308static void ccw_device_unregister(struct ccw_device *cdev) 308static void ccw_device_unregister(struct ccw_device *cdev)
309{ 309{
310 if (test_and_clear_bit(1, &cdev->private->registered)) 310 if (test_and_clear_bit(1, &cdev->private->registered)) {
311 device_del(&cdev->dev); 311 device_del(&cdev->dev);
312 /* Release reference from device_initialize(). */
313 put_device(&cdev->dev);
314 }
312} 315}
313 316
314static void ccw_device_remove_orphan_cb(struct work_struct *work) 317static void ccw_device_remove_orphan_cb(struct work_struct *work)
@@ -319,7 +322,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work)
319 priv = container_of(work, struct ccw_device_private, kick_work); 322 priv = container_of(work, struct ccw_device_private, kick_work);
320 cdev = priv->cdev; 323 cdev = priv->cdev;
321 ccw_device_unregister(cdev); 324 ccw_device_unregister(cdev);
322 put_device(&cdev->dev);
323 /* Release cdev reference for workqueue processing. */ 325 /* Release cdev reference for workqueue processing. */
324 put_device(&cdev->dev); 326 put_device(&cdev->dev);
325} 327}
@@ -333,15 +335,15 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
333 * Forced offline in disconnected state means 335 * Forced offline in disconnected state means
334 * 'throw away device'. 336 * 'throw away device'.
335 */ 337 */
336 /* Get cdev reference for workqueue processing. */
337 if (!get_device(&cdev->dev))
338 return;
339 if (ccw_device_is_orphan(cdev)) { 338 if (ccw_device_is_orphan(cdev)) {
340 /* 339 /*
341 * Deregister ccw device. 340 * Deregister ccw device.
342 * Unfortunately, we cannot do this directly from the 341 * Unfortunately, we cannot do this directly from the
343 * attribute method. 342 * attribute method.
344 */ 343 */
344 /* Get cdev reference for workqueue processing. */
345 if (!get_device(&cdev->dev))
346 return;
345 spin_lock_irqsave(cdev->ccwlock, flags); 347 spin_lock_irqsave(cdev->ccwlock, flags);
346 cdev->private->state = DEV_STATE_NOT_OPER; 348 cdev->private->state = DEV_STATE_NOT_OPER;
347 spin_unlock_irqrestore(cdev->ccwlock, flags); 349 spin_unlock_irqrestore(cdev->ccwlock, flags);
@@ -380,30 +382,34 @@ int ccw_device_set_offline(struct ccw_device *cdev)
380 } 382 }
381 cdev->online = 0; 383 cdev->online = 0;
382 spin_lock_irq(cdev->ccwlock); 384 spin_lock_irq(cdev->ccwlock);
383 ret = ccw_device_offline(cdev); 385 /* Wait until a final state or DISCONNECTED is reached */
384 if (ret == -ENODEV) { 386 while (!dev_fsm_final_state(cdev) &&
385 if (cdev->private->state != DEV_STATE_NOT_OPER) { 387 cdev->private->state != DEV_STATE_DISCONNECTED) {
386 cdev->private->state = DEV_STATE_OFFLINE;
387 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
388 }
389 spin_unlock_irq(cdev->ccwlock); 388 spin_unlock_irq(cdev->ccwlock);
390 /* Give up reference from ccw_device_set_online(). */ 389 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
391 put_device(&cdev->dev); 390 cdev->private->state == DEV_STATE_DISCONNECTED));
392 return ret; 391 spin_lock_irq(cdev->ccwlock);
393 } 392 }
393 ret = ccw_device_offline(cdev);
394 if (ret)
395 goto error;
394 spin_unlock_irq(cdev->ccwlock); 396 spin_unlock_irq(cdev->ccwlock);
395 if (ret == 0) { 397 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
396 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 398 cdev->private->state == DEV_STATE_DISCONNECTED));
397 /* Give up reference from ccw_device_set_online(). */ 399 /* Give up reference from ccw_device_set_online(). */
398 put_device(&cdev->dev); 400 put_device(&cdev->dev);
399 } else { 401 return 0;
400 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " 402
401 "device 0.%x.%04x\n", 403error:
402 ret, cdev->private->dev_id.ssid, 404 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
403 cdev->private->dev_id.devno); 405 ret, cdev->private->dev_id.ssid,
404 cdev->online = 1; 406 cdev->private->dev_id.devno);
405 } 407 cdev->private->state = DEV_STATE_OFFLINE;
406 return ret; 408 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
409 spin_unlock_irq(cdev->ccwlock);
410 /* Give up reference from ccw_device_set_online(). */
411 put_device(&cdev->dev);
412 return -ENODEV;
407} 413}
408 414
409/** 415/**
@@ -421,6 +427,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
421int ccw_device_set_online(struct ccw_device *cdev) 427int ccw_device_set_online(struct ccw_device *cdev)
422{ 428{
423 int ret; 429 int ret;
430 int ret2;
424 431
425 if (!cdev) 432 if (!cdev)
426 return -ENODEV; 433 return -ENODEV;
@@ -444,28 +451,53 @@ int ccw_device_set_online(struct ccw_device *cdev)
444 put_device(&cdev->dev); 451 put_device(&cdev->dev);
445 return ret; 452 return ret;
446 } 453 }
447 if (cdev->private->state != DEV_STATE_ONLINE) { 454 spin_lock_irq(cdev->ccwlock);
455 /* Check if online processing was successful */
456 if ((cdev->private->state != DEV_STATE_ONLINE) &&
457 (cdev->private->state != DEV_STATE_W4SENSE)) {
458 spin_unlock_irq(cdev->ccwlock);
448 /* Give up online reference since onlining failed. */ 459 /* Give up online reference since onlining failed. */
449 put_device(&cdev->dev); 460 put_device(&cdev->dev);
450 return -ENODEV; 461 return -ENODEV;
451 } 462 }
452 if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { 463 spin_unlock_irq(cdev->ccwlock);
453 cdev->online = 1; 464 if (cdev->drv->set_online)
454 return 0; 465 ret = cdev->drv->set_online(cdev);
455 } 466 if (ret)
467 goto rollback;
468 cdev->online = 1;
469 return 0;
470
471rollback:
456 spin_lock_irq(cdev->ccwlock); 472 spin_lock_irq(cdev->ccwlock);
457 ret = ccw_device_offline(cdev); 473 /* Wait until a final state or DISCONNECTED is reached */
474 while (!dev_fsm_final_state(cdev) &&
475 cdev->private->state != DEV_STATE_DISCONNECTED) {
476 spin_unlock_irq(cdev->ccwlock);
477 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
478 cdev->private->state == DEV_STATE_DISCONNECTED));
479 spin_lock_irq(cdev->ccwlock);
480 }
481 ret2 = ccw_device_offline(cdev);
482 if (ret2)
483 goto error;
458 spin_unlock_irq(cdev->ccwlock); 484 spin_unlock_irq(cdev->ccwlock);
459 if (ret == 0) 485 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
460 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 486 cdev->private->state == DEV_STATE_DISCONNECTED));
461 else
462 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
463 "device 0.%x.%04x\n",
464 ret, cdev->private->dev_id.ssid,
465 cdev->private->dev_id.devno);
466 /* Give up online reference since onlining failed. */ 487 /* Give up online reference since onlining failed. */
467 put_device(&cdev->dev); 488 put_device(&cdev->dev);
468 return (ret == 0) ? -ENODEV : ret; 489 return ret;
490
491error:
492 CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
493 "device 0.%x.%04x\n",
494 ret2, cdev->private->dev_id.ssid,
495 cdev->private->dev_id.devno);
496 cdev->private->state = DEV_STATE_OFFLINE;
497 spin_unlock_irq(cdev->ccwlock);
498 /* Give up online reference since onlining failed. */
499 put_device(&cdev->dev);
500 return ret;
469} 501}
470 502
471static int online_store_handle_offline(struct ccw_device *cdev) 503static int online_store_handle_offline(struct ccw_device *cdev)
@@ -637,8 +669,12 @@ static int ccw_device_register(struct ccw_device *cdev)
637 int ret; 669 int ret;
638 670
639 dev->bus = &ccw_bus_type; 671 dev->bus = &ccw_bus_type;
640 672 ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
641 if ((ret = device_add(dev))) 673 cdev->private->dev_id.devno);
674 if (ret)
675 return ret;
676 ret = device_add(dev);
677 if (ret)
642 return ret; 678 return ret;
643 679
644 set_bit(1, &cdev->private->registered); 680 set_bit(1, &cdev->private->registered);
@@ -1024,9 +1060,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
1024 return; 1060 return;
1025 sch = to_subchannel(cdev->dev.parent); 1061 sch = to_subchannel(cdev->dev.parent);
1026 css_sch_device_unregister(sch); 1062 css_sch_device_unregister(sch);
1027 /* Reset intparm to zeroes. */
1028 sch->config.intparm = 0;
1029 cio_commit_config(sch);
1030 /* Release cdev reference for workqueue processing.*/ 1063 /* Release cdev reference for workqueue processing.*/
1031 put_device(&cdev->dev); 1064 put_device(&cdev->dev);
1032 /* Release subchannel reference for local processing. */ 1065 /* Release subchannel reference for local processing. */
@@ -1035,6 +1068,9 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
1035 1068
1036void ccw_device_schedule_sch_unregister(struct ccw_device *cdev) 1069void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
1037{ 1070{
1071 /* Get cdev reference for workqueue processing. */
1072 if (!get_device(&cdev->dev))
1073 return;
1038 PREPARE_WORK(&cdev->private->kick_work, 1074 PREPARE_WORK(&cdev->private->kick_work,
1039 ccw_device_call_sch_unregister); 1075 ccw_device_call_sch_unregister);
1040 queue_work(slow_path_wq, &cdev->private->kick_work); 1076 queue_work(slow_path_wq, &cdev->private->kick_work);
@@ -1055,9 +1091,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1055 /* Device did not respond in time. */ 1091 /* Device did not respond in time. */
1056 case DEV_STATE_NOT_OPER: 1092 case DEV_STATE_NOT_OPER:
1057 cdev->private->flags.recog_done = 1; 1093 cdev->private->flags.recog_done = 1;
1058 /* Remove device found not operational. */
1059 if (!get_device(&cdev->dev))
1060 break;
1061 ccw_device_schedule_sch_unregister(cdev); 1094 ccw_device_schedule_sch_unregister(cdev);
1062 if (atomic_dec_and_test(&ccw_device_init_count)) 1095 if (atomic_dec_and_test(&ccw_device_init_count))
1063 wake_up(&ccw_device_init_wq); 1096 wake_up(&ccw_device_init_wq);
@@ -1095,13 +1128,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1095 init_waitqueue_head(&priv->wait_q); 1128 init_waitqueue_head(&priv->wait_q);
1096 init_timer(&priv->timer); 1129 init_timer(&priv->timer);
1097 1130
1098 /* Set an initial name for the device. */
1099 if (cio_is_console(sch->schid))
1100 cdev->dev.init_name = cio_get_console_cdev_name(sch);
1101 else
1102 dev_set_name(&cdev->dev, "0.%x.%04x",
1103 sch->schid.ssid, sch->schib.pmcw.dev);
1104
1105 /* Increase counter of devices currently in recognition. */ 1131 /* Increase counter of devices currently in recognition. */
1106 atomic_inc(&ccw_device_init_count); 1132 atomic_inc(&ccw_device_init_count);
1107 1133
@@ -1171,8 +1197,8 @@ static void io_subchannel_irq(struct subchannel *sch)
1171 1197
1172 cdev = sch_get_cdev(sch); 1198 cdev = sch_get_cdev(sch);
1173 1199
1174 CIO_TRACE_EVENT(3, "IRQ"); 1200 CIO_TRACE_EVENT(6, "IRQ");
1175 CIO_TRACE_EVENT(3, dev_name(&sch->dev)); 1201 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1176 if (cdev) 1202 if (cdev)
1177 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1203 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1178} 1204}
@@ -1210,9 +1236,6 @@ static void io_subchannel_do_unreg(struct work_struct *work)
1210 1236
1211 sch = container_of(work, struct subchannel, work); 1237 sch = container_of(work, struct subchannel, work);
1212 css_sch_device_unregister(sch); 1238 css_sch_device_unregister(sch);
1213 /* Reset intparm to zeroes. */
1214 sch->config.intparm = 0;
1215 cio_commit_config(sch);
1216 put_device(&sch->dev); 1239 put_device(&sch->dev);
1217} 1240}
1218 1241
@@ -1334,7 +1357,6 @@ io_subchannel_remove (struct subchannel *sch)
1334 cdev->private->state = DEV_STATE_NOT_OPER; 1357 cdev->private->state = DEV_STATE_NOT_OPER;
1335 spin_unlock_irqrestore(cdev->ccwlock, flags); 1358 spin_unlock_irqrestore(cdev->ccwlock, flags);
1336 ccw_device_unregister(cdev); 1359 ccw_device_unregister(cdev);
1337 put_device(&cdev->dev);
1338 kfree(sch->private); 1360 kfree(sch->private);
1339 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1361 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1340 return 0; 1362 return 0;
@@ -1571,8 +1593,6 @@ static int purge_fn(struct device *dev, void *data)
1571 spin_unlock_irq(cdev->ccwlock); 1593 spin_unlock_irq(cdev->ccwlock);
1572 if (!unreg) 1594 if (!unreg)
1573 goto out; 1595 goto out;
1574 if (!get_device(&cdev->dev))
1575 goto out;
1576 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid, 1596 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1577 priv->dev_id.devno); 1597 priv->dev_id.devno);
1578 ccw_device_schedule_sch_unregister(cdev); 1598 ccw_device_schedule_sch_unregister(cdev);
@@ -1688,10 +1708,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1688 spin_unlock_irqrestore(sch->lock, flags); 1708 spin_unlock_irqrestore(sch->lock, flags);
1689 css_sch_device_unregister(sch); 1709 css_sch_device_unregister(sch);
1690 spin_lock_irqsave(sch->lock, flags); 1710 spin_lock_irqsave(sch->lock, flags);
1691
1692 /* Reset intparm to zeroes. */
1693 sch->config.intparm = 0;
1694 cio_commit_config(sch);
1695 break; 1711 break;
1696 case REPROBE: 1712 case REPROBE:
1697 ccw_device_trigger_reprobe(cdev); 1713 ccw_device_trigger_reprobe(cdev);
@@ -1712,7 +1728,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1712 1728
1713#ifdef CONFIG_CCW_CONSOLE 1729#ifdef CONFIG_CCW_CONSOLE
1714static struct ccw_device console_cdev; 1730static struct ccw_device console_cdev;
1715static char console_cdev_name[10] = "0.x.xxxx";
1716static struct ccw_device_private console_private; 1731static struct ccw_device_private console_private;
1717static int console_cdev_in_use; 1732static int console_cdev_in_use;
1718 1733
@@ -1796,13 +1811,6 @@ int ccw_device_force_console(void)
1796 return ccw_device_pm_restore(&console_cdev.dev); 1811 return ccw_device_pm_restore(&console_cdev.dev);
1797} 1812}
1798EXPORT_SYMBOL_GPL(ccw_device_force_console); 1813EXPORT_SYMBOL_GPL(ccw_device_force_console);
1799
1800const char *cio_get_console_cdev_name(struct subchannel *sch)
1801{
1802 snprintf(console_cdev_name, 10, "0.%x.%04x",
1803 sch->schid.ssid, sch->schib.pmcw.dev);
1804 return (const char *)console_cdev_name;
1805}
1806#endif 1814#endif
1807 1815
1808/* 1816/*
@@ -2020,7 +2028,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
2020 spin_unlock_irq(sch->lock); 2028 spin_unlock_irq(sch->lock);
2021 if (ret) { 2029 if (ret) {
2022 CIO_MSG_EVENT(0, "Couldn't start recognition for device " 2030 CIO_MSG_EVENT(0, "Couldn't start recognition for device "
2023 "%s (ret=%d)\n", dev_name(&cdev->dev), ret); 2031 "0.%x.%04x (ret=%d)\n",
2032 cdev->private->dev_id.ssid,
2033 cdev->private->dev_id.devno, ret);
2024 spin_lock_irq(sch->lock); 2034 spin_lock_irq(sch->lock);
2025 cdev->private->state = DEV_STATE_DISCONNECTED; 2035 cdev->private->state = DEV_STATE_DISCONNECTED;
2026 spin_unlock_irq(sch->lock); 2036 spin_unlock_irq(sch->lock);
@@ -2083,8 +2093,9 @@ static int ccw_device_pm_restore(struct device *dev)
2083 } 2093 }
2084 /* check if the device id has changed */ 2094 /* check if the device id has changed */
2085 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 2095 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
2086 CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from " 2096 CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
2087 "%04x to %04x)\n", dev_name(&sch->dev), 2097 "changed from %04x to %04x)\n",
2098 sch->schid.ssid, sch->schid.sch_no,
2088 cdev->private->dev_id.devno, 2099 cdev->private->dev_id.devno,
2089 sch->schib.pmcw.dev); 2100 sch->schib.pmcw.dev);
2090 goto out_unreg_unlock; 2101 goto out_unreg_unlock;
@@ -2117,8 +2128,9 @@ static int ccw_device_pm_restore(struct device *dev)
2117 if (cm_enabled) { 2128 if (cm_enabled) {
2118 ret = ccw_set_cmf(cdev, 1); 2129 ret = ccw_set_cmf(cdev, 1);
2119 if (ret) { 2130 if (ret) {
2120 CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed " 2131 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
2121 "(rc=%d)\n", dev_name(&cdev->dev), ret); 2132 "(rc=%d)\n", cdev->private->dev_id.ssid,
2133 cdev->private->dev_id.devno, ret);
2122 ret = 0; 2134 ret = 0;
2123 } 2135 }
2124 } 2136 }
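The largest change above is in ccw_device_set_offline()/_set_online(): before calling ccw_device_offline(), the code now waits until the device FSM has reached a final state or DISCONNECTED, dropping the ccwlock around each sleep. A stripped-down sketch of that wait idiom, with a hypothetical state encoding:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_cdev {
	spinlock_t lock;
	wait_queue_head_t wait_q;
	int state;
};

#define EX_STATE_FINAL		0
#define EX_STATE_DISCONNECTED	1

static int example_state_settled(struct example_cdev *cdev)
{
	return cdev->state == EX_STATE_FINAL ||
	       cdev->state == EX_STATE_DISCONNECTED;
}

/* Called with cdev->lock held; returns with it held. */
static void example_wait_until_settled(struct example_cdev *cdev)
{
	while (!example_state_settled(cdev)) {
		spin_unlock_irq(&cdev->lock);
		wait_event(cdev->wait_q, example_state_settled(cdev));
		spin_lock_irq(&cdev->lock);
	}
}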
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 3db88c52d287..e728ce447f6e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -394,6 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state)
394 ccw_device_schedule_sch_unregister(cdev); 394 ccw_device_schedule_sch_unregister(cdev);
395 cdev->private->flags.donotify = 0; 395 cdev->private->flags.donotify = 0;
396 } 396 }
397 if (state == DEV_STATE_NOT_OPER) {
398 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
399 cdev->private->dev_id.devno, sch->schid.sch_no);
400 if (!ccw_device_notify(cdev, CIO_GONE))
401 ccw_device_schedule_sch_unregister(cdev);
402 cdev->private->flags.donotify = 0;
403 }
397 404
398 if (cdev->private->flags.donotify) { 405 if (cdev->private->flags.donotify) {
399 cdev->private->flags.donotify = 0; 406 cdev->private->flags.donotify = 0;
@@ -731,6 +738,17 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
731} 738}
732 739
733/* 740/*
741 * Handle path verification event in offline state.
742 */
743static void ccw_device_offline_verify(struct ccw_device *cdev,
744 enum dev_event dev_event)
745{
746 struct subchannel *sch = to_subchannel(cdev->dev.parent);
747
748 css_schedule_eval(sch->schid);
749}
750
751/*
734 * Handle path verification event. 752 * Handle path verification event.
735 */ 753 */
736static void 754static void
@@ -887,6 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
887 } 905 }
888call_handler: 906call_handler:
889 cdev->private->state = DEV_STATE_ONLINE; 907 cdev->private->state = DEV_STATE_ONLINE;
908 /* In case sensing interfered with setting the device online */
909 wake_up(&cdev->private->wait_q);
890 /* Call the handler. */ 910 /* Call the handler. */
891 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) 911 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
892 /* Start delayed path verification. */ 912 /* Start delayed path verification. */
@@ -1149,7 +1169,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1149 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1169 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1150 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 1170 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
1151 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1171 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1152 [DEV_EVENT_VERIFY] = ccw_device_nop, 1172 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1153 }, 1173 },
1154 [DEV_STATE_VERIFY] = { 1174 [DEV_STATE_VERIFY] = {
1155 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1175 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b1241f8fae88..ff7748a9199d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/s390/cio/qdio.h 2 * linux/drivers/s390/cio/qdio.h
3 * 3 *
4 * Copyright 2000,2008 IBM Corp. 4 * Copyright 2000,2009 IBM Corp.
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6 * Jan Glauber <jang@linux.vnet.ibm.com> 6 * Jan Glauber <jang@linux.vnet.ibm.com>
7 */ 7 */
@@ -246,6 +246,7 @@ struct qdio_q {
246 atomic_t nr_buf_used; 246 atomic_t nr_buf_used;
247 247
248 struct qdio_irq *irq_ptr; 248 struct qdio_irq *irq_ptr;
249 struct dentry *debugfs_q;
249 struct tasklet_struct tasklet; 250 struct tasklet_struct tasklet;
250 251
251 /* error condition during a data transfer */ 252 /* error condition during a data transfer */
@@ -267,6 +268,7 @@ struct qdio_irq {
267 struct qib qib; 268 struct qib qib;
268 u32 *dsci; /* address of device state change indicator */ 269 u32 *dsci; /* address of device state change indicator */
269 struct ccw_device *cdev; 270 struct ccw_device *cdev;
271 struct dentry *debugfs_dev;
270 272
271 unsigned long int_parm; 273 unsigned long int_parm;
272 struct subchannel_id schid; 274 struct subchannel_id schid;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b8626d4df116..1b78f639ead3 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,14 +1,12 @@
1/* 1/*
2 * drivers/s390/cio/qdio_debug.c 2 * drivers/s390/cio/qdio_debug.c
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008,2009
5 * 5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com) 6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */ 7 */
8#include <linux/proc_fs.h>
9#include <linux/seq_file.h> 8#include <linux/seq_file.h>
10#include <linux/debugfs.h> 9#include <linux/debugfs.h>
11#include <asm/qdio.h>
12#include <asm/debug.h> 10#include <asm/debug.h>
13#include "qdio_debug.h" 11#include "qdio_debug.h"
14#include "qdio.h" 12#include "qdio.h"
@@ -17,10 +15,7 @@ debug_info_t *qdio_dbf_setup;
17debug_info_t *qdio_dbf_error; 15debug_info_t *qdio_dbf_error;
18 16
19static struct dentry *debugfs_root; 17static struct dentry *debugfs_root;
20#define MAX_DEBUGFS_QUEUES 32 18#define QDIO_DEBUGFS_NAME_LEN 10
21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
22static DEFINE_MUTEX(debugfs_mutex);
23#define QDIO_DEBUGFS_NAME_LEN 40
24 19
25void qdio_allocate_dbf(struct qdio_initialize *init_data, 20void qdio_allocate_dbf(struct qdio_initialize *init_data,
26 struct qdio_irq *irq_ptr) 21 struct qdio_irq *irq_ptr)
@@ -130,20 +125,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
130 filp->f_path.dentry->d_inode->i_private); 125 filp->f_path.dentry->d_inode->i_private);
131} 126}
132 127
133static void remove_debugfs_entry(struct qdio_q *q)
134{
135 int i;
136
137 for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
138 if (!debugfs_queues[i])
139 continue;
140 if (debugfs_queues[i]->d_inode->i_private == q) {
141 debugfs_remove(debugfs_queues[i]);
142 debugfs_queues[i] = NULL;
143 }
144 }
145}
146
147static struct file_operations debugfs_fops = { 128static struct file_operations debugfs_fops = {
148 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
149 .open = qstat_seq_open, 130 .open = qstat_seq_open,
@@ -155,22 +136,15 @@ static struct file_operations debugfs_fops = {
155 136
156static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) 137static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
157{ 138{
158 int i = 0;
159 char name[QDIO_DEBUGFS_NAME_LEN]; 139 char name[QDIO_DEBUGFS_NAME_LEN];
160 140
161 while (debugfs_queues[i] != NULL) { 141 snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
162 i++;
163 if (i >= MAX_DEBUGFS_QUEUES)
164 return;
165 }
166 snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
167 dev_name(&cdev->dev),
168 q->is_input_q ? "input" : "output", 142 q->is_input_q ? "input" : "output",
169 q->nr); 143 q->nr);
170 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, 144 q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
171 debugfs_root, q, &debugfs_fops); 145 q->irq_ptr->debugfs_dev, q, &debugfs_fops);
172 if (IS_ERR(debugfs_queues[i])) 146 if (IS_ERR(q->debugfs_q))
173 debugfs_queues[i] = NULL; 147 q->debugfs_q = NULL;
174} 148}
175 149
176void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) 150void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -178,12 +152,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
178 struct qdio_q *q; 152 struct qdio_q *q;
179 int i; 153 int i;
180 154
181 mutex_lock(&debugfs_mutex); 155 irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
156 debugfs_root);
157 if (IS_ERR(irq_ptr->debugfs_dev))
158 irq_ptr->debugfs_dev = NULL;
182 for_each_input_queue(irq_ptr, q, i) 159 for_each_input_queue(irq_ptr, q, i)
183 setup_debugfs_entry(q, cdev); 160 setup_debugfs_entry(q, cdev);
184 for_each_output_queue(irq_ptr, q, i) 161 for_each_output_queue(irq_ptr, q, i)
185 setup_debugfs_entry(q, cdev); 162 setup_debugfs_entry(q, cdev);
186 mutex_unlock(&debugfs_mutex);
187} 163}
188 164
189void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) 165void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -191,17 +167,16 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
191 struct qdio_q *q; 167 struct qdio_q *q;
192 int i; 168 int i;
193 169
194 mutex_lock(&debugfs_mutex);
195 for_each_input_queue(irq_ptr, q, i) 170 for_each_input_queue(irq_ptr, q, i)
196 remove_debugfs_entry(q); 171 debugfs_remove(q->debugfs_q);
197 for_each_output_queue(irq_ptr, q, i) 172 for_each_output_queue(irq_ptr, q, i)
198 remove_debugfs_entry(q); 173 debugfs_remove(q->debugfs_q);
199 mutex_unlock(&debugfs_mutex); 174 debugfs_remove(irq_ptr->debugfs_dev);
200} 175}
201 176
202int __init qdio_debug_init(void) 177int __init qdio_debug_init(void)
203{ 178{
204 debugfs_root = debugfs_create_dir("qdio_queues", NULL); 179 debugfs_root = debugfs_create_dir("qdio", NULL);
205 180
206 qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); 181 qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
207 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); 182 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
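The qdio_debug.c rework above replaces the fixed, mutex-protected global array of queue dentries with one debugfs directory per device (kept in struct qdio_irq) and one file per queue (kept in struct qdio_q), so teardown is just a debugfs_remove() of what each object owns. A minimal sketch of the per-queue part, with hypothetical types and not the driver's actual code:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/stat.h>

struct example_q {
	int nr;
	int is_input_q;
	struct dentry *debugfs_q;
};

static void example_setup_queue_entry(struct dentry *dev_dir, struct example_q *q,
				      const struct file_operations *fops)
{
	char name[10];

	snprintf(name, sizeof(name), "%s_%d",
		 q->is_input_q ? "input" : "output", q->nr);
	q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
					   dev_dir, q, fops);
	if (IS_ERR(q->debugfs_q))
		q->debugfs_q = NULL;	/* treat a failed create as "no entry" */
}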
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 0038750ad945..9aef402a5f1b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -798,8 +798,10 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
798 798
799 if (!qdio_inbound_q_done(q)) { 799 if (!qdio_inbound_q_done(q)) {
800 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); 800 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
801 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 801 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
802 tasklet_schedule(&q->tasklet); 802 tasklet_schedule(&q->tasklet);
803 return;
804 }
803 } 805 }
804 806
805 qdio_stop_polling(q); 807 qdio_stop_polling(q);
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
deleted file mode 100644
index f8da25ab576d..000000000000
--- a/drivers/s390/cio/scsw.c
+++ /dev/null
@@ -1,843 +0,0 @@
1/*
2 * Helper functions for scsw access.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10#include <asm/cio.h>
11#include "css.h"
12#include "chsc.h"
13
14/**
15 * scsw_is_tm - check for transport mode scsw
16 * @scsw: pointer to scsw
17 *
18 * Return non-zero if the specified scsw is a transport mode scsw, zero
19 * otherwise.
20 */
21int scsw_is_tm(union scsw *scsw)
22{
23 return css_general_characteristics.fcx && (scsw->tm.x == 1);
24}
25EXPORT_SYMBOL(scsw_is_tm);
26
27/**
28 * scsw_key - return scsw key field
29 * @scsw: pointer to scsw
30 *
31 * Return the value of the key field of the specified scsw, regardless of
32 * whether it is a transport mode or command mode scsw.
33 */
34u32 scsw_key(union scsw *scsw)
35{
36 if (scsw_is_tm(scsw))
37 return scsw->tm.key;
38 else
39 return scsw->cmd.key;
40}
41EXPORT_SYMBOL(scsw_key);
42
43/**
44 * scsw_eswf - return scsw eswf field
45 * @scsw: pointer to scsw
46 *
47 * Return the value of the eswf field of the specified scsw, regardless of
48 * whether it is a transport mode or command mode scsw.
49 */
50u32 scsw_eswf(union scsw *scsw)
51{
52 if (scsw_is_tm(scsw))
53 return scsw->tm.eswf;
54 else
55 return scsw->cmd.eswf;
56}
57EXPORT_SYMBOL(scsw_eswf);
58
59/**
60 * scsw_cc - return scsw cc field
61 * @scsw: pointer to scsw
62 *
63 * Return the value of the cc field of the specified scsw, regardless of
64 * whether it is a transport mode or command mode scsw.
65 */
66u32 scsw_cc(union scsw *scsw)
67{
68 if (scsw_is_tm(scsw))
69 return scsw->tm.cc;
70 else
71 return scsw->cmd.cc;
72}
73EXPORT_SYMBOL(scsw_cc);
74
75/**
76 * scsw_ectl - return scsw ectl field
77 * @scsw: pointer to scsw
78 *
79 * Return the value of the ectl field of the specified scsw, regardless of
80 * whether it is a transport mode or command mode scsw.
81 */
82u32 scsw_ectl(union scsw *scsw)
83{
84 if (scsw_is_tm(scsw))
85 return scsw->tm.ectl;
86 else
87 return scsw->cmd.ectl;
88}
89EXPORT_SYMBOL(scsw_ectl);
90
91/**
92 * scsw_pno - return scsw pno field
93 * @scsw: pointer to scsw
94 *
95 * Return the value of the pno field of the specified scsw, regardless of
96 * whether it is a transport mode or command mode scsw.
97 */
98u32 scsw_pno(union scsw *scsw)
99{
100 if (scsw_is_tm(scsw))
101 return scsw->tm.pno;
102 else
103 return scsw->cmd.pno;
104}
105EXPORT_SYMBOL(scsw_pno);
106
107/**
108 * scsw_fctl - return scsw fctl field
109 * @scsw: pointer to scsw
110 *
111 * Return the value of the fctl field of the specified scsw, regardless of
112 * whether it is a transport mode or command mode scsw.
113 */
114u32 scsw_fctl(union scsw *scsw)
115{
116 if (scsw_is_tm(scsw))
117 return scsw->tm.fctl;
118 else
119 return scsw->cmd.fctl;
120}
121EXPORT_SYMBOL(scsw_fctl);
122
123/**
124 * scsw_actl - return scsw actl field
125 * @scsw: pointer to scsw
126 *
127 * Return the value of the actl field of the specified scsw, regardless of
128 * whether it is a transport mode or command mode scsw.
129 */
130u32 scsw_actl(union scsw *scsw)
131{
132 if (scsw_is_tm(scsw))
133 return scsw->tm.actl;
134 else
135 return scsw->cmd.actl;
136}
137EXPORT_SYMBOL(scsw_actl);
138
139/**
140 * scsw_stctl - return scsw stctl field
141 * @scsw: pointer to scsw
142 *
143 * Return the value of the stctl field of the specified scsw, regardless of
144 * whether it is a transport mode or command mode scsw.
145 */
146u32 scsw_stctl(union scsw *scsw)
147{
148 if (scsw_is_tm(scsw))
149 return scsw->tm.stctl;
150 else
151 return scsw->cmd.stctl;
152}
153EXPORT_SYMBOL(scsw_stctl);
154
155/**
156 * scsw_dstat - return scsw dstat field
157 * @scsw: pointer to scsw
158 *
159 * Return the value of the dstat field of the specified scsw, regardless of
160 * whether it is a transport mode or command mode scsw.
161 */
162u32 scsw_dstat(union scsw *scsw)
163{
164 if (scsw_is_tm(scsw))
165 return scsw->tm.dstat;
166 else
167 return scsw->cmd.dstat;
168}
169EXPORT_SYMBOL(scsw_dstat);
170
171/**
172 * scsw_cstat - return scsw cstat field
173 * @scsw: pointer to scsw
174 *
175 * Return the value of the cstat field of the specified scsw, regardless of
176 * whether it is a transport mode or command mode scsw.
177 */
178u32 scsw_cstat(union scsw *scsw)
179{
180 if (scsw_is_tm(scsw))
181 return scsw->tm.cstat;
182 else
183 return scsw->cmd.cstat;
184}
185EXPORT_SYMBOL(scsw_cstat);
186
187/**
188 * scsw_cmd_is_valid_key - check key field validity
189 * @scsw: pointer to scsw
190 *
191 * Return non-zero if the key field of the specified command mode scsw is
192 * valid, zero otherwise.
193 */
194int scsw_cmd_is_valid_key(union scsw *scsw)
195{
196 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
197}
198EXPORT_SYMBOL(scsw_cmd_is_valid_key);
199
200/**
201 * scsw_cmd_is_valid_sctl - check fctl field validity
202 * @scsw: pointer to scsw
203 *
204 * Return non-zero if the fctl field of the specified command mode scsw is
205 * valid, zero otherwise.
206 */
207int scsw_cmd_is_valid_sctl(union scsw *scsw)
208{
209 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
210}
211EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
212
213/**
214 * scsw_cmd_is_valid_eswf - check eswf field validity
215 * @scsw: pointer to scsw
216 *
217 * Return non-zero if the eswf field of the specified command mode scsw is
218 * valid, zero otherwise.
219 */
220int scsw_cmd_is_valid_eswf(union scsw *scsw)
221{
222 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
223}
224EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
225
226/**
227 * scsw_cmd_is_valid_cc - check cc field validity
228 * @scsw: pointer to scsw
229 *
230 * Return non-zero if the cc field of the specified command mode scsw is
231 * valid, zero otherwise.
232 */
233int scsw_cmd_is_valid_cc(union scsw *scsw)
234{
235 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
236 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
237}
238EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
239
240/**
241 * scsw_cmd_is_valid_fmt - check fmt field validity
242 * @scsw: pointer to scsw
243 *
244 * Return non-zero if the fmt field of the specified command mode scsw is
245 * valid, zero otherwise.
246 */
247int scsw_cmd_is_valid_fmt(union scsw *scsw)
248{
249 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
250}
251EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
252
253/**
254 * scsw_cmd_is_valid_pfch - check pfch field validity
255 * @scsw: pointer to scsw
256 *
257 * Return non-zero if the pfch field of the specified command mode scsw is
258 * valid, zero otherwise.
259 */
260int scsw_cmd_is_valid_pfch(union scsw *scsw)
261{
262 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
263}
264EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
265
266/**
267 * scsw_cmd_is_valid_isic - check isic field validity
268 * @scsw: pointer to scsw
269 *
270 * Return non-zero if the isic field of the specified command mode scsw is
271 * valid, zero otherwise.
272 */
273int scsw_cmd_is_valid_isic(union scsw *scsw)
274{
275 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
276}
277EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
278
279/**
280 * scsw_cmd_is_valid_alcc - check alcc field validity
281 * @scsw: pointer to scsw
282 *
283 * Return non-zero if the alcc field of the specified command mode scsw is
284 * valid, zero otherwise.
285 */
286int scsw_cmd_is_valid_alcc(union scsw *scsw)
287{
288 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
289}
290EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
291
292/**
293 * scsw_cmd_is_valid_ssi - check ssi field validity
294 * @scsw: pointer to scsw
295 *
296 * Return non-zero if the ssi field of the specified command mode scsw is
297 * valid, zero otherwise.
298 */
299int scsw_cmd_is_valid_ssi(union scsw *scsw)
300{
301 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
302}
303EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
304
305/**
306 * scsw_cmd_is_valid_zcc - check zcc field validity
307 * @scsw: pointer to scsw
308 *
309 * Return non-zero if the zcc field of the specified command mode scsw is
310 * valid, zero otherwise.
311 */
312int scsw_cmd_is_valid_zcc(union scsw *scsw)
313{
314 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
315 (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
316}
317EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
318
319/**
320 * scsw_cmd_is_valid_ectl - check ectl field validity
321 * @scsw: pointer to scsw
322 *
323 * Return non-zero if the ectl field of the specified command mode scsw is
324 * valid, zero otherwise.
325 */
326int scsw_cmd_is_valid_ectl(union scsw *scsw)
327{
328 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
329 !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
330 (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
331}
332EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
333
334/**
335 * scsw_cmd_is_valid_pno - check pno field validity
336 * @scsw: pointer to scsw
337 *
338 * Return non-zero if the pno field of the specified command mode scsw is
339 * valid, zero otherwise.
340 */
341int scsw_cmd_is_valid_pno(union scsw *scsw)
342{
343 return (scsw->cmd.fctl != 0) &&
344 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
345 (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
346 ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
347 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
348}
349EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
350
351/**
352 * scsw_cmd_is_valid_fctl - check fctl field validity
353 * @scsw: pointer to scsw
354 *
355 * Return non-zero if the fctl field of the specified command mode scsw is
356 * valid, zero otherwise.
357 */
358int scsw_cmd_is_valid_fctl(union scsw *scsw)
359{
360 /* Only valid if pmcw.dnv == 1*/
361 return 1;
362}
363EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
364
365/**
366 * scsw_cmd_is_valid_actl - check actl field validity
367 * @scsw: pointer to scsw
368 *
369 * Return non-zero if the actl field of the specified command mode scsw is
370 * valid, zero otherwise.
371 */
372int scsw_cmd_is_valid_actl(union scsw *scsw)
373{
374 /* Only valid if pmcw.dnv == 1*/
375 return 1;
376}
377EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
378
379/**
380 * scsw_cmd_is_valid_stctl - check stctl field validity
381 * @scsw: pointer to scsw
382 *
383 * Return non-zero if the stctl field of the specified command mode scsw is
384 * valid, zero otherwise.
385 */
386int scsw_cmd_is_valid_stctl(union scsw *scsw)
387{
388 /* Only valid if pmcw.dnv == 1*/
389 return 1;
390}
391EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
392
393/**
394 * scsw_cmd_is_valid_dstat - check dstat field validity
395 * @scsw: pointer to scsw
396 *
397 * Return non-zero if the dstat field of the specified command mode scsw is
398 * valid, zero otherwise.
399 */
400int scsw_cmd_is_valid_dstat(union scsw *scsw)
401{
402 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
403 (scsw->cmd.cc != 3);
404}
405EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
406
407/**
408 * scsw_cmd_is_valid_cstat - check cstat field validity
409 * @scsw: pointer to scsw
410 *
411 * Return non-zero if the cstat field of the specified command mode scsw is
412 * valid, zero otherwise.
413 */
414int scsw_cmd_is_valid_cstat(union scsw *scsw)
415{
416 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
417 (scsw->cmd.cc != 3);
418}
419EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
420
421/**
422 * scsw_tm_is_valid_key - check key field validity
423 * @scsw: pointer to scsw
424 *
425 * Return non-zero if the key field of the specified transport mode scsw is
426 * valid, zero otherwise.
427 */
428int scsw_tm_is_valid_key(union scsw *scsw)
429{
430 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
431}
432EXPORT_SYMBOL(scsw_tm_is_valid_key);
433
434/**
435 * scsw_tm_is_valid_eswf - check eswf field validity
436 * @scsw: pointer to scsw
437 *
438 * Return non-zero if the eswf field of the specified transport mode scsw is
439 * valid, zero otherwise.
440 */
441int scsw_tm_is_valid_eswf(union scsw *scsw)
442{
443 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
444}
445EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
446
447/**
448 * scsw_tm_is_valid_cc - check cc field validity
449 * @scsw: pointer to scsw
450 *
451 * Return non-zero if the cc field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_cc(union scsw *scsw)
{
        return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
               (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cc);

/**
 * scsw_tm_is_valid_fmt - check fmt field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fmt field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fmt(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fmt);

/**
 * scsw_tm_is_valid_x - check x field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the x field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_x(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_x);

/**
 * scsw_tm_is_valid_q - check q field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the q field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_q(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_q);

/**
 * scsw_tm_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_ectl(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
               (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_tm_is_valid_ectl);

/**
 * scsw_tm_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_pno(union scsw *scsw)
{
        return (scsw->tm.fctl != 0) &&
               (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
                ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
                 (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_tm_is_valid_pno);

/**
 * scsw_tm_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fctl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1*/
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fctl);

/**
 * scsw_tm_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_actl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1*/
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_actl);

/**
 * scsw_tm_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_stctl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1*/
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_stctl);

/**
 * scsw_tm_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_dstat(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               (scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_dstat);

/**
 * scsw_tm_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_cstat(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               (scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cstat);

/**
 * scsw_tm_is_valid_fcxs - check fcxs field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fcxs field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);

/**
 * scsw_tm_is_valid_schxs - check schxs field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the schxs field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_schxs(union scsw *scsw)
{
        return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
                                  SCHN_STAT_INTF_CTRL_CHK |
                                  SCHN_STAT_PROT_CHECK |
                                  SCHN_STAT_CHN_DATA_CHK));
}
EXPORT_SYMBOL(scsw_tm_is_valid_schxs);

/**
 * scsw_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_actl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_actl(scsw);
        else
                return scsw_cmd_is_valid_actl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_actl);

/**
 * scsw_is_valid_cc - check cc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cc field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_cc(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_cc(scsw);
        else
                return scsw_cmd_is_valid_cc(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cc);

/**
 * scsw_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_cstat(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_cstat(scsw);
        else
                return scsw_cmd_is_valid_cstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cstat);

/**
 * scsw_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_dstat(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_dstat(scsw);
        else
                return scsw_cmd_is_valid_dstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_dstat);

/**
 * scsw_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_ectl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_ectl(scsw);
        else
                return scsw_cmd_is_valid_ectl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_ectl);

/**
 * scsw_is_valid_eswf - check eswf field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the eswf field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_eswf(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_eswf(scsw);
        else
                return scsw_cmd_is_valid_eswf(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_eswf);

/**
 * scsw_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_fctl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_fctl(scsw);
        else
                return scsw_cmd_is_valid_fctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_fctl);

/**
 * scsw_is_valid_key - check key field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the key field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_key(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_key(scsw);
        else
                return scsw_cmd_is_valid_key(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_key);

/**
 * scsw_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_pno(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_pno(scsw);
        else
                return scsw_cmd_is_valid_pno(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_pno);

/**
 * scsw_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_stctl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_stctl(scsw);
        else
                return scsw_cmd_is_valid_stctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_stctl);

/**
 * scsw_cmd_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the command mode scsw indicates that the associated
 * status condition is solicited, zero if it is unsolicited.
 */
int scsw_cmd_is_solicited(union scsw *scsw)
{
        return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
                (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_cmd_is_solicited);

/**
 * scsw_tm_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the transport mode scsw indicates that the associated
 * status condition is solicited, zero if it is unsolicited.
 */
int scsw_tm_is_solicited(union scsw *scsw)
{
        return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
                (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_tm_is_solicited);

/**
 * scsw_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the transport or command mode scsw indicates that the
 * associated status condition is solicited, zero if it is unsolicited.
 */
int scsw_is_solicited(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_solicited(scsw);
        else
                return scsw_cmd_is_solicited(scsw);
}
EXPORT_SYMBOL(scsw_is_solicited);
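
The scsw_is_valid_*() wrappers above give callers a mode-agnostic interface: each one dispatches on scsw_is_tm() to the transport mode or command mode variant, so a driver does not need to know which SCSW format the channel subsystem delivered. A minimal sketch of how an interrupt handler might consume them follows; the handler itself is hypothetical, and it assumes the companion accessors scsw_dstat()/scsw_cstat() defined earlier in this file plus DEV_STAT_UNIT_CHECK from <asm/cio.h>.

/* Illustrative sketch only, not part of this patch: check field validity
 * before interpreting the status words of an interruption response block. */
static void example_handle_irb(struct irb *irb)
{
        union scsw *scsw = &irb->scsw;

        if (!scsw_is_solicited(scsw))
                return;                         /* unsolicited status */

        if (scsw_is_valid_dstat(scsw) &&
            (scsw_dstat(scsw) & DEV_STAT_UNIT_CHECK))
                pr_debug("device signalled a unit check\n");

        if (scsw_is_valid_cstat(scsw) && scsw_cstat(scsw))
                pr_debug("channel status pending: 0x%x\n", scsw_cstat(scsw));
}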
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed3dcdea7fe1..090b32a339c6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -648,7 +648,9 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
 	/* Poll on the device until all requests are finished. */
 	do {
 		flags = 0;
+		spin_lock_bh(&ap_dev->lock);
 		__ap_poll_device(ap_dev, &flags);
+		spin_unlock_bh(&ap_dev->lock);
 	} while ((flags & 1) || (flags & 2));
 
 	ap_device_remove(dev);
@@ -1109,12 +1111,15 @@ static void ap_scan_bus(struct work_struct *unused)
 
 		ap_dev->device.bus = &ap_bus_type;
 		ap_dev->device.parent = ap_root_device;
-		dev_set_name(&ap_dev->device, "card%02x",
-			     AP_QID_DEVICE(ap_dev->qid));
+		if (dev_set_name(&ap_dev->device, "card%02x",
+				 AP_QID_DEVICE(ap_dev->qid))) {
+			kfree(ap_dev);
+			continue;
+		}
 		ap_dev->device.release = ap_device_release;
 		rc = device_register(&ap_dev->device);
 		if (rc) {
-			kfree(ap_dev);
+			put_device(&ap_dev->device);
 			continue;
 		}
 		/* Add device attributes. */
@@ -1407,14 +1412,12 @@ static void ap_reset(struct ap_device *ap_dev)
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
 {
-	spin_lock(&ap_dev->lock);
 	if (!ap_dev->unregistered) {
 		if (ap_poll_queue(ap_dev, flags))
 			ap_dev->unregistered = 1;
 		if (ap_dev->reset == AP_RESET_DO)
 			ap_reset(ap_dev);
 	}
-	spin_unlock(&ap_dev->lock);
 	return 0;
 }
 
@@ -1441,7 +1444,9 @@ static void ap_poll_all(unsigned long dummy)
 		flags = 0;
 		spin_lock(&ap_device_list_lock);
 		list_for_each_entry(ap_dev, &ap_device_list, list) {
+			spin_lock(&ap_dev->lock);
 			__ap_poll_device(ap_dev, &flags);
+			spin_unlock(&ap_dev->lock);
 		}
 		spin_unlock(&ap_device_list_lock);
 	} while (flags & 1);
@@ -1487,7 +1492,9 @@ static int ap_poll_thread(void *data)
 		flags = 0;
 		spin_lock_bh(&ap_device_list_lock);
 		list_for_each_entry(ap_dev, &ap_device_list, list) {
+			spin_lock(&ap_dev->lock);
 			__ap_poll_device(ap_dev, &flags);
+			spin_unlock(&ap_dev->lock);
 		}
 		spin_unlock_bh(&ap_device_list_lock);
 	}
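
Taken together, the ap_bus.c hunks above change the locking convention for __ap_poll_device(): the function no longer takes ap_dev->lock itself, so every caller (the suspend polling loop, the poll timer, and the poll thread) must hold the per-device lock around the call, in addition to the list lock that protects the walk. A simplified, illustrative sketch of the resulting convention, mirroring the hunks rather than introducing new API:

/* Illustrative sketch: __ap_poll_device() now expects ap_dev->lock to be
 * held by the caller; the list lock only protects the list walk itself. */
static void example_poll_all(void)
{
        struct ap_device *ap_dev;
        unsigned long flags = 0;

        spin_lock_bh(&ap_device_list_lock);
        list_for_each_entry(ap_dev, &ap_device_list, list) {
                spin_lock(&ap_dev->lock);
                __ap_poll_device(ap_dev, &flags);
                spin_unlock(&ap_dev->lock);
        }
        spin_unlock_bh(&ap_device_list_lock);
}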
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index e38e5d306faf..2930fc763ac5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
 	return len;
 }
 
-void __init s390_virtio_console_init(void)
+static int __init s390_virtio_console_init(void)
 {
-	virtio_cons_early_init(early_put_chars);
+	if (!MACHINE_IS_KVM)
+		return -ENODEV;
+	return virtio_cons_early_init(early_put_chars);
 }
+console_initcall(s390_virtio_console_init);
+
 
 /*
  * We do this after core stuff, but before the drivers.
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8c36eafcfbfe..87dff11061b0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1839,9 +1839,10 @@ static int netiucv_register_device(struct net_device *ndev)
 		return -ENOMEM;
 
 	ret = device_register(dev);
-
-	if (ret)
+	if (ret) {
+		put_device(dev);
 		return ret;
+	}
 	ret = netiucv_add_files(dev);
 	if (ret)
 		goto out_unreg;
@@ -2226,8 +2227,10 @@ static int __init netiucv_init(void)
 	netiucv_dev->release = (void (*)(struct device *))kfree;
 	netiucv_dev->driver = &netiucv_driver;
 	rc = device_register(netiucv_dev);
-	if (rc)
+	if (rc) {
+		put_device(netiucv_dev);
 		goto out_driver;
+	}
 	netiucv_banner();
 	return rc;
 
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index e76a320d373b..102000d1af6f 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -219,13 +219,13 @@ static int __init smsg_init(void)
 	smsg_dev->driver = &smsg_driver;
 	rc = device_register(smsg_dev);
 	if (rc)
-		goto out_free_dev;
+		goto out_put;
 
 	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
 	return 0;
 
-out_free_dev:
-	kfree(smsg_dev);
+out_put:
+	put_device(smsg_dev);
 out_free_path:
 	iucv_path_free(smsg_path);
 	smsg_path = NULL;
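
The netiucv, smsgiucv and ap_bus changes above all apply the same error-handling rule: once device_register() has been called, the struct device is reference counted, so a failed registration must be unwound with put_device(), which drops the reference and ends up in the release callback, rather than with a direct kfree() that would bypass the kobject cleanup. A minimal sketch of the pattern, with hypothetical names:

/* Illustrative sketch only: example_release() and example_register() are
 * hypothetical; the error path is the point. */
static void example_release(struct device *dev)
{
        kfree(dev);                     /* the final free happens here */
}

static int example_register(struct device *dev)
{
        int rc;

        dev->release = example_release;
        rc = device_register(dev);
        if (rc) {
                /* registration failed: drop the reference taken during
                 * initialization; example_release() performs the kfree(). */
                put_device(dev);
                return rc;
        }
        return 0;
}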
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 042d9bce9914..d0ab23a58355 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 static void open_s3_dev(struct t3cdev *);
 static void close_s3_dev(struct t3cdev *);
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error);
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
 
 static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
 static struct cxgb3_client t3c_client = {
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = {
 	.handlers = cxgb3i_cpl_handlers,
 	.add = open_s3_dev,
 	.remove = close_s3_dev,
-	.err_handler = s3_err_handler,
+	.event_handler = s3_event_handler,
 };
 
 /**
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev)
 	cxgb3i_ddp_cleanup(t3dev);
 }
 
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
 {
 	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
 
-	cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n",
-			snic, tdev, status, error);
+	cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
+			snic, tdev, event, port);
 	if (!snic)
 		return;
 
-	switch (status) {
+	switch (event) {
 	case OFFLOAD_STATUS_DOWN:
 		snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
 		break;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 9d7c99394ec6..640f65c6ef84 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1752,12 +1752,12 @@ static int comedi_open(struct inode *inode, struct file *file)
 	mutex_lock(&dev->mutex);
 	if (dev->attached)
 		goto ok;
-	if (!capable(CAP_SYS_MODULE) && dev->in_request_module) {
+	if (!capable(CAP_NET_ADMIN) && dev->in_request_module) {
 		DPRINTK("in request module\n");
 		mutex_unlock(&dev->mutex);
 		return -ENODEV;
 	}
-	if (capable(CAP_SYS_MODULE) && dev->in_request_module)
+	if (capable(CAP_NET_ADMIN) && dev->in_request_module)
 		goto ok;
 
 	dev->in_request_module = 1;
@@ -1770,8 +1770,8 @@ static int comedi_open(struct inode *inode, struct file *file)
 
 	dev->in_request_module = 0;
 
-	if (!dev->attached && !capable(CAP_SYS_MODULE)) {
-		DPRINTK("not attached and not CAP_SYS_MODULE\n");
+	if (!dev->attached && !capable(CAP_NET_ADMIN)) {
+		DPRINTK("not attached and not CAP_NET_ADMIN\n");
 		mutex_unlock(&dev->mutex);
 		return -ENODEV;
 	}
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7b605795b770..e63c9bea6c54 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -1950,14 +1950,7 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type,
  */
 static void pohmelfs_kill_super(struct super_block *sb)
 {
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-		.nr_to_write = LONG_MAX,
-	};
-	generic_sync_sb_inodes(sb, &wbc);
-
+	sync_inodes_sb(sb);
 	kill_anon_super(sb);
 }
 