Diffstat (limited to 'drivers')

-rw-r--r--  drivers/block/nvme.c | 153
-rw-r--r--  drivers/block/rbd.c | 7
-rw-r--r--  drivers/edac/i3200_edac.c | 2
-rw-r--r--  drivers/edac/i5000_edac.c | 4
-rw-r--r--  drivers/edac/sb_edac.c | 7
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 11
-rw-r--r--  drivers/md/dm-table.c | 61
-rw-r--r--  drivers/md/dm-thin.c | 135
-rw-r--r--  drivers/md/dm-verity.c | 8
-rw-r--r--  drivers/md/dm.c | 71
-rw-r--r--  drivers/md/dm.h | 1
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/mtd/mtdchar.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 4
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 4
-rw-r--r--  drivers/net/phy/bcm87xx.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 45
-rw-r--r--  drivers/net/phy/smsc.c | 28
-rw-r--r--  drivers/net/ppp/pppoe.c | 2
-rw-r--r--  drivers/net/team/team.c | 44
-rw-r--r--  drivers/net/usb/smsc75xx.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 1
-rw-r--r--  drivers/sh/pfc/pinctrl.c | 2
-rw-r--r--  drivers/usb/core/devices.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 6
-rw-r--r--  drivers/usb/host/ohci-at91.c | 3
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 76

37 files changed, 558 insertions, 208 deletions
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 38a2d063188..ad16c68c864 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -79,6 +79,7 @@ struct nvme_dev {
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
+	u32 max_hw_sectors;
 };
 
 /*
@@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 }
 
 static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-				unsigned dword11, dma_addr_t dma_addr)
+				unsigned nsid, dma_addr_t dma_addr)
 {
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_get_features;
+	c.features.nsid = cpu_to_le32(nsid);
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
-	c.features.dword11 = cpu_to_le32(dword11);
 
 	return nvme_submit_admin_cmd(dev, &c, NULL);
 }
@@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @queue: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+	int depth = nvmeq->q_depth - 1;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	unsigned long now = jiffies;
+	int cmdid;
+
+	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+		void *ctx;
+		nvme_completion_fn fn;
+		static struct nvme_completion cqe = {
+			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+		};
+
+		if (timeout && !time_after(now, info[cmdid].timeout))
+			continue;
+		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+		fn(nvmeq->dev, ctx, &cqe);
+	}
+}
+
+static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
+{
+	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+	kfree(nvmeq);
+}
+
 static void nvme_free_queue(struct nvme_dev *dev, int qid)
 {
 	struct nvme_queue *nvmeq = dev->queues[qid];
 	int vector = dev->entry[nvmeq->cq_vector].vector;
 
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_cancel_ios(nvmeq, false);
+	spin_unlock_irq(&nvmeq->q_lock);
+
 	irq_set_affinity_hint(vector, NULL);
 	free_irq(vector, nvmeq);
 
@@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
 		adapter_delete_cq(dev, qid);
 	}
 
-	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
-				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
-					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
+	nvme_free_queue_mem(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth, int vector)
 {
 	struct device *dmadev = &dev->pci_dev->dev;
-	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
+						sizeof(struct nvme_cmd_info));
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
 	if (!nvmeq)
 		return NULL;
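
The DIV_ROUND_UP change above fixes an undersized allocation: `depth / 8` truncates, so for any queue depth that is not a multiple of eight the cmdid bitmap lost its final partial byte. A standalone sketch of the difference (plain C, illustrative values only, not part of the patch):

#include <stdio.h>

/* Same definition as the kernel's <linux/kernel.h> macro. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int depth = 1023;	/* hypothetical queue depth, not a multiple of 8 */

	/* Truncating division drops the partial byte of the bitmap... */
	printf("depth / 8          = %d bytes\n", depth / 8);		/* 127 */
	/* ...while rounding up covers all 1023 bits. */
	printf("DIV_ROUND_UP(d, 8) = %d bytes\n", DIV_ROUND_UP(depth, 8)); /* 128 */
	return 0;
}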
@@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
 
 static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
 {
-	int result;
+	int result = 0;
 	u32 aqa;
 	u64 cap;
 	unsigned long timeout;
@@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
 	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
 	dev->db_stride = NVME_CAP_STRIDE(cap);
 
-	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+	while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
 		msleep(100);
 		if (fatal_signal_pending(current))
-			return -EINTR;
+			result = -EINTR;
 		if (time_after(jiffies, timeout)) {
 			dev_err(&dev->pci_dev->dev,
 				"Device not ready; aborting initialisation\n");
-			return -ENODEV;
+			result = -ENODEV;
 		}
 	}
 
+	if (result) {
+		nvme_free_queue_mem(nvmeq);
+		return result;
+	}
+
 	result = queue_request_irq(dev, nvmeq, "nvme admin");
 	dev->queues[0] = nvmeq;
 	return result;
@@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 	offset = offset_in_page(addr);
 	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
 	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
 
 	err = get_user_pages_fast(addr, count, 1, pages);
 	if (err < count) {
@@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	return status;
 }
 
-static int nvme_user_admin_cmd(struct nvme_ns *ns,
+static int nvme_user_admin_cmd(struct nvme_dev *dev,
 					struct nvme_admin_cmd __user *ucmd)
 {
-	struct nvme_dev *dev = ns->dev;
 	struct nvme_admin_cmd cmd;
 	struct nvme_command c;
 	int status, length;
-	struct nvme_iod *iod;
+	struct nvme_iod *uninitialized_var(iod);
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 	case NVME_IOCTL_ID:
 		return ns->ns_id;
 	case NVME_IOCTL_ADMIN_CMD:
-		return nvme_user_admin_cmd(ns, (void __user *)arg);
+		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, (void __user *)arg);
 	default:
@@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = {
 	.compat_ioctl	= nvme_ioctl,
 };
 
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
-	int depth = nvmeq->q_depth - 1;
-	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-	unsigned long now = jiffies;
-	int cmdid;
-
-	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
-		void *ctx;
-		nvme_completion_fn fn;
-		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
-		if (!time_after(now, info[cmdid].timeout))
-			continue;
-		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
-		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
-	}
-}
-
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
 	while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data)
 		spin_lock_irq(&nvmeq->q_lock);
 		if (nvme_process_cq(nvmeq))
 			printk("process_cq did something\n");
-		nvme_timeout_ios(nvmeq);
+		nvme_cancel_ios(nvmeq, true);
 		nvme_resubmit_bios(nvmeq);
 		spin_unlock_irq(&nvmeq->q_lock);
 	}
@@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	ns->disk = disk;
 	lbaf = id->flbas & 0xf;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
+	if (dev->max_hw_sectors)
+		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 
 	disk->major = nvme_major;
 	disk->minors = NVME_MINORS;
@@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues, db_bar_size;
+	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
 
+	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
+								NVME_Q_DEPTH);
 	for (i = 0; i < nr_io_queues; i++) {
-		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
-							NVME_Q_DEPTH, i);
+		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
 		if (IS_ERR(dev->queues[i + 1]))
 			return PTR_ERR(dev->queues[i + 1]);
 		dev->queue_count++;
@@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+	if (ctrl->mdts) {
+		int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
+	}
 
 	id_ns = mem;
 	for (i = 1; i <= nn; i++) {
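
The MDTS handling reads as follows: the controller reports its maximum data transfer size as a power of two in units of the minimum memory page size (2^(12 + MPSMIN) bytes), and the driver needs it in 512-byte sectors, hence the `- 9`. A rough standalone check of that arithmetic, with assumed register values (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned mpsmin = 0;	/* assumed CAP.MPSMIN: min page = 2^(12+0) = 4 KiB */
	unsigned mdts = 5;	/* assumed Identify MDTS: 2^5 minimum pages */
	unsigned shift = mpsmin + 12;

	/* bytes = 2^(mdts + shift); sectors = bytes >> 9 */
	unsigned max_hw_sectors = 1u << (mdts + shift - 9);

	printf("max transfer: %u KiB = %u sectors\n",
	       (1u << (mdts + shift)) >> 10, max_hw_sectors);	/* 128 KiB = 256 */
	return 0;
}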
@@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
 	list_del(&dev->node);
 	spin_unlock(&dev_list_lock);
 
-	/* TODO: wait all I/O finished or cancel them */
-
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
 		list_del(&ns->list);
 		del_gendisk(ns->disk);
@@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
 	dma_pool_destroy(dev->prp_small_pool);
 }
 
-/* XXX: Use an ida or something to let remove / add work correctly */
-static void nvme_set_instance(struct nvme_dev *dev)
+static DEFINE_IDA(nvme_instance_ida);
+
+static int nvme_set_instance(struct nvme_dev *dev)
 {
-	static int instance;
-	dev->instance = instance++;
+	int instance, error;
+
+	do {
+		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
+			return -ENODEV;
+
+		spin_lock(&dev_list_lock);
+		error = ida_get_new(&nvme_instance_ida, &instance);
+		spin_unlock(&dev_list_lock);
+	} while (error == -EAGAIN);
+
+	if (error)
+		return -ENODEV;
+
+	dev->instance = instance;
+	return 0;
 }
 
 static void nvme_release_instance(struct nvme_dev *dev)
 {
+	spin_lock(&dev_list_lock);
+	ida_remove(&nvme_instance_ida, dev->instance);
+	spin_unlock(&dev_list_lock);
 }
 
 static int __devinit nvme_probe(struct pci_dev *pdev,
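
The allocation loop introduced here is the stock idiom for the old IDA interface: preload memory while sleeping is still allowed, take the lock, attempt the allocation, and retry when -EAGAIN reports that a racing allocation consumed the preload. The shape of the idiom in isolation (a sketch with hypothetical names, not additional driver code):

static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_lock);

static int example_get_id(int *id)
{
	int error;

	do {
		/* Preallocate with GFP_KERNEL while we can still sleep. */
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;

		/* The allocation itself must be serialised by the caller. */
		spin_lock(&example_lock);
		error = ida_get_new(&example_ida, id);
		spin_unlock(&example_lock);
	} while (error == -EAGAIN);	/* preload raced away; try again */

	return error;
}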
@@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, dev);
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-	nvme_set_instance(dev);
+	result = nvme_set_instance(dev);
+	if (result)
+		goto disable;
+
 	dev->entry[0].vector = pdev->irq;
 
 	result = nvme_setup_prp_pools(dev);
@@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
-	int result = -EBUSY;
+	int result;
 
 	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
 	if (IS_ERR(nvme_thread))
 		return PTR_ERR(nvme_thread);
 
-	nvme_major = register_blkdev(nvme_major, "nvme");
-	if (nvme_major <= 0)
+	result = register_blkdev(nvme_major, "nvme");
+	if (result < 0)
 		goto kill_kthread;
+	else if (result > 0)
+		nvme_major = result;
 
 	result = pci_register_driver(&nvme_driver);
 	if (result)
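
The nvme_init() change depends on register_blkdev()'s return convention: negative on failure, zero when a caller-specified major was granted, and the freshly allocated major when 0 was passed in. The old code stored the result straight into nvme_major, so a successful request for a fixed major (return value 0) would have clobbered it. An annotated restatement of the fixed flow (a fragment, for reading alongside the hunk above):

	result = register_blkdev(nvme_major, "nvme");	/* nvme_major may be 0 */
	if (result < 0)
		goto kill_kthread;	/* hard failure */
	else if (result > 0)
		nvme_major = result;	/* a dynamic major was allocated */
	/* result == 0: the requested fixed major was granted; keep it. */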
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9917943a357..54a55f03115 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
 	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
 
-	rbd_get_dev(rbd_dev);
-
-	set_device_ro(bdev, rbd_dev->read_only);
-
 	if ((mode & FMODE_WRITE) && rbd_dev->read_only)
 		return -EROFS;
 
+	rbd_get_dev(rbd_dev);
+	set_device_ro(bdev, rbd_dev->read_only);
+
 	return 0;
 }
 
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 47180a08eda..b6653a6fc5d 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
 	for (j = 0; j < nr_channels; j++) {
 		struct dimm_info *dimm = csrow->channels[j]->dimm;
 
-		dimm->nr_pages = nr_pages / nr_channels;
+		dimm->nr_pages = nr_pages;
 		dimm->grain = nr_pages << PAGE_SHIFT;
 		dimm->mtype = MEM_DDR2;
 		dimm->dtype = DEV_UNKNOWN;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 39c63757c2a..6a49dd00b81 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
 	/* add the number of COLUMN bits */
 	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
 
+	/* Dual-rank memories have twice the size */
+	if (dinfo->dual_rank)
+		addrBits++;
+
 	addrBits += 6;	/* add 64 bits per DIMM */
 	addrBits -= 20;	/* divide by 2^^20 */
 	addrBits -= 3;	/* 8 bits per bytes */
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f3b1f9fafa4..5715b7c2c51 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 {
 	struct sbridge_pvt *pvt = mci->pvt_info;
 	struct dimm_info *dimm;
-	int i, j, banks, ranks, rows, cols, size, npages;
+	unsigned i, j, banks, ranks, rows, cols, npages;
+	u64 size;
 	u32 reg;
 	enum edac_type mode;
 	enum mem_type mtype;
@@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 			cols = numcol(mtr);
 
 			/* DDR3 has 8 I/O banks */
-			size = (rows * cols * banks * ranks) >> (20 - 3);
+			size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
 			npages = MiB_TO_PAGES(size);
 
-			edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+			edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
 				 pvt->sbridge_dev->mc, i, j,
 				 size, npages,
 				 banks, ranks, rows, cols);
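
The (u64) cast is the substance of this fix: with 32-bit arithmetic the product rows * cols * banks * ranks wraps for large DIMMs before the shift is ever applied. Promoting the first operand forces the whole chain into 64 bits. A runnable demonstration with assumed (illustrative) geometry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed geometry: 2^32 addresses of 8 bytes, i.e. a 32 GiB DIMM */
	unsigned rows = 1u << 16, cols = 1u << 12, banks = 8, ranks = 2;

	unsigned wrapped = (rows * cols * banks * ranks) >> (20 - 3);
	uint64_t correct = ((uint64_t)rows * cols * banks * ranks) >> (20 - 3);

	/* The 32-bit product wraps to 0; the 64-bit one gives 32768 MiB. */
	printf("32-bit: %u MiB, 64-bit: %llu MiB\n",
	       wrapped, (unsigned long long)correct);
	return 0;
}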
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 8a420f13905..ed94b4ea72e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
 {
 	struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+	__set_gpio_level_p012(group, pin, value);
 	__set_gpio_dir_p012(group, pin, 0);
 
 	return 0;
@@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 {
 	struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+	__set_gpio_level_p3(group, pin, value);
 	__set_gpio_dir_p3(group, pin, 0);
 
 	return 0;
@@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
 	int value)
 {
+	struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+	__set_gpo_level_p3(group, pin, value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ff23d88880e..3ca240b4413 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
 		return 0;
 	} else
 	if (init->class == 0x906e) {
-		NV_ERROR(dev, "906e not supported yet\n");
+		NV_DEBUG(dev, "906e not supported yet\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index f704e942372..f376c39310d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev)
 	priv = dev_priv->engine.fb.priv;
 
 	nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
+	nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 7d85553d518..cd39eb99f5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nvc0_fifo_isr(struct drm_device *dev)
 {
-	u32 stat = nv_rd32(dev, 0x002100);
+	u32 mask = nv_rd32(dev, 0x002140);
+	u32 stat = nv_rd32(dev, 0x002100) & mask;
 
 	if (stat & 0x00000100) {
 		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index e98d144e6eb..281bece751b 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -345,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nve0_fifo_isr(struct drm_device *dev)
 {
-	u32 stat = nv_rd32(dev, 0x002100);
+	u32 mask = nv_rd32(dev, 0x002140);
+	u32 stat = nv_rd32(dev, 0x002100) & mask;
 
 	if (stat & 0x00000100) {
 		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ba055e9ca00..8d9dc44f1f9 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector)
 static int udl_mode_valid(struct drm_connector *connector,
 			  struct drm_display_mode *mode)
 {
+	struct udl_device *udl = connector->dev->dev_private;
+	if (!udl->sku_pixel_limit)
+		return 0;
+
+	if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+		return MODE_VIRTUAL_Y;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f2fb8f15e2f..7e0743358df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
 	}
 
 
-	event = kzalloc(sizeof(event->event), GFP_KERNEL);
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (unlikely(event == NULL)) {
 		DRM_ERROR("Failed to allocate an event.\n");
 		ret = -ENOMEM;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d8abb90a6c2..034233eefc8 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 			       unsigned long arg)
 {
 	struct multipath *m = ti->private;
+	struct pgpath *pgpath;
 	struct block_device *bdev;
 	fmode_t mode;
 	unsigned long flags;
@@ -1570,12 +1571,14 @@ again:
 	if (!m->current_pgpath)
 		__choose_pgpath(m, 0);
 
-	if (m->current_pgpath) {
-		bdev = m->current_pgpath->path.dev->bdev;
-		mode = m->current_pgpath->path.dev->mode;
+	pgpath = m->current_pgpath;
+
+	if (pgpath) {
+		bdev = pgpath->path.dev->bdev;
+		mode = pgpath->path.dev->mode;
 	}
 
-	if (m->queue_io)
+	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
 		r = -EAGAIN;
 	else if (!bdev)
 		r = -EIO;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aa..100368eb799 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+			sector_t start, sector_t len, void *data)
+{
+	unsigned *num_devices = data;
+
+	(*num_devices)++;
+
+	return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+	struct dm_target *uninitialized_var(ti);
+	unsigned i = 0, num_devices = 0;
+
+	while (i < dm_table_get_num_targets(table)) {
+		ti = dm_table_get_target(table, i++);
+
+		if (!ti->type->iterate_devices)
+			return false;
+
+		ti->type->iterate_devices(ti, count_device, &num_devices);
+		if (num_devices)
+			return false;
+	}
+
+	return true;
+}
+
 /*
  * Establish the new table's queue_limits and validate them.
  */
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
@@ -1396,7 +1439,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1448,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	dm_table_set_integrity(t);
 
 	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool, Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
+	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios
 	 * are processed by request-based dm, which refers to the queue
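
The dm-table refactoring above turns the old single-purpose non-rotational test into a generic "does every underlying device satisfy this callout" walk, which the new entropy logic then reuses with device_is_not_random. Writing a further predicate only takes a function with the iterate_devices_callout_fn signature; a hypothetical example, modelled on checks that already appear in this patch:

/* Hypothetical callout: nonzero when the device's queue advertises
 * discard support (mirrors data_dev_supports_discard() in dm-thin.c). */
static int device_supports_discard(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

/* Usage: true only if every underlying device passes the callout:
 *	if (dm_table_all_devices_attribute(t, device_supports_discard))
 *		...
 */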
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index af1fc3b2c2a..c29410af1e2 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,9 +509,9 @@ enum pool_mode {
 struct pool_features {
 	enum pool_mode mode;
 
-	unsigned zero_new_blocks:1;
-	unsigned discard_enabled:1;
-	unsigned discard_passdown:1;
+	bool zero_new_blocks:1;
+	bool discard_enabled:1;
+	bool discard_passdown:1;
 };
 
 struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
 	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
-	struct pool_features pf;
+	struct pool_features requested_pf; /* Features requested during table load */
+	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
 };
 
 /*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
 /*----------------------------------------------------------------
  * Binding of control targets to a pool object
  *--------------------------------------------------------------*/
+static bool data_dev_supports_discard(struct pool_c *pt)
+{
+	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the data device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct pool_c *pt)
+{
+	struct pool *pool = pt->pool;
+	struct block_device *data_bdev = pt->data_dev->bdev;
+	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+	sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
+	const char *reason = NULL;
+	char buf[BDEVNAME_SIZE];
+
+	if (!pt->adjusted_pf.discard_passdown)
+		return;
+
+	if (!data_dev_supports_discard(pt))
+		reason = "discard unsupported";
+
+	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
+		reason = "max discard sectors smaller than a block";
+
+	else if (data_limits->discard_granularity > block_size)
+		reason = "discard granularity larger than a block";
+
+	else if (block_size & (data_limits->discard_granularity - 1))
+		reason = "discard granularity not a factor of block size";
+
+	if (reason) {
+		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+		pt->adjusted_pf.discard_passdown = false;
+	}
+}
+
 static int bind_control_target(struct pool *pool, struct dm_target *ti)
 {
 	struct pool_c *pt = ti->private;
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 	 * We want to make sure that degraded pools are never upgraded.
 	 */
 	enum pool_mode old_mode = pool->pf.mode;
-	enum pool_mode new_mode = pt->pf.mode;
+	enum pool_mode new_mode = pt->adjusted_pf.mode;
 
 	if (old_mode > new_mode)
 		new_mode = old_mode;
 
 	pool->ti = ti;
 	pool->low_water_blocks = pt->low_water_blocks;
-	pool->pf = pt->pf;
-	set_pool_mode(pool, new_mode);
+	pool->pf = pt->adjusted_pf;
 
-	/*
-	 * If discard_passdown was enabled verify that the data device
-	 * supports discards. Disable discard_passdown if not; otherwise
-	 * -EOPNOTSUPP will be returned.
-	 */
-	/* FIXME: pull this out into a sep fn. */
-	if (pt->pf.discard_passdown) {
-		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-		if (!q || !blk_queue_discard(q)) {
-			char buf[BDEVNAME_SIZE];
-			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
-			       bdevname(pt->data_dev->bdev, buf));
-			pool->pf.discard_passdown = 0;
-		}
-	}
+	set_pool_mode(pool, new_mode);
 
 	return 0;
 }
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 static void pool_features_init(struct pool_features *pf)
 {
 	pf->mode = PM_WRITE;
-	pf->zero_new_blocks = 1;
-	pf->discard_enabled = 1;
-	pf->discard_passdown = 1;
+	pf->zero_new_blocks = true;
+	pf->discard_enabled = true;
+	pf->discard_passdown = true;
 }
 
 static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 		argc--;
 
 		if (!strcasecmp(arg_name, "skip_block_zeroing"))
-			pf->zero_new_blocks = 0;
+			pf->zero_new_blocks = false;
 
 		else if (!strcasecmp(arg_name, "ignore_discard"))
-			pf->discard_enabled = 0;
+			pf->discard_enabled = false;
 
 		else if (!strcasecmp(arg_name, "no_discard_passdown"))
-			pf->discard_passdown = 0;
+			pf->discard_passdown = false;
 
 		else if (!strcasecmp(arg_name, "read_only"))
 			pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	pt->metadata_dev = metadata_dev;
 	pt->data_dev = data_dev;
 	pt->low_water_blocks = low_water_blocks;
-	pt->pf = pf;
+	pt->adjusted_pf = pt->requested_pf = pf;
 	ti->num_flush_requests = 1;
+
 	/*
 	 * Only need to enable discards if the pool should pass
 	 * them down to the data device. The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	 */
 	if (pf.discard_enabled && pf.discard_passdown) {
 		ti->num_discard_requests = 1;
+
 		/*
 		 * Setting 'discards_supported' circumvents the normal
 		 * stacking of discard limits (this keeps the pool and
 		 * thin devices' discard limits consistent).
 		 */
 		ti->discards_supported = true;
+		ti->discard_zeroes_data_unsupported = true;
 	}
 	ti->private = pt;
 
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
 		       (unsigned long)pool->sectors_per_block,
 		       (unsigned long long)pt->low_water_blocks);
-		emit_flags(&pt->pf, result, sz, maxlen);
+		emit_flags(&pt->requested_pf, result, sz, maxlen);
 		break;
 	}
 
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
-static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
-	/*
-	 * FIXME: these limits may be incompatible with the pool's data device
-	 */
+	struct pool *pool = pt->pool;
+	struct queue_limits *data_limits;
+
 	limits->max_discard_sectors = pool->sectors_per_block;
 
 	/*
-	 * This is just a hint, and not enforced.  We have to cope with
-	 * bios that cover a block partially.  A discard that spans a block
-	 * boundary is not sent to this target.
+	 * discard_granularity is just a hint, and not enforced.
 	 */
-	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-	limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+	if (pt->adjusted_pf.discard_passdown) {
+		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+		limits->discard_granularity = data_limits->discard_granularity;
+	} else
+		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	blk_limits_io_min(limits, 0);
 	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-	if (pool->pf.discard_enabled)
-		set_discard_limits(pool, limits);
+
+	/*
+	 * pt->adjusted_pf is a staging area for the actual features to use.
+	 * They get transferred to the live pool in bind_control_target()
+	 * called from pool_preresume().
+	 */
+	if (!pt->adjusted_pf.discard_enabled)
+		return;
+
+	disable_passdown_if_not_supported(pt);
+
+	set_discard_limits(pt, limits);
 }
 
 static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
 	return 0;
 }
 
+/*
+ * A thin device always inherits its queue limits from its pool.
+ */
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct thin_c *tc = ti->private;
-	struct pool *pool = tc->pool;
 
-	blk_limits_io_min(limits, 0);
-	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-	set_discard_limits(pool, limits);
+	*limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
 }
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
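
One detail in disable_passdown_if_not_supported() above is worth spelling out: `block_size & (data_limits->discard_granularity - 1)` tests that the granularity divides the pool's block size, which is valid only because discard_granularity is a power of two — for a power-of-two g, `x & (g - 1)` equals `x % g`. A quick standalone check with assumed values (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned block_size  = 1u << 20;	/* assumed 1 MiB pool block */
	unsigned granularity = 1u << 16;	/* assumed 64 KiB discard granularity */

	/* For power-of-two granularity, x & (g - 1) == x % g. */
	if (block_size & (granularity - 1))
		printf("granularity is not a factor of the block size\n");
	else
		printf("granularity divides the block size cleanly\n");
	return 0;
}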
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 254d19268ad..892ae2766aa 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	v->hash_dev_block_bits = ffs(num) - 1;
 
 	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
-	    num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
-	    (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
+	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
 		ti->error = "Invalid data blocks";
 		r = -EINVAL;
 		goto bad;
@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
-	    num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
-	    (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
+	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
 		ti->error = "Invalid hash start";
 		r = -EINVAL;
 		goto bad;
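
The rewritten validation is a round-trip test: shift the user-supplied block count up into sectors within sector_t, shift it back down, and compare against the original; any bits truncated by the narrowing cast make the comparison fail. Modelled standalone, with a deliberately 32-bit stand-in for sector_t so the truncation is easy to trigger (assumed values):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t sector32_t;	/* stand-in for a 32-bit sector_t */

int main(void)
{
	unsigned long long num_ll = 1ULL << 30;	/* assumed block count from the table line */
	unsigned shift = 3;			/* 4 KiB blocks -> 512-byte sectors */

	sector32_t sectors = (sector32_t)(num_ll << shift);	/* 2^33 truncates to 0 */

	if ((unsigned long long)(sectors >> shift) != num_ll)
		printf("Invalid data blocks (count overflows sector_t)\n");
	else
		printf("fits: %u sectors\n", (unsigned)sectors);
	return 0;
}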
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b4..67ffa391edc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
 	int r = error;
 	struct dm_rq_target_io *tio = clone->end_io_data;
-	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+	dm_request_endio_fn rq_end_io = NULL;
 
-	if (mapped && rq_end_io)
-		r = rq_end_io(tio->ti, clone, error, &tio->info);
+	if (tio->ti) {
+		rq_end_io = tio->ti->type->rq_end_io;
+
+		if (mapped && rq_end_io)
+			r = rq_end_io(tio->ti, clone, error, &tio->info);
+	}
 
 	if (r <= 0)
 		/* The target wants to complete the I/O */
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	/*
-	 * Hold the md reference here for the in-flight I/O.
-	 * We can't rely on the reference count by device opener,
-	 * because the device may be closed during the request completion
-	 * when all bios are completed.
-	 * See the comment in rq_completed() too.
-	 */
-	dm_get(md);
-
 	tio->ti = ti;
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+	struct request *clone;
+
+	blk_start_request(orig);
+	clone = orig->special;
+	atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+	/*
+	 * Hold the md reference here for the in-flight I/O.
+	 * We can't rely on the reference count by device opener,
+	 * because the device may be closed during the request completion
+	 * when all bios are completed.
+	 * See the comment in rq_completed() too.
+	 */
+	dm_get(md);
+
+	return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
 		pos = blk_rq_pos(rq);
 
 		ti = dm_table_find_target(map, pos);
-		BUG_ON(!dm_target_is_valid(ti));
+		if (!dm_target_is_valid(ti)) {
+			/*
+			 * Must perform setup, that dm_done() requires,
+			 * before calling dm_kill_unmapped_request
+			 */
+			DMERR_LIMIT("request attempted access beyond the end of device");
+			clone = dm_start_request(md, rq);
+			dm_kill_unmapped_request(clone, -EIO);
+			continue;
+		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		blk_start_request(rq);
-		clone = rq->special;
-		atomic_inc(&md->pending[rq_data_dir(clone)]);
+		clone = dm_start_request(md, rq);
 
 		spin_unlock(q->queue_lock);
 		if (map_request(ti, clone, md))
@@ -1684,8 +1706,6 @@ delay_and_out:
 	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
-
-	return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
@@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-	struct dm_table *map = ERR_PTR(-EINVAL);
+	struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
 	struct queue_limits limits;
 	int r;
 
@@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	if (!dm_suspended_md(md))
 		goto out;
 
+	/*
+	 * If the new table has no data devices, retain the existing limits.
+	 * This helps multipath with queue_if_no_path if all paths disappear,
+	 * then new I/O is queued based on these limits, and then some paths
+	 * reappear.
+	 */
+	if (dm_table_has_no_data_devices(table)) {
+		live_map = dm_get_live_table(md);
+		if (live_map)
+			limits = md->queue->limits;
+		dm_table_put(live_map);
+	}
+
 	r = dm_calculate_queue_limits(table, &limits);
 	if (r) {
 		map = ERR_PTR(r);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 52eef493d26..6a99fefaa74 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context);
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+bool dm_table_has_no_data_devices(struct dm_table *table);
 int dm_calculate_queue_limits(struct dm_table *table,
 			      struct queue_limits *limits);
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c2eb38f3c5..0138a727c1f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1512,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
 	do {
 		int n = conf->copies;
 		int cnt = 0;
+		int this = first;
 		while (n--) {
-			if (conf->mirrors[first].rdev &&
-			    first != ignore)
+			if (conf->mirrors[this].rdev &&
+			    this != ignore)
 				cnt++;
-			first = (first+1) % geo->raid_disks;
+			this = (this+1) % geo->raid_disks;
 		}
 		if (cnt == 0)
 			return 0;
+		first = (first + geo->near_copies) % geo->raid_disks;
 	} while (first != 0);
 	return 1;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7031b865b3a..0689173fd9f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1591,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 #ifdef CONFIG_MULTICORE_RAID456
 		init_waitqueue_head(&nsh->ops.wait_for_ops);
 #endif
+		spin_lock_init(&nsh->stripe_lock);
 
 		list_add(&nsh->lru, &newstripes);
 	}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482bec57..a6e74514e66 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
 }
 #endif
 
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+	return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+	return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+	pgoff_t pgoff = off >> PAGE_SHIFT;
+	if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+		return -EINVAL;
+	if (off + get_vm_size(vma) - 1 < off)
+		return -EINVAL;
+	vma->vm_pgoff = pgoff;
+	return 0;
+}
+
 /*
  * set up a mapping for shared memory segments
  */
@@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
 	struct map_info *map = mtd->priv;
-	unsigned long start;
-	unsigned long off;
-	u32 len;
+	resource_size_t start, off;
+	unsigned long len, vma_len;
 
 	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
-		off = vma->vm_pgoff << PAGE_SHIFT;
+		off = get_vm_offset(vma);
 		start = map->phys;
 		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
 		start &= PAGE_MASK;
-		if ((vma->vm_end - vma->vm_start + off) > len)
+		vma_len = get_vm_size(vma);
+
+		/* Overflow in off+len? */
+		if (vma_len + off < off)
+			return -EINVAL;
+		/* Does it fit in the mapping? */
+		if (vma_len + off > len)
 			return -EINVAL;
 
 		off += start;
-		vma->vm_pgoff = off >> PAGE_SHIFT;
+		/* Did that overflow? */
+		if (off < start)
+			return -EINVAL;
+		if (set_vm_offset(vma, off) < 0)
+			return -EINVAL;
 		vma->vm_flags |= VM_IO | VM_RESERVED;
 
 #ifdef pgprot_noncached
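
Both new checks in mtdchar_mmap() use the same unsigned-wraparound idiom: for unsigned values, `a + b < a` (or `a + b - 1 < a` for an inclusive end) is true exactly when the addition wrapped. Condensed into a runnable form with assumed values:

#include <stdio.h>
#include <stdint.h>

/* Nonzero when [off, off + len) would wrap around the type's range. */
static int range_overflows(uint64_t off, uint64_t len)
{
	return len && off + len - 1 < off;
}

int main(void)
{
	uint64_t off = UINT64_MAX - 4096 + 1;	/* assumed pathological offset */

	printf("8 KiB at top of range wraps: %d\n",
	       range_overflows(off, 8192));	/* 1 */
	printf("4 KiB at top of range fits: %d\n",
	       range_overflows(off, 4096));	/* 0: ends at UINT64_MAX */
	return 0;
}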
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 79cebd8525c..e48312f2305 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8564,7 +8564,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 error:
-	iounmap(bp->regview);
+	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index c42bbb16cda..a688a2ddcfd 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -722,10 +722,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
 				   octeon_mgmt_adjust_link, 0,
 				   PHY_INTERFACE_MODE_MII);
 
-	if (IS_ERR(p->phydev)) {
-		p->phydev = NULL;
+	if (!p->phydev)
 		return -1;
-	}
 
 	phy_start_aneg(p->phydev);
 
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index e559dfa06d6..6fa74d530e4 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
 	phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
 				PHY_INTERFACE_MODE_SGMII);
 
-	if (IS_ERR(phydev)) {
+	if (!phydev) {
 		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
-		return PTR_ERR(phydev);
+		return -ENODEV;
 	}
 
 	mac->phydev = phydev;
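
Both PHY hunks above correct the same calling convention: of_phy_connect() reports failure by returning NULL, not an ERR_PTR, so the removed IS_ERR()/PTR_ERR() pattern could never observe the failure. A fragment sketching the corrected shape (adjust_link_cb and the chosen errno are illustrative, not from the patch):

	/* of_phy_connect() returns NULL on failure; there is no encoded
	 * errno to unwrap, so the caller picks its own. */
	phydev = of_phy_connect(dev, phy_dn, &adjust_link_cb, 0,
				PHY_INTERFACE_MODE_SGMII);
	if (!phydev)
		return -ENODEV;
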
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b8ead696141..2a179d08720 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -15,7 +15,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
 
 	do {
 		/* give atleast 1ms for firmware to respond */
-		msleep(1);
+		mdelay(1);
 
 		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
 			return QLCNIC_CDRP_RSP_TIMEOUT;
@@ -601,7 +601,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
 		qlcnic_fw_cmd_destroy_tx_ctx(adapter);
 
 		/* Allow dma queues to drain after context reset */
-		msleep(20);
+		mdelay(20);
 	}
 }
 
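
The two qlcnic hunks swap msleep() for mdelay() because these paths can be reached in atomic context, where sleeping is forbidden: msleep() may schedule, while mdelay() busy-waits. A sketch of the underlying rule, with placeholder function names:

	#include <linux/delay.h>

	static void wait_in_process_context(void)
	{
		msleep(20);	/* may schedule; process context only */
	}

	static void wait_in_atomic_context(void)	/* e.g. under a spinlock */
	{
		mdelay(20);	/* busy-waits; never schedules */
	}
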
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 2346b38b983..799789518e8 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -229,3 +229,5 @@ static void __exit bcm87xx_exit(void)
 					 ARRAY_SIZE(bcm87xx_driver));
 }
 module_exit(bcm87xx_exit);
+
+MODULE_LICENSE("GPL");
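
The bcm87xx change adds the license tag the module was missing; without it the kernel taints itself on load and refuses to resolve GPL-only symbols for the module. A minimal boilerplate sketch with placeholder names showing where the tag lives:

	#include <linux/module.h>

	static int __init demo_init(void) { return 0; }
	static void __exit demo_exit(void) { }

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");	/* the tag this patch adds */
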
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf287e0eb40..2165d5fdb8c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -21,6 +21,12 @@
 #include <linux/phy.h>
 #include <linux/micrel_phy.h>
 
+/* Operation Mode Strap Override */
+#define MII_KSZPHY_OMSO			0x16
+#define KSZPHY_OMSO_B_CAST_OFF		(1 << 9)
+#define KSZPHY_OMSO_RMII_OVERRIDE	(1 << 1)
+#define KSZPHY_OMSO_MII_OVERRIDE	(1 << 0)
+
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS		0x1B
 #define KSZPHY_INTCS_JABBER		(1 << 15)
@@ -101,6 +107,13 @@ static int kszphy_config_init(struct phy_device *phydev)
 	return 0;
 }
 
+static int ksz8021_config_init(struct phy_device *phydev)
+{
+	const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+	phy_write(phydev, MII_KSZPHY_OMSO, val);
+	return 0;
+}
+
 static int ks8051_config_init(struct phy_device *phydev)
 {
 	int regval;
@@ -128,9 +141,22 @@ static struct phy_driver ksphy_driver[] = {
 	.config_intr	= ks8737_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
-	.phy_id		= PHY_ID_KS8041,
+	.phy_id		= PHY_ID_KSZ8021,
+	.phy_id_mask	= 0x00ffffff,
+	.name		= "Micrel KSZ8021",
+	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= ksz8021_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= kszphy_ack_interrupt,
+	.config_intr	= kszphy_config_intr,
+	.driver		= { .owner = THIS_MODULE,},
+}, {
+	.phy_id		= PHY_ID_KSZ8041,
 	.phy_id_mask	= 0x00fffff0,
-	.name		= "Micrel KS8041",
+	.name		= "Micrel KSZ8041",
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
 			  | SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -141,9 +167,9 @@ static struct phy_driver ksphy_driver[] = {
 	.config_intr	= kszphy_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
-	.phy_id		= PHY_ID_KS8051,
+	.phy_id		= PHY_ID_KSZ8051,
 	.phy_id_mask	= 0x00fffff0,
-	.name		= "Micrel KS8051",
+	.name		= "Micrel KSZ8051",
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
 			  | SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -154,8 +180,8 @@ static struct phy_driver ksphy_driver[] = {
 	.config_intr	= kszphy_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
-	.phy_id		= PHY_ID_KS8001,
-	.name		= "Micrel KS8001 or KS8721",
+	.phy_id		= PHY_ID_KSZ8001,
+	.name		= "Micrel KSZ8001 or KS8721",
 	.phy_id_mask	= 0x00ffffff,
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -201,10 +227,11 @@ MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
 	{ PHY_ID_KSZ9021, 0x000ffffe },
-	{ PHY_ID_KS8001, 0x00ffffff },
+	{ PHY_ID_KSZ8001, 0x00ffffff },
 	{ PHY_ID_KS8737, 0x00fffff0 },
-	{ PHY_ID_KS8041, 0x00fffff0 },
-	{ PHY_ID_KS8051, 0x00fffff0 },
+	{ PHY_ID_KSZ8021, 0x00ffffff },
+	{ PHY_ID_KSZ8041, 0x00fffff0 },
+	{ PHY_ID_KSZ8051, 0x00fffff0 },
 	{ }
 };
 
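
In the micrel table above, note the masks: 0x00fffff0 ignores the low revision nibble of the PHY ID, while the new KSZ8021 entry matches all 24 ID bits with 0x00ffffff. A standalone illustration of the matching rule (the sample IDs are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* A PHY matches a table entry when the bits selected by the mask
	 * agree; a 0x00fffff0 mask therefore tolerates any revision. */
	static int phy_matches(uint32_t id, uint32_t entry, uint32_t mask)
	{
		return (id & mask) == (entry & mask);
	}

	int main(void)
	{
		printf("%d\n", phy_matches(0x00221513, 0x00221510, 0x00fffff0)); /* 1 */
		printf("%d\n", phy_matches(0x00221513, 0x00221510, 0x00ffffff)); /* 0 */
		return 0;
	}
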
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 6d6192316b3..88e3991464e 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -56,6 +56,32 @@ static int smsc_phy_config_init(struct phy_device *phydev)
 	return smsc_phy_ack_interrupt (phydev);
 }
 
+static int lan87xx_config_init(struct phy_device *phydev)
+{
+	/*
+	 * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
+	 * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
+	 * to a bug on the chip.
+	 *
+	 * When the system is powered on with the network cable being
+	 * disconnected all the way until after ifconfig ethX up is
+	 * issued for the LAN port with this PHY, connecting the cable
+	 * afterwards does not cause LINK change detection, while the
+	 * expected behavior is the Link UP being detected.
+	 */
+	int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+	if (rc < 0)
+		return rc;
+
+	rc &= ~MII_LAN83C185_EDPWRDOWN;
+
+	rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
+	if (rc < 0)
+		return rc;
+
+	return smsc_phy_ack_interrupt(phydev);
+}
+
 static int lan911x_config_init(struct phy_device *phydev)
 {
 	return smsc_phy_ack_interrupt(phydev);
@@ -162,7 +188,7 @@ static struct phy_driver smsc_phy_driver[] = {
 	/* basic functions */
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.config_init	= smsc_phy_config_init,
+	.config_init	= lan87xx_config_init,
 
 	/* IRQ related */
 	.ack_interrupt	= smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index cbf7047decc..20f31d0d153 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock)
 
 	po = pppox_sk(sk);
 
-	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
 		dev_put(po->pppoe_dev);
 		po->pppoe_dev = NULL;
 	}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 341b65dbbcd..f8cd61f449a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -848,7 +848,7 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
 }
 #endif
 
-static void __team_port_change_check(struct team_port *port, bool linkup);
+static void __team_port_change_port_added(struct team_port *port, bool linkup);
 
 static int team_port_add(struct team *team, struct net_device *port_dev)
 {
@@ -948,7 +948,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 	team_port_enable(team, port);
 	list_add_tail_rcu(&port->list, &team->port_list);
 	__team_compute_features(team);
-	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
+	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
 	__team_options_change_check(team);
 
 	netdev_info(dev, "Port device %s added\n", portname);
@@ -983,6 +983,8 @@ err_set_mtu:
 	return err;
 }
 
+static void __team_port_change_port_removed(struct team_port *port);
+
 static int team_port_del(struct team *team, struct net_device *port_dev)
 {
 	struct net_device *dev = team->dev;
@@ -999,8 +1001,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 	__team_option_inst_mark_removed_port(team, port);
 	__team_options_change_check(team);
 	__team_option_inst_del_port(team, port);
-	port->removed = true;
-	__team_port_change_check(port, false);
+	__team_port_change_port_removed(port);
 	team_port_disable(team, port);
 	list_del_rcu(&port->list);
 	netdev_rx_handler_unregister(port_dev);
@@ -1652,8 +1653,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
 	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
 			  &team_nl_family, 0, TEAM_CMD_NOOP);
-	if (IS_ERR(hdr)) {
-		err = PTR_ERR(hdr);
+	if (!hdr) {
+		err = -EMSGSIZE;
 		goto err_msg_put;
 	}
 
@@ -1847,8 +1848,8 @@ start_again:
 
 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
 			  TEAM_CMD_OPTIONS_GET);
-	if (IS_ERR(hdr))
-		return PTR_ERR(hdr);
+	if (!hdr)
+		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
 		goto nla_put_failure;
@@ -2067,8 +2068,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
 
 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
 			  TEAM_CMD_PORT_LIST_GET);
-	if (IS_ERR(hdr))
-		return PTR_ERR(hdr);
+	if (!hdr)
+		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
 		goto nla_put_failure;
@@ -2251,13 +2252,11 @@ static void __team_options_change_check(struct team *team)
 }
 
 /* rtnl lock is held */
-static void __team_port_change_check(struct team_port *port, bool linkup)
+
+static void __team_port_change_send(struct team_port *port, bool linkup)
 {
 	int err;
 
-	if (!port->removed && port->state.linkup == linkup)
-		return;
-
 	port->changed = true;
 	port->state.linkup = linkup;
 	team_refresh_port_linkup(port);
@@ -2282,6 +2281,23 @@ send_event:
 
 }
 
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+	if (port->state.linkup != linkup)
+		__team_port_change_send(port, linkup);
+}
+
+static void __team_port_change_port_added(struct team_port *port, bool linkup)
+{
+	__team_port_change_send(port, linkup);
+}
+
+static void __team_port_change_port_removed(struct team_port *port)
+{
+	port->removed = true;
+	__team_port_change_send(port, false);
+}
+
 static void team_port_change_check(struct team_port *port, bool linkup)
 {
 	struct team *team = port->team;
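
The three team netlink hunks fix the same convention error corrected in the PHY drivers earlier in this series: genlmsg_put() returns NULL when the buffer cannot hold the header, never an ERR_PTR, and -EMSGSIZE is the conventional errno for that case. A fragment with placeholder family and command names:

	/* genlmsg_put() signals "no room in the skb" with NULL, so
	 * IS_ERR()/PTR_ERR() on its result can never fire.
	 * my_genl_family and MY_CMD are placeholders. */
	hdr = genlmsg_put(msg, pid, seq, &my_genl_family, 0, MY_CMD);
	if (!hdr)
		return -EMSGSIZE;
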
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f5ab6e613ec..376143e8a1a 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1253,6 +1253,7 @@ static struct usb_driver smsc75xx_driver = {
 	.probe		= usbnet_probe,
 	.suspend	= usbnet_suspend,
 	.resume		= usbnet_resume,
+	.reset_resume	= usbnet_resume,
 	.disconnect	= usbnet_disconnect,
 	.disable_hub_initiated_lpm = 1,
 };
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 1e86ea2266d..dbeebef562d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1442,6 +1442,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 		return err;
 
 err_free_irq:
+	trans_pcie->irq_requested = false;
 	free_irq(trans_pcie->irq, trans);
 error:
 	iwl_free_isr_ict(trans);
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
index a3ac39b7919..0646bf6e788 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/sh/pfc/pinctrl.c
@@ -208,6 +208,8 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
 
 		break;
 	case PINMUX_TYPE_GPIO:
+	case PINMUX_TYPE_INPUT:
+	case PINMUX_TYPE_OUTPUT:
 		break;
 	default:
 		pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index d9569658476..3440812b4a8 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
 	/* print devices for all busses */
 	list_for_each_entry(bus, &usb_bus_list, bus_list) {
 		/* recurse through all children of the root hub */
-		if (!bus->root_hub)
+		if (!bus_to_hcd(bus)->rh_registered)
 			continue;
 		usb_lock_device(bus->root_hub);
 		ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index bc84106ac05..75ba2091f9b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1011,10 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
 	if (retval) {
 		dev_err (parent_dev, "can't register root hub for %s, %d\n",
 				dev_name(&usb_dev->dev), retval);
-	}
-	mutex_unlock(&usb_bus_list_lock);
-
-	if (retval == 0) {
+	} else {
 		spin_lock_irq (&hcd_root_hub_lock);
 		hcd->rh_registered = 1;
 		spin_unlock_irq (&hcd_root_hub_lock);
@@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd)
 		if (HCD_DEAD(hcd))
 			usb_hc_died (hcd);	/* This time clean up */
 	}
+	mutex_unlock(&usb_bus_list_lock);
 
 	return retval;
 }
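
Together with the devices.c hunk above, this changes what readers test (the rh_registered flag rather than the root_hub pointer) and keeps usb_bus_list_lock held until after the flag is published, so the success path no longer runs outside the lock. A simplified sketch of the pattern, with hypothetical names (struct thing, registry_lock, do_register) standing in for the real ones:

	/* Single unlock at the end; the flag readers depend on is set
	 * while the lock is still held. */
	static int register_thing(struct thing *t)
	{
		int err;

		mutex_lock(&registry_lock);
		err = do_register(t);		/* hypothetical helper */
		if (!err)
			t->registered = 1;	/* readers test this flag */
		mutex_unlock(&registry_lock);	/* one exit for both paths */
		return err;
	}
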
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index aaa8d2bce21..0bf72f943b0 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
 	/* From the GPIO notifying the over-current situation, find
 	 * out the corresponding port */
 	at91_for_each_port(port) {
-		if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+		if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+		    gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
 			gpio = pdata->overcurrent_pin[port];
 			break;
 		}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 211a4920b88..d8dedc7d391 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -76,9 +76,24 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 			schedule_work(&virqfd->inject);
 	}
 
-	if (flags & POLLHUP)
-		/* The eventfd is closing, detach from VFIO */
-		virqfd_deactivate(virqfd);
+	if (flags & POLLHUP) {
+		unsigned long flags;
+		spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
+
+		/*
+		 * The eventfd is closing, if the virqfd has not yet been
+		 * queued for release, as determined by testing whether the
+		 * vdev pointer to it is still valid, queue it now. As
+		 * with kvm irqfds, we know we won't race against the virqfd
+		 * going away because we hold wqh->lock to get here.
+		 */
+		if (*(virqfd->pvirqfd) == virqfd) {
+			*(virqfd->pvirqfd) = NULL;
+			virqfd_deactivate(virqfd);
+		}
+
+		spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
+	}
 
 	return 0;
 }
@@ -93,7 +108,6 @@ static void virqfd_ptable_queue_proc(struct file *file,
 static void virqfd_shutdown(struct work_struct *work)
 {
 	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
-	struct virqfd **pvirqfd = virqfd->pvirqfd;
 	u64 cnt;
 
 	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
@@ -101,7 +115,6 @@ static void virqfd_shutdown(struct work_struct *work)
 	eventfd_ctx_put(virqfd->eventfd);
 
 	kfree(virqfd);
-	*pvirqfd = NULL;
 }
 
 static void virqfd_inject(struct work_struct *work)
@@ -122,15 +135,11 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 	int ret = 0;
 	unsigned int events;
 
-	if (*pvirqfd)
-		return -EBUSY;
-
 	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
 	if (!virqfd)
 		return -ENOMEM;
 
 	virqfd->pvirqfd = pvirqfd;
-	*pvirqfd = virqfd;
 	virqfd->vdev = vdev;
 	virqfd->handler = handler;
 	virqfd->thread = thread;
@@ -154,6 +163,23 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 	virqfd->eventfd = ctx;
 
 	/*
+	 * virqfds can be released by closing the eventfd or directly
+	 * through ioctl. These are both done through a workqueue, so
+	 * we update the pointer to the virqfd under lock to avoid
+	 * pushing multiple jobs to release the same virqfd.
+	 */
+	spin_lock_irq(&vdev->irqlock);
+
+	if (*pvirqfd) {
+		spin_unlock_irq(&vdev->irqlock);
+		ret = -EBUSY;
+		goto fail;
+	}
+	*pvirqfd = virqfd;
+
+	spin_unlock_irq(&vdev->irqlock);
+
+	/*
 	 * Install our own custom wake-up handling so we are notified via
 	 * a callback whenever someone signals the underlying eventfd.
 	 */
@@ -187,19 +213,29 @@ fail:
 	fput(file);
 
 	kfree(virqfd);
-	*pvirqfd = NULL;
 
 	return ret;
 }
 
-static void virqfd_disable(struct virqfd *virqfd)
+static void virqfd_disable(struct vfio_pci_device *vdev,
+			   struct virqfd **pvirqfd)
 {
-	if (!virqfd)
-		return;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vdev->irqlock, flags);
+
+	if (*pvirqfd) {
+		virqfd_deactivate(*pvirqfd);
+		*pvirqfd = NULL;
+	}
 
-	virqfd_deactivate(virqfd);
+	spin_unlock_irqrestore(&vdev->irqlock, flags);
 
-	/* Block until we know all outstanding shutdown jobs have completed. */
+	/*
+	 * Block until we know all outstanding shutdown jobs have completed.
+	 * Even if we don't queue the job, flush the wq to be sure it's
+	 * been released.
+	 */
 	flush_workqueue(vfio_irqfd_cleanup_wq);
 }
 
@@ -392,8 +428,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
 	vfio_intx_set_signal(vdev, -1);
-	virqfd_disable(vdev->ctx[0].unmask);
-	virqfd_disable(vdev->ctx[0].mask);
+	virqfd_disable(vdev, &vdev->ctx[0].unmask);
+	virqfd_disable(vdev, &vdev->ctx[0].mask);
 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
 	vdev->num_ctx = 0;
 	kfree(vdev->ctx);
@@ -539,8 +575,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
 
 	for (i = 0; i < vdev->num_ctx; i++) {
-		virqfd_disable(vdev->ctx[i].unmask);
-		virqfd_disable(vdev->ctx[i].mask);
+		virqfd_disable(vdev, &vdev->ctx[i].unmask);
+		virqfd_disable(vdev, &vdev->ctx[i].mask);
 	}
 
 	if (msix) {
@@ -577,7 +613,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
 					     vfio_send_intx_eventfd, NULL,
 					     &vdev->ctx[0].unmask, fd);
 
-		virqfd_disable(vdev->ctx[0].unmask);
+		virqfd_disable(vdev, &vdev->ctx[0].unmask);
 	}
 
 	return 0;
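
The vfio rework closes a race in which an eventfd POLLHUP and virqfd_disable() could both queue a release of the same virqfd. The published pointer now doubles as an ownership token: whoever clears it under vdev->irqlock gets to queue the shutdown, and every other path merely flushes the workqueue. A userspace restatement of that claim rule, using a pthread mutex in place of the spinlock (all names are illustrative):

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Exactly one of the racing paths observes the pointer still set
	 * and clears it, winning the right to queue the release once. */
	static int claim_release(void **published, void *mine)
	{
		int claimed = 0;

		pthread_mutex_lock(&lock);
		if (*published == mine) {	/* not yet claimed */
			*published = NULL;	/* claim it */
			claimed = 1;
		}
		pthread_mutex_unlock(&lock);
		return claimed;
	}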