 28 files changed, 283 insertions(+), 111 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 557a3ed9e244..b4ca9552d6fb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4411,6 +4411,7 @@ K: fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
+R:	Moritz Fischer <moritz.fischer@ettus.com>
 S:	Maintained
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h
@@ -7904,6 +7905,18 @@ S: Maintained
 F:	net/openvswitch/
 F:	include/uapi/linux/openvswitch.h
 
+OPERATING PERFORMANCE POINTS (OPP)
+M:	Viresh Kumar <vireshk@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+M:	Stephen Boyd <sboyd@codeaurora.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:	drivers/base/power/opp/
+F:	include/linux/pm_opp.h
+F:	Documentation/power/opp.txt
+F:	Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 35759a91d47d..e8f847226a19 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
 	       (unsigned long long)pci_resource_start(pci_dev, 0));
 
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail_context;
 	}
 
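
This hunk is the template for the media and pcnet32 changes that follow: pci_set_dma_mask() returns 0 on success and a negative errno on failure, but the old call sites negated the result, presumably a leftover from the truthy-on-success pci_dma_supported() convention, so the error branch was taken on the wrong condition. A minimal sketch of the corrected idiom (names taken from the cx23885 hunk above; the mask is spelled DMA_BIT_MASK(32) here, some hunks use the equivalent 0xffffffff):

	/* pci_set_dma_mask() returns 0 on success, negative errno on failure */
	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if (err) {
		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_context;	/* err already carries the errno */
	}
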
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index dbc695f32760..0042803a9de7 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
 		dev->pci_lat, (unsigned long long)dev->base_io_addr);
 
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail_irq;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 0ed1b6530374..1b5268f9bb24 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
 		return err;
 	}
 
-	if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+	if (err) {
 		dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
-		err = -EIO;
 		cx88_core_put(core, pci);
 		return err;
 	}
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 9db7767d1fe0..f34c229f9b37 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
 	if (pci_enable_device(dev->pci))
 		return -EIO;
 	pci_set_master(dev->pci);
-	if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+	if (err) {
 		printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
 		return -EIO;
 	}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 0de1ad5a977d..aef9acf351f6 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
 
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
-		err = -EIO;
 		goto fail_core;
 	}
 	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60b2d462f98d..3fdbd81b5580 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 		"%s(): board vendor 0x%x, revision 0x%x\n",
 		__func__, board_vendor, board_revision);
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
 		dev_err(&pci_dev->dev,
 			"%s(): 32bit PCI DMA is not supported\n", __func__);
 		goto pci_detect_err;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e79d63eb774e..f720cea80e28 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
 		pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail1;
 	}
 
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8f36b48ef733..8bbd092fbe1d 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 
 	pci_set_master(pci_dev);
 	/* TODO */
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail_irq;
 	}
 
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 8c5655d351d3..4e77618fbb2b 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
 	       dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 	       dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
 		pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
-		err = -EIO;
 		goto fail1;
 	}
 
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2afabf3a465..7ccebae9cb48 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
 
-	if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) {
+	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return -ENODEV;
+		return err;
 	}
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index c81ce7f200a6..a7a1b218f308 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = {
 	.iterate	= configfs_readdir,
 };
 
+/**
+ * configfs_register_group - creates a parent-child relation between two groups
+ * @parent_group:	parent group
+ * @group:		child group
+ *
+ * link groups, creates dentry for the child and attaches it to the
+ * parent dentry.
+ *
+ * Return: 0 on success, negative errno code on error
+ */
+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group)
+{
+	struct configfs_subsystem *subsys = parent_group->cg_subsys;
+	struct dentry *parent;
+	int ret;
+
+	mutex_lock(&subsys->su_mutex);
+	link_group(parent_group, group);
+	mutex_unlock(&subsys->su_mutex);
+
+	parent = parent_group->cg_item.ci_dentry;
+
+	mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
+	ret = create_default_group(parent_group, group);
+	if (!ret) {
+		spin_lock(&configfs_dirent_lock);
+		configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
+		spin_unlock(&configfs_dirent_lock);
+	}
+	mutex_unlock(&d_inode(parent)->i_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(configfs_register_group);
+
+/**
+ * configfs_unregister_group() - unregisters a child group from its parent
+ * @group: parent group to be unregistered
+ *
+ * Undoes configfs_register_group()
+ */
+void configfs_unregister_group(struct config_group *group)
+{
+	struct configfs_subsystem *subsys = group->cg_subsys;
+	struct dentry *dentry = group->cg_item.ci_dentry;
+	struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+
+	mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
+	spin_lock(&configfs_dirent_lock);
+	configfs_detach_prep(dentry, NULL);
+	spin_unlock(&configfs_dirent_lock);
+
+	configfs_detach_group(&group->cg_item);
+	d_inode(dentry)->i_flags |= S_DEAD;
+	dont_mount(dentry);
+	d_delete(dentry);
+	mutex_unlock(&d_inode(parent)->i_mutex);
+
+	dput(dentry);
+
+	mutex_lock(&subsys->su_mutex);
+	unlink_group(group);
+	mutex_unlock(&subsys->su_mutex);
+}
+EXPORT_SYMBOL(configfs_unregister_group);
+
+/**
+ * configfs_register_default_group() - allocates and registers a child group
+ * @parent_group:	parent group
+ * @name:		child group name
+ * @item_type:		child item type description
+ *
+ * boilerplate to allocate and register a child group with its parent. We need
+ * kzalloc'ed memory because child's default_group is initially empty.
+ *
+ * Return: allocated config group or ERR_PTR() on error
+ */
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type)
+{
+	int ret;
+	struct config_group *group;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group)
+		return ERR_PTR(-ENOMEM);
+	config_group_init_type_name(group, name, item_type);
+
+	ret = configfs_register_group(parent_group, group);
+	if (ret) {
+		kfree(group);
+		return ERR_PTR(ret);
+	}
+	return group;
+}
+EXPORT_SYMBOL(configfs_register_default_group);
+
+/**
+ * configfs_unregister_default_group() - unregisters and frees a child group
+ * @group: the group to act on
+ */
+void configfs_unregister_default_group(struct config_group *group)
+{
+	configfs_unregister_group(group);
+	kfree(group);
+}
+EXPORT_SYMBOL(configfs_unregister_default_group);
+
 int configfs_register_subsystem(struct configfs_subsystem *subsys)
 {
 	int err;
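
The new exports let a subsystem attach child groups to an already-registered parent at runtime instead of wiring everything through default_groups before configfs_register_subsystem(). A hedged sketch of how a caller might use the default-group pair; my_child_type and the "stats" name are hypothetical:

	static struct config_item_type my_child_type;	/* hypothetical item type */
	static struct config_group *my_stats_group;

	static int my_attach_stats(struct config_group *parent)
	{
		my_stats_group = configfs_register_default_group(parent, "stats",
								 &my_child_type);
		return PTR_ERR_OR_ZERO(my_stats_group);
	}

	static void my_detach_stats(void)
	{
		configfs_unregister_default_group(my_stats_group);
	}
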
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4afc4d9d2e41..8b2127ffb226 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -610,9 +610,9 @@ parse_record:
 		int status = fat_parse_long(inode, &cpos, &bh, &de,
 					    &unicode, &nr_slots);
 		if (status < 0) {
-			ctx->pos = cpos;
+			bh = NULL;
 			ret = status;
-			goto out;
+			goto end_of_dir;
 		} else if (status == PARSE_INVALID)
 			goto record_end;
 		else if (status == PARSE_NOT_LONGNAME)
@@ -654,8 +654,9 @@ parse_record:
 		fill_len = short_len;
 
 start_filldir:
-	if (!fake_offset)
-		ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+	ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+	if (fake_offset && ctx->pos < 2)
+		ctx->pos = 2;
 
 	if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
 		if (!dir_emit_dot(file, ctx))
@@ -681,14 +682,19 @@ record_end:
 	fake_offset = 0;
 	ctx->pos = cpos;
 	goto get_new;
+
 end_of_dir:
-	ctx->pos = cpos;
+	if (fake_offset && cpos < 2)
+		ctx->pos = 2;
+	else
+		ctx->pos = cpos;
 fill_failed:
 	brelse(bh);
 	if (unicode)
 		__putname(unicode);
 out:
 	mutex_unlock(&sbi->s_lock);
+
 	return ret;
 }
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316adb968b65..de4bdfac0cec 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page)
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
- *	maps and global counts.
+ *	maps and global counts.  Page faults can not race with truncation
+ *	in this routine.  hugetlb_no_page() prevents page faults in the
+ *	truncated range.  It checks i_size before allocation, and again after
+ *	with the page table lock for the page held.  The same lock must be
+ *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
- *	pages are not modified.
+ *	pages are not modified.  Page faults can race with hole punch.
+ *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 *	not LLONG_MAX this routine still performs a hole punch operation.
 */
@@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	next = start;
 	while (next < end) {
 		/*
-		 * Make sure to never grab more pages that we
-		 * might possibly need.
+		 * Don't grab more pages than the number left in the range.
 		 */
 		if (end - next < lookup_nr)
 			lookup_nr = end - next;
 
 		/*
-		 * This pagevec_lookup() may return pages past 'end',
-		 * so we must check for page->index > end.
+		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) {
-			if (next == start)
-				break;
-			next = start;
-			continue;
-		}
+		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+			break;
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
 			u32 hash;
 
+			/*
+			 * The page (index) could be beyond end.  This is
+			 * only possible in the punch hole case as end is
+			 * max page offset in the truncate case.
+			 */
+			next = page->index;
+			if (next >= end)
+				break;
+
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
 							&pseudo_vma,
 							mapping, next, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			lock_page(page);
-			if (page->index >= end) {
-				unlock_page(page);
-				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-				next = end;	/* we are done */
-				break;
-			}
-
-			/*
-			 * If page is mapped, it was faulted in after being
-			 * unmapped.  Do nothing in this race case.  In the
-			 * normal case page is not mapped.
-			 */
-			if (!page_mapped(page)) {
+			if (likely(!page_mapped(page))) {
 				bool rsv_on_error = !PagePrivate(page);
 				/*
 				 * We must free the huge page and remove
@@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 					hugetlb_fix_reserve_counts(
 							inode, rsv_on_error);
 				}
+			} else {
+				/*
+				 * If page is mapped, it was faulted in after
+				 * being unmapped.  It indicates a race between
+				 * hole punch and page fault.  Do nothing in
+				 * this case.  Getting here in a truncate
+				 * operation is a bug.
+				 */
+				BUG_ON(truncate_op);
 			}
 
-			if (page->index > next)
-				next = page->index;
-
-			++next;
 			unlock_page(page);
-
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
+		++next;
 		huge_pagevec_release(&pvec);
+		cond_resched();
 	}
 
 	if (truncate_op)
@@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
 		i_size_write(inode, offset + len);
 	inode->i_ctime = CURRENT_TIME;
-	spin_lock(&inode->i_lock);
-	inode->i_private = NULL;
-	spin_unlock(&inode->i_lock);
 out:
 	mutex_unlock(&inode->i_mutex);
 	return error;
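
The reworked comment above remove_inode_hugepages() keys the two behaviours off the end argument. A sketch of the two call shapes, assuming callers follow the convention the comment describes (the identifier names are illustrative, not taken from this patch):

	/* truncation: unconditional removal, page faults cannot race */
	remove_inode_hugepages(inode, offset, LLONG_MAX);

	/* hole punch: bounded range; a mapped page indicates a racing
	 * fault and is skipped rather than removed */
	remove_inode_hugepages(inode, hole_start, hole_end);
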
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 79b113048eac..0a3f9b594602 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
 		switch (rqdata.cmd) {
 		case NCP_LOCK_EX:
 		case NCP_LOCK_SH:
+			if (rqdata.timeout < 0)
+				return -EINVAL;
 			if (rqdata.timeout == 0)
 				rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
 			else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3b48ac25d8a7..a03f6f433075 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir,
 		mlog_errno(status);
 		goto leave;
 	}
+	/* update inode->i_mode after mask with "umask". */
+	inode->i_mode = mode;
 
 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
 							    S_ISDIR(mode),
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a8a335b7fce0..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
 
+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6523109e136d..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 
 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-	return gfp_flags & __GFP_DIRECT_RECLAIM;
+	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
 #ifdef CONFIG_HIGHMEM
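
gfpflags_allow_blocking() reports whether the caller's gfp mask includes __GFP_DIRECT_RECLAIM, i.e. whether the allocation context may sleep; the __force cast only keeps sparse quiet about collapsing a __bitwise value to bool. A hedged sketch of the intended use, with my_pool and my_pool_take() as hypothetical stand-ins:

	static void *my_pool_alloc(struct my_pool *pool, gfp_t gfp)
	{
		void *obj;

		if (gfpflags_allow_blocking(gfp))
			mutex_lock(&pool->lock);	/* caller may sleep */
		else if (!mutex_trylock(&pool->lock))
			return NULL;			/* atomic caller: don't block */

		obj = my_pool_take(pool);		/* hypothetical helper */
		mutex_unlock(&pool->lock);
		return obj;
	}
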
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e0392b5ac..92557bbce7e7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
 
 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7c82e3b307a3..96940772bb92 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,6 +158,24 @@ size_t ksize(const void *);
 #endif
 
 /*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
 * Kmalloc array related definitions
 */
 
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -301,8 +319,8 @@ void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
-					 int node, size_t size);
+					 int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }
 
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
 	struct rcu_head rcu;
 	struct kmem_cache *entries[0];
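
__assume_aligned presumably expands to the compiler's assume_aligned function attribute (gcc 4.9+ and recent clang) where available, and to nothing otherwise. Annotating the allocator prototypes this way lets the compiler assume the returned pointers' alignment and drop runtime fixups. A freestanding illustration of the underlying attribute, outside the kernel macros:

	#include <string.h>

	/* callers may assume the result is 16-byte aligned */
	void *arena_alloc(unsigned long n) __attribute__((assume_aligned(16)));

	void fill64(void *dst)
	{
		void *p = arena_alloc(64);

		/* the compiler may emit aligned 16-byte loads/stores here */
		memcpy(dst, p, 64);
	}
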
diff --git a/kernel/panic.c b/kernel/panic.c
index 4579dbb7ed87..4b150bc0c6c1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
-	 * result.  The release will also print the buffers out.
+	 * result.  The release will also print the buffers out.  Locks debug
+	 * should be disabled to avoid reporting bad unlock balance when
+	 * panic() is not being called from OOPS.
	 */
+	debug_locks_off();
	console_trylock();
	console_unlock();
 
diff --git a/kernel/signal.c b/kernel/signal.c
index c0b01fe24bbd..f3f1f7a972fd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
 {
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c29ddebc8705..62fe06bb7d04 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
 		 * Be somewhat over-protective like KSM for now!
 		 */
-		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
 			return -EINVAL;
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;
@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
 		 * Be somewhat over-protective like KSM for now!
 		 */
-		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
 			return -EINVAL;
 		*vm_flags &= ~VM_HUGEPAGE;
 		*vm_flags |= VM_NOHUGEPAGE;
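
With VM_HUGEPAGE and VM_NOHUGEPAGE dropped from the rejection masks, repeating the same advice on a vma becomes idempotent instead of failing; only VM_NO_THP (special mappings) still returns -EINVAL. A small userspace sketch of the case this changes:

	#include <stdio.h>
	#include <sys/mman.h>

	static void hint_thp(void *addr, size_t len)
	{
		if (madvise(addr, len, MADV_HUGEPAGE))
			perror("MADV_HUGEPAGE");
		/* before this patch the repeat call failed with EINVAL,
		 * because hugepage_madvise() rejected vmas that already
		 * had VM_HUGEPAGE set */
		if (madvise(addr, len, MADV_HUGEPAGE))
			perror("MADV_HUGEPAGE (repeat)");
	}
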
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index d41b21bce6a0..bc0a8d8b8f42 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -19,6 +19,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
 
 	if (ret) {
 		find_vm_area(addr)->flags |= VM_KASAN;
+		kmemleak_ignore(ret);
 		return 0;
 	}
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2c90357c34ea..3e4d65445fa7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
 	for (;;) {
 		unsigned long now = jiffies;
 		unsigned long dirty, thresh, bg_thresh;
-		unsigned long m_dirty, m_thresh, m_bg_thresh;
+		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
+		unsigned long m_thresh = 0;
+		unsigned long m_bg_thresh = 0;
 
 		/*
 		 * Unstable writes are a feature of certain networked
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1204,7 +1204,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
 
 	return flags;
 }
-#else
+#else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}
 
@@ -2295,23 +2295,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
 */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	struct page *page;
-	unsigned long flags;
-
-	local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-	/*
-	 * We may have been preempted and rescheduled on a different
-	 * cpu before disabling interrupts. Need to reload cpu area
-	 * pointer.
-	 */
-	c = this_cpu_ptr(s->cpu_slab);
-#endif
 
 	page = c->page;
 	if (!page)
@@ -2369,7 +2361,6 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
-	local_irq_restore(flags);
 	return freelist;
 
 new_slab:
@@ -2386,7 +2377,6 @@ new_slab:
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		local_irq_restore(flags);
 		return NULL;
 	}
 
@@ -2402,11 +2392,35 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
 	return freelist;
 }
 
 /*
+ * Another one that disabled interrupt and compensates for possible
+ * cpu changes by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
+{
+	void *p;
+	unsigned long flags;
+
+	local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+	/*
+	 * We may have been preempted and rescheduled on a different
+	 * cpu before disabling interrupts. Need to reload cpu area
+	 * pointer.
+	 */
+	c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	local_irq_restore(flags);
+	return p;
+}
+
+/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
@@ -2804,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
 
 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;
 
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2847,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 
 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
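
The consolidated error label preserves the all-or-nothing contract: on failure the already-allocated objects are handed back via __kmem_cache_free_bulk() and false is returned. A hedged usage sketch, assuming a cache created elsewhere with kmem_cache_create():

	static int my_fill_batch(struct kmem_cache *cache)
	{
		void *objs[16];

		if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
			return -ENOMEM;	/* failure unwinds: nothing to free here */

		/* ... use all 16 objects ... */

		kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
		return 0;
	}
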
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d04563480c94..8e3c9c5a3042 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
 		vmap_debug_free_range(va->va_start, va->va_end);
 		kasan_free_shadow(vm);
 		free_unmap_vmap_area(va);
-		vm->size -= PAGE_SIZE;
 
 		return vm;
 	}
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
 		return;
 	}
 
-	debug_check_no_locks_freed(addr, area->size);
-	debug_check_no_obj_freed(addr, area->size);
+	debug_check_no_locks_freed(addr, get_vm_area_size(area));
+	debug_check_no_obj_freed(addr, get_vm_area_size(area));
 
 	if (deallocate_pages) {
 		int i;
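
remove_vm_area() used to subtract the guard page from vm->size in place so that __vunmap()'s debug checks covered only the mapped bytes; with that mutation gone, __vunmap() now asks get_vm_area_size() for the usable length. For reference, that helper is believed to be defined in include/linux/vmalloc.h of this era as roughly:

	/* usable size of a vmalloc area, excluding the one trailing guard page */
	static inline size_t get_vm_area_size(const struct vm_struct *area)
	{
		return area->size - PAGE_SIZE;
	}
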
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index bcf5ec760eb9..5a6016224bb9 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -128,6 +128,7 @@ static const char * const page_flag_names[] = {
	[KPF_THP]		= "t:thp",
	[KPF_BALLOON]		= "o:balloon",
	[KPF_ZERO_PAGE]		= "z:zero_page",
+	[KPF_IDLE]		= "i:idle_page",
 
	[KPF_RESERVED]		= "r:reserved",
	[KPF_MLOCKED]		= "m:mlocked",