Diffstat:
 -rw-r--r--  fs/configfs/dir.c            | 39
 -rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 61
 -rw-r--r--  fs/ocfs2/dir.c               |  2
 -rw-r--r--  fs/ocfs2/dlm/dlmdomain.c     |  3
 -rw-r--r--  fs/ocfs2/dlm/dlmmaster.c     |  3
 -rw-r--r--  fs/ocfs2/file.c              | 12
 -rw-r--r--  fs/ocfs2/journal.c           |  3

7 files changed, 96 insertions(+), 27 deletions(-)
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 3313dd19f543..9a37a9b6de3a 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock);
 static void configfs_d_iput(struct dentry * dentry,
                             struct inode * inode)
 {
-        struct configfs_dirent * sd = dentry->d_fsdata;
+        struct configfs_dirent *sd = dentry->d_fsdata;
 
         if (sd) {
                 BUG_ON(sd->s_dentry != dentry);
+                /* Coordinate with configfs_readdir */
+                spin_lock(&configfs_dirent_lock);
                 sd->s_dentry = NULL;
+                spin_unlock(&configfs_dirent_lock);
                 configfs_put(sd);
         }
         iput(inode);
@@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group,
                 sd = child->d_fsdata;
                 sd->s_type |= CONFIGFS_USET_DEFAULT;
         } else {
-                d_delete(child);
+                BUG_ON(child->d_inode);
+                d_drop(child);
                 dput(child);
         }
 }
@@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
         struct configfs_dirent * parent_sd = dentry->d_fsdata;
         struct configfs_dirent *cursor = filp->private_data;
         struct list_head *p, *q = &cursor->s_sibling;
-        ino_t ino;
+        ino_t ino = 0;
         int i = filp->f_pos;
 
         switch (i) {
@@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
                         struct configfs_dirent *next;
                         const char * name;
                         int len;
+                        struct inode *inode = NULL;
 
                         next = list_entry(p, struct configfs_dirent,
                                            s_sibling);
@@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
 
                         name = configfs_get_name(next);
                         len = strlen(name);
-                        if (next->s_dentry)
-                                ino = next->s_dentry->d_inode->i_ino;
-                        else
+
+                        /*
+                         * We'll have a dentry and an inode for
+                         * PINNED items and for open attribute
+                         * files.  We lock here to prevent a race
+                         * with configfs_d_iput() clearing
+                         * s_dentry before calling iput().
+                         *
+                         * Why do we go to the trouble?  If
+                         * someone has an attribute file open,
+                         * the inode number should match until
+                         * they close it.  Beyond that, we don't
+                         * care.
+                         */
+                        spin_lock(&configfs_dirent_lock);
+                        dentry = next->s_dentry;
+                        if (dentry)
+                                inode = dentry->d_inode;
+                        if (inode)
+                                ino = inode->i_ino;
+                        spin_unlock(&configfs_dirent_lock);
+                        if (!inode)
                                 ino = iunique(configfs_sb, 2);
 
                         if (filldir(dirent, name, len, filp->f_pos, ino,
@@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
         err = configfs_attach_group(sd->s_element, &group->cg_item,
                                     dentry);
         if (err) {
-                d_delete(dentry);
+                BUG_ON(dentry->d_inode);
+                d_drop(dentry);
                 dput(dentry);
         } else {
                 spin_lock(&configfs_dirent_lock);
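
The core of the configfs change is the lock pairing: configfs_d_iput() now clears s_dentry under configfs_dirent_lock, and configfs_readdir() snapshots the dentry and inode under the same lock before using the inode number. Below is a minimal userspace model of that pattern, using a pthread mutex as a stand-in for the spinlock; all names are illustrative, not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dentry { unsigned long d_ino; };

struct dirent_entry {
        struct dentry *s_dentry;        /* cleared concurrently by teardown */
};

static pthread_mutex_t dirent_lock = PTHREAD_MUTEX_INITIALIZER;

/* readdir side: snapshot the pointer and the inode number under the lock */
static unsigned long read_ino(struct dirent_entry *e)
{
        unsigned long ino = 0;

        pthread_mutex_lock(&dirent_lock);
        if (e->s_dentry)
                ino = e->s_dentry->d_ino;
        pthread_mutex_unlock(&dirent_lock);

        return ino ? ino : 2;   /* no dentry: fall back to a synthetic
                                 * number, as the kernel does with iunique() */
}

/* d_iput side: clear the pointer under the same lock, then release */
static void drop_dentry(struct dirent_entry *e)
{
        struct dentry *d;

        pthread_mutex_lock(&dirent_lock);
        d = e->s_dentry;
        e->s_dentry = NULL;
        pthread_mutex_unlock(&dirent_lock);
        free(d);        /* safe: no reader can still be dereferencing it */
}

int main(void)
{
        struct dirent_entry e = { .s_dentry = malloc(sizeof(struct dentry)) };

        if (!e.s_dentry)
                return 1;
        e.s_dentry->d_ino = 42;
        printf("ino=%lu\n", read_ino(&e));
        drop_dentry(&e);
        printf("ino=%lu (synthetic, after drop)\n", read_ino(&e));
        return 0;
}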
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 643720209a98..9a3e6bbff27b 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
 
 /* We want to make sure that nobody is heartbeating on top of us --
  * this will help detect an invalid configuration. */
-static int o2hb_check_last_timestamp(struct o2hb_region *reg)
+static void o2hb_check_last_timestamp(struct o2hb_region *reg)
 {
-        int node_num, ret;
         struct o2hb_disk_slot *slot;
         struct o2hb_disk_heartbeat_block *hb_block;
+        char *errstr;
 
-        node_num = o2nm_this_node();
-
-        ret = 1;
-        slot = &reg->hr_slots[node_num];
+        slot = &reg->hr_slots[o2nm_this_node()];
         /* Don't check on our 1st timestamp */
-        if (slot->ds_last_time) {
-                hb_block = slot->ds_raw_block;
+        if (!slot->ds_last_time)
+                return;
 
-                if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
-                        ret = 0;
-        }
+        hb_block = slot->ds_raw_block;
+        if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
+            le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
+            hb_block->hb_node == slot->ds_node_num)
+                return;
 
-        return ret;
+#define ERRSTR1 "Another node is heartbeating on device"
+#define ERRSTR2 "Heartbeat generation mismatch on device"
+#define ERRSTR3 "Heartbeat sequence mismatch on device"
+
+        if (hb_block->hb_node != slot->ds_node_num)
+                errstr = ERRSTR1;
+        else if (le64_to_cpu(hb_block->hb_generation) !=
+                 slot->ds_last_generation)
+                errstr = ERRSTR2;
+        else
+                errstr = ERRSTR3;
+
+        mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), "
+             "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name,
+             slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
+             (unsigned long long)slot->ds_last_time, hb_block->hb_node,
+             (unsigned long long)le64_to_cpu(hb_block->hb_generation),
+             (unsigned long long)le64_to_cpu(hb_block->hb_seq));
 }
 
 static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
         /* With an up to date view of the slots, we can check that no
          * other node has been improperly configured to heartbeat in
          * our slot. */
-        if (!o2hb_check_last_timestamp(reg))
-                mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
-                     "in our slot!\n", reg->hr_dev_name);
+        o2hb_check_last_timestamp(reg);
 
         /* fill in the proper info for our next heartbeat */
         o2hb_prepare_block(reg, reg->hr_generation);
@@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
         }
 
         i = -1;
-        while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
-
+        while((i = find_next_bit(configured_nodes,
+                                 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
                 change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
         }
 
@@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
         struct file *filp = NULL;
         struct inode *inode = NULL;
         ssize_t ret = -EINVAL;
+        int live_threshold;
 
         if (reg->hr_bdev)
                 goto out;
@@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
          * A node is considered live after it has beat LIVE_THRESHOLD
          * times.  We're not steady until we've given them a chance
          * _after_ our first read.
+         * The default threshold is bare minimum so as to limit the delay
+         * during mounts. For global heartbeat, the threshold doubled for the
+         * first region.
          */
-        atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);
+        live_threshold = O2HB_LIVE_THRESHOLD;
+        if (o2hb_global_heartbeat_active()) {
+                spin_lock(&o2hb_live_lock);
+                if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
+                        live_threshold <<= 1;
+                spin_unlock(&o2hb_live_lock);
+        }
+        atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
 
         hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
                               reg->hr_item.ci_name);
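
The reworked o2hb_check_last_timestamp() no longer returns a boolean for the caller to log; it classifies which on-disk field diverged (node, then generation, then sequence) and logs the most telling mismatch itself. A standalone sketch of that classification order; the field names mirror the kernel struct but nothing else here is kernel code:

#include <stdint.h>
#include <stdio.h>

struct hb_state {
        uint8_t  node;
        uint64_t generation;
        uint64_t seq;
};

/* check the widest-scope field first, so the message names the real cause */
static const char *classify(const struct hb_state *expected,
                            const struct hb_state *ondisk)
{
        if (ondisk->node != expected->node)
                return "Another node is heartbeating on device";
        if (ondisk->generation != expected->generation)
                return "Heartbeat generation mismatch on device";
        if (ondisk->seq != expected->seq)
                return "Heartbeat sequence mismatch on device";
        return NULL;    /* all fields match: nothing to report */
}

int main(void)
{
        struct hb_state expected = { .node = 3, .generation = 7, .seq = 100 };
        struct hb_state ondisk   = { .node = 3, .generation = 7, .seq = 99 };
        const char *err = classify(&expected, &ondisk);

        if (err)
                printf("%s\n", err);    /* sequence mismatch in this example */
        return 0;
}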
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 9fe5b8fd658f..8582e3f4f120 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
                 bytes = blocks_wanted << sb->s_blocksize_bits;
         struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
         struct ocfs2_inode_info *oi = OCFS2_I(dir);
-        struct ocfs2_alloc_context *data_ac;
+        struct ocfs2_alloc_context *data_ac = NULL;
         struct ocfs2_alloc_context *meta_ac = NULL;
         struct buffer_head *dirdata_bh = NULL;
         struct buffer_head *dx_root_bh = NULL;
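
This one-liner matters because ocfs2_expand_inline_dir() has a common error exit that inspects data_ac; without the NULL initialization, an early failure would test an uninitialized pointer. A minimal userspace model of the goto-out idiom the fix protects (hypothetical names, not the ocfs2 code):

#include <stdio.h>
#include <stdlib.h>

struct alloc_context { int reserved; };

static int expand_dir(int fail_early)
{
        struct alloc_context *data_ac = NULL;   /* defined on every path */
        int ret = 0;

        if (fail_early) {
                ret = -1;
                goto out;       /* data_ac never assigned; NULL init saves us */
        }

        data_ac = malloc(sizeof(*data_ac));
        if (!data_ac) {
                ret = -1;
                goto out;
        }
        /* ... use data_ac ... */

out:
        if (data_ac)    /* without the NULL init this would read garbage */
                free(data_ac);
        return ret;
}

int main(void)
{
        printf("fail_early: %d\n", expand_dir(1));
        printf("normal:     %d\n", expand_dir(0));
        return 0;
}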
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7540a492eaba..3b179d6cbde0 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
         spin_unlock(&dlm->spinlock);
 
         /* Support for global heartbeat and node info was added in 1.1 */
-        if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) {
+        if (dlm->dlm_locking_proto.pv_major > 1 ||
+            dlm->dlm_locking_proto.pv_minor > 0) {
                 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
                 if (status) {
                         mlog_errno(status);
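
The dlmdomain fix switches the "is the protocol at least 1.1" test from the global default (dlm_protocol) to the version actually negotiated for this domain (dlm->dlm_locking_proto), so nodeinfo is only sent to peers that understand it. A small sketch of the version predicate, assuming plain major/minor ordering:

#include <stdbool.h>
#include <stdio.h>

struct proto_version { unsigned pv_major, pv_minor; };

/* "greater than 1.0" is how the kernel phrases "at least 1.1" */
static bool at_least_1_1(struct proto_version v)
{
        return v.pv_major > 1 || v.pv_minor > 0;
}

int main(void)
{
        struct proto_version negotiated = { 1, 0 };     /* peer only speaks 1.0 */

        printf("send nodeinfo: %s\n", at_least_1_1(negotiated) ? "yes" : "no");
        return 0;
}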
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index fede57ed005f..84d166328cf7 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2574,6 +2574,9 @@ fail:
                 res->state &= ~DLM_LOCK_RES_MIGRATING;
                 wake = 1;
                 spin_unlock(&res->spinlock);
+                if (dlm_is_host_down(ret))
+                        dlm_wait_for_node_death(dlm, target,
+                                                DLM_NODE_DEATH_WAIT_MAX);
                 goto leave;
         }
 
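
The dlmmaster hunk handles a migration message that failed because the target node died: rather than bailing out immediately, it waits (bounded by DLM_NODE_DEATH_WAIT_MAX) for the death to be observed, so later recovery sees a settled view. A userspace sketch of that shape; the helpers are stand-ins for dlm_is_host_down() and dlm_wait_for_node_death(), not the o2dlm API:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NODE_DEATH_WAIT_MAX 5   /* seconds; stand-in for the kernel constant */

/* stand-in: did the call fail because the peer went down? */
static bool is_host_down(int err)
{
        return err == -112;     /* -EHOSTDOWN on Linux */
}

/* stand-in: has the heartbeat layer marked this node dead yet? */
static bool node_marked_dead(int node)
{
        (void)node;
        return true;            /* model: the death was already observed */
}

static void wait_for_node_death(int node, int max_secs)
{
        for (int i = 0; i < max_secs && !node_marked_dead(node); i++)
                sleep(1);       /* the kernel sleeps on a waitqueue instead */
}

int main(void)
{
        int ret = -112, target = 4;

        if (is_host_down(ret)) {
                wait_for_node_death(target, NODE_DEATH_WAIT_MAX);
                printf("node %d death observed; migration can be abandoned\n",
                       target);
        }
        return 0;
}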
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41565ae52856..89659d6dc206 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
         range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
 
         if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
+                /*
+                 * remove an entire extent record.
+                 */
                 *trunc_cpos = le32_to_cpu(rec->e_cpos);
                 /*
                  * Skip holes if any.
@@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
                 *blkno = le64_to_cpu(rec->e_blkno);
                 *trunc_end = le32_to_cpu(rec->e_cpos);
         } else if (range > trunc_start) {
+                /*
+                 * remove a partial extent record, which means we're
+                 * removing the last extent record.
+                 */
                 *trunc_cpos = trunc_start;
+                /*
+                 * skip hole if any.
+                 */
+                if (range < *trunc_end)
+                        *trunc_end = range;
                 *trunc_len = *trunc_end - trunc_start;
                 coff = trunc_start - le32_to_cpu(rec->e_cpos);
                 *blkno = le64_to_cpu(rec->e_blkno) +
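
The functional part of this hunk is the clamp in the partial-record branch: if the extent record ends (range) before the current truncate end, *trunc_end is pulled back to the record end so the hole between them is skipped. A self-contained model of the cluster arithmetic with a worked example: a record covering [10, 15), truncating from 12 with trunc_end initially 20, should free only 3 clusters. Field names mimic the kernel's extent record; the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

struct rec { uint32_t e_cpos, clusters; };      /* extent: [e_cpos, e_cpos + clusters) */

static void calc_trunc_pos(const struct rec *rec, uint32_t trunc_start,
                           uint32_t *trunc_cpos, uint32_t *trunc_end,
                           uint32_t *trunc_len)
{
        uint32_t range = rec->e_cpos + rec->clusters;

        if (rec->e_cpos >= trunc_start) {
                /* remove the entire extent record */
                *trunc_cpos = rec->e_cpos;
                if (range < *trunc_end)         /* skip holes if any */
                        *trunc_end = range;
                *trunc_len = *trunc_end - rec->e_cpos;
                *trunc_end = rec->e_cpos;
        } else if (range > trunc_start) {
                /* remove only the tail of the last record */
                *trunc_cpos = trunc_start;
                if (range < *trunc_end)         /* the added clamp: skip hole */
                        *trunc_end = range;
                *trunc_len = *trunc_end - trunc_start;
                *trunc_end = trunc_start;
        }
}

int main(void)
{
        struct rec r = { .e_cpos = 10, .clusters = 5 }; /* covers [10, 15) */
        uint32_t cpos = 0, end = 20, len = 0;

        calc_trunc_pos(&r, 12, &cpos, &end, &len);
        /* expect cpos=12, len=3: only [12, 15) is freed; [15, 20) was a hole */
        printf("trunc_cpos=%u trunc_len=%u trunc_end=%u\n", cpos, len, end);
        return 0;
}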
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index b141a44605ca..295d56454e8b 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
 {
         struct ocfs2_journal *journal = osb->journal;
 
+        if (ocfs2_is_hard_readonly(osb))
+                return;
+
         /* No need to queue up our truncate_log as regular cleanup will catch
          * that */
         ocfs2_queue_recovery_completion(journal, osb->slot_num,
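
The journal.c change is a plain guard clause: on a hard-readonly mount there is no journal replay to complete, so ocfs2_complete_mount_recovery() now returns before queueing any work. A trivial model of the shape, with a hypothetical predicate standing in for ocfs2_is_hard_readonly():

#include <stdbool.h>
#include <stdio.h>

struct super { bool hard_readonly; };

static void complete_mount_recovery(struct super *sb)
{
        if (sb->hard_readonly)
                return;         /* nothing to queue on a read-only mount */
        printf("queueing recovery completion\n");
}

int main(void)
{
        struct super ro = { .hard_readonly = true };
        struct super rw = { .hard_readonly = false };

        complete_mount_recovery(&ro);   /* prints nothing */
        complete_mount_recovery(&rw);
        return 0;
}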