path: root/fs/utimes.c
blob: 99cf2cb11fec4ad0090c9d2f12d966a7dadb1ef1
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/utime.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#ifdef __ARCH_WANT_SYS_UTIME

/*
 * sys_utime() can be implemented in user-level using sys_utimes().
 * Is this for backwards compatibility?  If so, why not move it
 * into the appropriate arch directory (for those architectures that
 * need it).
 */
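
/*
 * Illustration only (not part of this file): a minimal user-level sketch of
 * how utime() can be layered on top of utimes(), assuming the usual libc
 * prototypes from <utime.h> and <sys/time.h>.  A NULL utimbuf maps to a NULL
 * timeval array; otherwise actime/modtime become the tv_sec fields and
 * tv_usec is zeroed.  The wrapper name my_utime() is hypothetical.
 *
 *	#include <stddef.h>
 *	#include <sys/time.h>
 *	#include <utime.h>
 *
 *	int my_utime(const char *path, const struct utimbuf *buf)
 *	{
 *		struct timeval tv[2];
 *
 *		if (!buf)
 *			return utimes(path, NULL);
 *		tv[0].tv_sec = buf->actime;	// access time
 *		tv[0].tv_usec = 0;
 *		tv[1].tv_sec = buf->modtime;	// modification time
 *		tv[1].tv_usec = 0;
 *		return utimes(path, tv);
 *	}
 */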

/* If times==NULL, set access and modification to current time,
 * must be owner or have write permission.
 * Else, update from *times, must be owner or super user.
 */
asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
{
	int error;
	struct nameidata nd;
	struct inode * inode;
	struct iattr newattrs;

	error = user_path_walk(filename, &nd);
	if (error)
		goto out;
	inode = nd.dentry->d_inode;

	error = -EROFS;
	if (IS_RDONLY(inode))
		goto dput_and_out;

	/* Don't worry, the checks are done in inode_change_ok() */
	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
	if (times) {
		error = -EPERM;
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
			goto dput_and_out;

		error = get_user(newattrs.ia_atime.tv_sec, &times->actime);
		newattrs.ia_atime.tv_nsec = 0;
		if (!error)
			error = get_user(newattrs.ia_mtime.tv_sec, &times->modtime);
		newattrs.ia_mtime.tv_nsec = 0;
		if (error)
			goto dput_and_out;

		newattrs.ia_valid |= ATTR_ATIME_SET | ATTR_MTIME_SET;
	} else {
		error = -EACCES;
		if (IS_IMMUTABLE(inode))
			goto dput_and_out;

		if (current->fsuid != inode->i_uid &&
		    (error = vfs_permission(&nd, MAY_WRITE)) != 0)
			goto dput_and_out;
	}
	mutex_lock(&inode->i_mutex);
	error = notify_change(nd.dentry, &newattrs);
	mutex_unlock(&inode->i_mutex);
dput_and_out:
	path_release(&nd);
out:
	return error;
}

#endif

/* If times==NULL, set access and modification to current time,
 * must be owner or have write permission.
 * Else, update from *times, must be owner or super user.
 */
long do_utimes(int dfd, char __user *filename, struct timeval *times)
{
	int error;
	struct nameidata nd;
	struct inode * inode;
	struct iattr newattrs;

	error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);

	if (error)
		goto out;
	inode = nd.dentry->d_inode;

	error = -EROFS;
	if (IS_RDONLY(inode))
		goto dput_and_out;

	/* Don't worry, the checks are done in inode_change_ok() */
	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
	if (times) {
		error = -EPERM;
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
			goto dput_and_out;

		newattrs.ia_atime.tv_sec = times[0].tv_sec;
		newattrs.ia_atime.tv_nsec = times[0].tv_usec * 1000;
		newattrs.ia_mtime.tv_sec = times[1].tv_sec;
		newattrs.ia_mtime.tv_nsec = times[1].tv_usec * 1000;
		newattrs.ia_valid |= ATTR_ATIME_SET | ATTR_MTIME_SET;
	} else {
		error = -EACCES;
		if (IS_IMMUTABLE(inode))
			goto dput_and_out;

		if (current->fsuid != inode->i_uid &&
		    (error = vfs_permission(&nd, MAY_WRITE)) != 0)
			goto dput_and_out;
	}
	mutex_lock(&inode->i_mutex);
	error = notify_change(nd.dentry, &newattrs);
	mutex_unlock(&inode->i_mutex);
dput_and_out:
	path_release(&nd);
out:
	return error;
}
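
/*
 * Illustration only (not part of this file): a user-level sketch of the
 * timeval-pair convention that do_utimes() consumes - times[0] is the access
 * time and times[1] is the modification time, each at microsecond resolution
 * (converted to nanoseconds above).  The path and timestamps are invented
 * for the example.
 *
 *	#include <sys/time.h>
 *
 *	static int set_times_example(void)
 *	{
 *		struct timeval tv[2];
 *
 *		tv[0].tv_sec = 1000000000;	// new access time
 *		tv[0].tv_usec = 500000;		// becomes 500000000ns in the inode
 *		tv[1].tv_sec = 1000000000;	// new modification time
 *		tv[1].tv_usec = 0;
 *		return utimes("/tmp/example", tv);
 *	}
 */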

asmlinkage long sys_futimesat(int dfd, char __user *filename, struct timeval __user *utimes)
{
	struct timeval times[2];

	if (utimes && copy_from_user(&times, utimes, sizeof(times)))
		return -EFAULT;
	return do_utimes(dfd, filename, utimes ? times : NULL);
}

asmlinkage long sys_utimes(char __user *filename, struct timeval __user *utimes)
{
	return sys_futimesat(AT_FDCWD, filename, utimes);
}
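
/*
 * Illustration only (not part of this file): a user-level sketch of
 * futimesat(), which resolves a relative filename against the directory
 * referred to by dfd - or against the current working directory when dfd is
 * AT_FDCWD, which is how sys_utimes() above reuses sys_futimesat().  A NULL
 * timeval array sets both timestamps to the current time.  The glibc wrapper
 * is a GNU extension and the paths are invented for the example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	static int touch_in_dir_example(void)
 *	{
 *		int ret, dfd = open("/var/log", O_RDONLY | O_DIRECTORY);
 *
 *		if (dfd < 0)
 *			return -1;
 *		// set atime/mtime of /var/log/example.log to "now"
 *		ret = futimesat(dfd, "example.log", NULL);
 *		close(dfd);
 *		return ret;
 *	}
 */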

/*
 * Some key's cleanup time was met after it expired, so we need to get the
 * reaper to go through a cycle finding expired keys.
 */
static void key_gc_timer_func(unsigned long data)
{
	kenter("");
	key_gc_next_run = LONG_MAX;
	set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
	queue_work(system_nrt_wq, &key_gc_work);
}

/*
 * wait_on_bit() sleep function for uninterruptible waiting
 */
static int key_gc_wait_bit(void *flags)
{
	schedule();
	return 0;
}

/*
 * Reap keys of dead type.
 *
 * We use three flags to make sure we see three complete cycles of the garbage
 * collector: the first to mark keys of that type as being dead, the second to
 * collect dead links and the third to clean up the dead keys.  We have to be
 * careful as there may already be a cycle in progress.
 *
 * The caller must be holding key_types_sem.
 */
void key_gc_keytype(struct key_type *ktype)
{
	kenter("%s", ktype->name);

	key_gc_dead_keytype = ktype;
	set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
	smp_mb();
	set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);

	kdebug("schedule");
	queue_work(system_nrt_wq, &key_gc_work);

	kdebug("sleep");
	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
		    TASK_UNINTERRUPTIBLE);

	key_gc_dead_keytype = NULL;
	kleave("");
}

/*
 * Garbage collect pointers from a keyring.
 *
 * Not called with any locks held.  The keyring's key struct will not be
 * deallocated under us as only our caller may deallocate it.
 */
static void key_gc_keyring(struct key *keyring, time_t limit)
{
	struct keyring_list *klist;
	struct key *key;
	int loop;

	kenter("%x", key_serial(keyring));

	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto dont_gc;

	/* scan the keyring looking for dead keys */
	rcu_read_lock();
	klist = rcu_dereference(keyring->payload.subscriptions);
	if (!klist)
		goto unlock_dont_gc;

	for (loop = klist->nkeys - 1; loop >= 0; loop--) {
		key = klist->keys[loop];
		if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
		    (key->expiry > 0 && key->expiry <= limit))
			goto do_gc;
	}

unlock_dont_gc:
	rcu_read_unlock();
dont_gc:
	kleave(" [no gc]");
	return;

do_gc:
	rcu_read_unlock();
	keyring_gc(keyring, limit);
	kleave(" [gc]");
}

/*
 * Garbage collect an unreferenced, detached key
 */
static noinline void key_gc_unused_key(struct key *key)
{
	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);
}

/*
 * Garbage collector for unused keys.
 *
 * This is done in process context so that we don't have to disable interrupts
 * all over the place.  key_put() schedules this rather than trying to do the
 * cleanup itself, which means key_put() doesn't have to sleep.
 */
static void key_garbage_collector(struct work_struct *work)
{
	static u8 gc_state;		/* Internal persistent state */
#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */

	struct rb_node *cursor;
	struct key *key;
	time_t new_timer, limit;

	kenter("[%lx,%x]", key_gc_flags, gc_state);

	limit = current_kernel_time().tv_sec;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	/* Work out what we're going to be doing in this pass */
	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
	gc_state <<= 1;
	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;

	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_DEAD_1;
	kdebug("new pass %x", gc_state);

	new_timer = LONG_MAX;

	/* As only this function is permitted to remove things from the key
	 * serial tree, if cursor is non-NULL then it will always point to a
	 * valid node in the tree - even if lock got dropped.
	 */
	spin_lock(&key_serial_lock);
	cursor = rb_first(&key_serial_tree);

continue_scanning:
	while (cursor) {
		key = rb_entry(cursor, struct key, serial_node);
		cursor = rb_next(cursor);

		if (atomic_read(&key->usage) == 0)
			goto found_unreferenced_key;

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
			if (key->type == key_gc_dead_keytype) {
				gc_state |= KEY_GC_FOUND_DEAD_KEY;
				set_bit(KEY_FLAG_DEAD, &key->flags);
				key->perm = 0;
				goto skip_dead_key;
			}
		}

		if (gc_state & KEY_GC_SET_TIMER) {
			if (key->expiry > limit && key->expiry < new_timer) {
				kdebug("will expire %x in %ld",
				       key_serial(key), key->expiry - limit);
				new_timer = key->expiry;
			}
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
			if (key->type == key_gc_dead_keytype)
				gc_state |= KEY_GC_FOUND_DEAD_KEY;

		if ((gc_state & KEY_GC_REAPING_LINKS) ||
		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
			if (key->type == &key_type_keyring)
				goto found_keyring;
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
			if (key->type == key_gc_dead_keytype)
				goto destroy_dead_key;

	skip_dead_key:
		if (spin_is_contended(&key_serial_lock) || need_resched())
			goto contended;
	}

contended:
	spin_unlock(&key_serial_lock);

maybe_resched:
	if (cursor) {
		cond_resched();
		spin_lock(&key_serial_lock);
		goto continue_scanning;
	}

	/* We've completed the pass.  Set the timer if we need to and queue a
	 * new cycle if necessary.  We keep executing cycles until we find one
	 * where we didn't reap any keys.
	 */
	kdebug("pass complete");

	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
		/* Make sure everyone revalidates their keys if we marked a
		 * bunch as being dead and make sure all keyring ex-payloads
		 * are destroyed.
		 */
		kdebug("dead sync");
		synchronize_rcu();
	}

	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
				 KEY_GC_REAPING_DEAD_2))) {
		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
			/* No remaining dead keys: short circuit the remaining
			 * keytype reap cycles.
			 */
			kdebug("dead short");
			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
			gc_state |= KEY_GC_REAPING_DEAD_3;
		} else {
			gc_state |= KEY_GC_REAP_AGAIN;
		}
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
		kdebug("dead wake");
		smp_mb();
		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
	}

	if (gc_state & KEY_GC_REAP_AGAIN)
		queue_work(system_nrt_wq, &key_gc_work);
	kleave(" [end %x]", gc_state);
	return;

	/* We found an unreferenced key - once we've removed it from the tree,
	 * we can safely drop the lock.
	 */
found_unreferenced_key:
	kdebug("unrefd key %d", key->serial);
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_gc_unused_key(key);
	gc_state |= KEY_GC_REAP_AGAIN;
	goto maybe_resched;

	/* We found a keyring and we need to check the payload for links to
	 * dead or expired keys.  We don't flag another reap immediately as we
	 * have to wait for the old payload to be destroyed by RCU before we
	 * can reap the keys to which it refers.
	 */
found_keyring:
	spin_unlock(&key_serial_lock);
	kdebug("scan keyring %d", key->serial);
	key_gc_keyring(key, limit);
	goto maybe_resched;

	/* We found a dead key that is still referenced.  Reset its type and
	 * destroy its payload with its semaphore held.
	 */
destroy_dead_key:
	spin_unlock(&key_serial_lock);
	kdebug("destroy key %d", key->serial);
	down_write(&key->sem);
	key->type = &key_type_dead;
	if (key_gc_dead_keytype->destroy)
		key_gc_dead_keytype->destroy(key);
	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
	up_write(&key->sem);
	goto maybe_resched;
}
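
/*
 * Illustration only (not part of this file): a sketch of how a key-type
 * unregistration path might drive key_gc_keytype() above.  The function name
 * and the exact locking sequence are assumptions for the example; the stated
 * requirement is only that key_types_sem is held by the caller.
 *
 *	static void example_remove_keytype(struct key_type *ktype)
 *	{
 *		down_write(&key_types_sem);
 *		list_del_init(&ktype->link);		// hypothetical unlink step
 *		downgrade_write(&key_types_sem);	// keep it held, allow readers
 *		key_gc_keytype(ktype);			// reaps keys of this type
 *		up_read(&key_types_sem);
 *	}
 */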