/*
 *  linux/fs/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002       Stephen Rothwell, IBM Corporation
 *  Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
 *  Copyright (C) 1998       Eddie C. Dost  (ecd@skynet.be)
 *  Copyright (C) 2001,2002  Andi Kleen, SuSE Labs 
 *  Copyright (C) 2003       Pavel Machek (pavel@suse.cz)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/vfs.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/smb.h>
#include <linux/smb_mount.h>
#include <linux/ncp_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/dirent.h>
#include <linux/fsnotify.h>
#include <linux/highuid.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/syscall.h>
#include <linux/personality.h>
#include <linux/rwsem.h>
#include <linux/tsacct_kern.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/signal.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/eventpoll.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/ioctls.h>
#include "internal.h"

int compat_log = 1;

int compat_printk(const char *fmt, ...)
{
	va_list ap;
	int ret;
	if (!compat_log)
		return 0;
	va_start(ap, fmt);
	ret = vprintk(fmt, ap);
	va_end(ap);
	return ret;
}

#include "read_write.h"

/*
 * Not all architectures have sys_utime, so implement this in terms
 * of sys_utimes.
 */
asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __user *t)
{
	struct timespec tv[2];

	if (t) {
		if (get_user(tv[0].tv_sec, &t->actime) ||
		    get_user(tv[1].tv_sec, &t->modtime))
			return -EFAULT;
		tv[0].tv_nsec = 0;
		tv[1].tv_nsec = 0;
	}
	return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
}

asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, struct compat_timespec __user *t, int flags)
{
	struct timespec tv[2];

	if  (t) {
		if (get_compat_timespec(&tv[0], &t[0]) ||
		    get_compat_timespec(&tv[1], &t[1]))
			return -EFAULT;

		if ((tv[0].tv_nsec == UTIME_OMIT || tv[0].tv_nsec == UTIME_NOW)
		    && tv[0].tv_sec != 0)
			return -EINVAL;
		if ((tv[1].tv_nsec == UTIME_OMIT || tv[1].tv_nsec == UTIME_NOW)
		    && tv[1].tv_sec != 0)
			return -EINVAL;

		if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT)
			return 0;
	}
	return do_utimes(dfd, filename, t ? tv : NULL, flags);
}

asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, struct compat_timeval __user *t)
{
	struct timespec tv[2];

	if (t) {
		if (get_user(tv[0].tv_sec, &t[0].tv_sec) ||
		    get_user(tv[0].tv_nsec, &t[0].tv_usec) ||
		    get_user(tv[1].tv_sec, &t[1].tv_sec) ||
		    get_user(tv[1].tv_nsec, &t[1].tv_usec))
			return -EFAULT;
		if (tv[0].tv_nsec >= 1000000 || tv[0].tv_nsec < 0 ||
		    tv[1].tv_nsec >= 1000000 || tv[1].tv_nsec < 0)
			return -EINVAL;
		tv[0].tv_nsec *= 1000;
		tv[1].tv_nsec *= 1000;
	}
	return do_utimes(dfd, filename, t ? tv : NULL, 0);
}

asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t)
{
	return compat_sys_futimesat(AT_FDCWD, filename, t);
}

asmlinkage long compat_sys_newstat(char __user * filename,
		struct compat_stat __user *statbuf)
{
	struct kstat stat;
	int error = vfs_stat_fd(AT_FDCWD, filename, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}

asmlinkage long compat_sys_newlstat(char __user * filename,
		struct compat_stat __user *statbuf)
{
	struct kstat stat;
	int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}

#ifndef __ARCH_WANT_STAT64
asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user *filename,
		struct compat_stat __user *statbuf, int flag)
{
	struct kstat stat;
	int error = -EINVAL;

	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
		goto out;

	if (flag & AT_SYMLINK_NOFOLLOW)
		error = vfs_lstat_fd(dfd, filename, &stat);
	else
		error = vfs_stat_fd(dfd, filename, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);

out:
	return error;
}
#endif

asmlinkage long compat_sys_newfstat(unsigned int fd,
		struct compat_stat __user * statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}

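/*
 * Copy a kernel struct kstatfs into the 32-bit struct compat_statfs layout,
 * failing with -EOVERFLOW when a 64-bit block or inode count (other than the
 * "unknown" value of all ones) cannot be represented in 32 bits.
 */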
static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *kbuf)
{
	if (sizeof ubuf->f_blocks == 4) {
		if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail) &
		    0xffffffff00000000ULL)
			return -EOVERFLOW;
		/* f_files and f_ffree may be -1; it's okay
		 * to stuff that into 32 bits */
		if (kbuf->f_files != 0xffffffffffffffffULL
		 && (kbuf->f_files & 0xffffffff00000000ULL))
			return -EOVERFLOW;
		if (kbuf->f_ffree != 0xffffffffffffffffULL
		 && (kbuf->f_ffree & 0xffffffff00000000ULL))
			return -EOVERFLOW;
	}
	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) ||
	    __put_user(kbuf->f_type, &ubuf->f_type) ||
	    __put_user(kbuf->f_bsize, &ubuf->f_bsize) ||
	    __put_user(kbuf->f_blocks, &ubuf->f_blocks) ||
	    __put_user(kbuf->f_bfree, &ubuf->f_bfree) ||
	    __put_user(kbuf->f_bavail, &ubuf->f_bavail) ||
	    __put_user(kbuf->f_files, &ubuf->f_files) ||
	    __put_user(kbuf->f_ffree, &ubuf->f_ffree) ||
	    __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
	    __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
	    __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
	    __put_user(kbuf->f_frsize, &ubuf->f_frsize) ||
	    __put_user(0, &ubuf->f_spare[0]) || 
	    __put_user(0, &ubuf->f_spare[1]) || 
	    __put_user(0, &ubuf->f_spare[2]) || 
	    __put_user(0, &ubuf->f_spare[3]) || 
	    __put_user(0, &ubuf->f_spare[4]))
		return -EFAULT;
	return 0;
}

/*
 * The following statfs calls are copies of code from fs/open.c and
 * should be checked against those from time to time
 */
asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs __user *buf)
{
	struct nameidata nd;
	int error;

	error = user_path_walk(path, &nd);
	if (!error) {
		struct kstatfs tmp;
		error = vfs_statfs(nd.path.dentry, &tmp);
		if (!error)
			error = put_compat_statfs(buf, &tmp);
		path_put(&nd.path);
	}
	return error;
}

asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user *buf)
{
	struct file * file;
	struct kstatfs tmp;
	int error;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;
	error = vfs_statfs(file->f_path.dentry, &tmp);
	if (!error)
		error = put_compat_statfs(buf, &tmp);
	fput(file);
out:
	return error;
}

static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
{
	if (sizeof ubuf->f_blocks == 4) {
		if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail) &
		    0xffffffff00000000ULL)
			return -EOVERFLOW;
		/* f_files and f_ffree may be -1; it's okay
		 * to stuff that into 32 bits */
		if (kbuf->f_files != 0xffffffffffffffffULL
		 && (kbuf->f_files & 0xffffffff00000000ULL))
			return -EOVERFLOW;
		if (kbuf->f_ffree != 0xffffffffffffffffULL
		 && (kbuf->f_ffree & 0xffffffff00000000ULL))
			return -EOVERFLOW;
	}
	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) ||
	    __put_user(kbuf->f_type, &ubuf->f_type) ||
	    __put_user(kbuf->f_bsize, &ubuf->f_bsize) ||
	    __put_user(kbuf->f_blocks, &ubuf->f_blocks) ||
	    __put_user(kbuf->f_bfree, &ubuf->f_bfree) ||
	    __put_user(kbuf->f_bavail, &ubuf->f_bavail) ||
	    __put_user(kbuf->f_files, &ubuf->f_files) ||
	    __put_user(kbuf->f_ffree, &ubuf->f_ffree) ||
	    __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
	    __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
	    __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
	    __put_user(kbuf->f_frsize, &ubuf->f_frsize))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, struct compat_statfs64 __user *buf)
{
	struct nameidata nd;
	int error;

	if (sz != sizeof(*buf))
		return -EINVAL;

	error = user_path_walk(path, &nd);
	if (!error) {
		struct kstatfs tmp;
		error = vfs_statfs(nd.path.dentry, &tmp);
		if (!error)
			error = put_compat_statfs64(buf, &tmp);
		path_put(&nd.path);
	}
	return error;
}

asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user *buf)
{
	struct file * file;
	struct kstatfs tmp;
	int error;

	if (sz != sizeof(*buf))
		return -EINVAL;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;
	error = vfs_statfs(file->f_path.dentry, &tmp);
	if (!error)
		error = put_compat_statfs64(buf, &tmp);
	fput(file);
out:
	return error;
}

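/*
 * Helpers to translate a userspace compat_flock into the kernel's native
 * struct flock and back, so that the 32-bit fcntl locking commands can be
 * forwarded to sys_fcntl() below.
 */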
static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
{
	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
	    __get_user(kfl->l_type, &ufl->l_type) ||
	    __get_user(kfl->l_whence, &ufl->l_whence) ||
	    __get_user(kfl->l_start, &ufl->l_start) ||
	    __get_user(kfl->l_len, &ufl->l_len) ||
	    __get_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}

static int put_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
{
	if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) ||
	    __put_user(kfl->l_type, &ufl->l_type) ||
	    __put_user(kfl->l_whence, &ufl->l_whence) ||
	    __put_user(kfl->l_start, &ufl->l_start) ||
	    __put_user(kfl->l_len, &ufl->l_len) ||
	    __put_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}

#ifndef HAVE_ARCH_GET_COMPAT_FLOCK64
static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
{
	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
	    __get_user(kfl->l_type, &ufl->l_type) ||
	    __get_user(kfl->l_whence, &ufl->l_whence) ||
	    __get_user(kfl->l_start, &ufl->l_start) ||
	    __get_user(kfl->l_len, &ufl->l_len) ||
	    __get_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}
#endif

#ifndef HAVE_ARCH_PUT_COMPAT_FLOCK64
static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
{
	if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) ||
	    __put_user(kfl->l_type, &ufl->l_type) ||
	    __put_user(kfl->l_whence, &ufl->l_whence) ||
	    __put_user(kfl->l_start, &ufl->l_start) ||
	    __put_user(kfl->l_len, &ufl->l_len) ||
	    __put_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}
#endif

asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
		unsigned long arg)
{
	mm_segment_t old_fs;
	struct flock f;
	long ret;

	switch (cmd) {
	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
		ret = get_compat_flock(&f, compat_ptr(arg));
		if (ret != 0)
			break;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = sys_fcntl(fd, cmd, (unsigned long)&f);
		set_fs(old_fs);
		if (cmd == F_GETLK && ret == 0) {
			/* GETLK was successful and we need to return the data...
			 * but it needs to fit in the compat structure.
			 * l_start shouldn't be too big, unless the original
			 * start + end is greater than COMPAT_OFF_T_MAX, in which
			 * case the app was asking for trouble, so we return
			 * -EOVERFLOW in that case.
			 * l_len could be too big, in which case we just truncate it,
			 * and only allow the app to see that part of the conflicting
			 * lock that might make sense to it anyway
			 */

			if (f.l_start > COMPAT_OFF_T_MAX)
				ret = -EOVERFLOW;
			if (f.l_len > COMPAT_OFF_T_MAX)
				f.l_len = COMPAT_OFF_T_MAX;
			if (ret == 0)
				ret = put_compat_flock(&f, compat_ptr(arg));
		}
		break;

	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
		ret = get_compat_flock64(&f, compat_ptr(arg));
		if (ret != 0)
			break;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = sys_fcntl(fd, (cmd == F_GETLK64) ? F_GETLK :
				((cmd == F_SETLK64) ? F_SETLK : F_SETLKW),
				(unsigned long)&f);
		set_fs(old_fs);
		if (cmd == F_GETLK64 && ret == 0) {
			/* need to return lock information - see above for commentary */
			if (f.l_start > COMPAT_LOFF_T_MAX)
				ret = -EOVERFLOW;
			if (f.l_len > COMPAT_LOFF_T_MAX)
				f.l_len = COMPAT_LOFF_T_MAX;
			if (ret == 0)
				ret = put_compat_flock64(&f, compat_ptr(arg));
		}
		break;

	default:
		ret = sys_fcntl(fd, cmd, arg);
		break;
	}
	return ret;
}

asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
		unsigned long arg)
{
	if ((cmd == F_GETLK64) || (cmd == F_SETLK64) || (cmd == F_SETLKW64))
		return -EINVAL;
	return compat_sys_fcntl64(fd, cmd, arg);
}

asmlinkage long
compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
{
	long ret;
	aio_context_t ctx64;

	mm_segment_t oldfs = get_fs();
	if (unlikely(get_user(ctx64, ctx32p)))
		return -EFAULT;

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
	set_fs(oldfs);
	/* truncating is ok because it's a user address */
	if (!ret)
		ret = put_user((u32) ctx64, ctx32p);
	return ret;
}

asmlinkage long
compat_sys_io_getevents(aio_context_t ctx_id,
				 unsigned long min_nr,
				 unsigned long nr,
				 struct io_event __user *events,
				 struct compat_timespec __user *timeout)
{
	long ret;
	struct timespec t;
	struct timespec __user *ut = NULL;

	ret = -EFAULT;
	if (unlikely(!access_ok(VERIFY_WRITE, events, 
				nr * sizeof(struct io_event))))
		goto out;
	if (timeout) {
		if (get_compat_timespec(&t, timeout))
			goto out;

		ut = compat_alloc_user_space(sizeof(*ut));
		if (copy_to_user(ut, &t, sizeof(t)) )
			goto out;
	} 
	ret = sys_io_getevents(ctx_id, min_nr, nr, events, ut);
out:
	return ret;
}

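/*
 * Widen an array of 32-bit iocb pointers into the caller-supplied array of
 * native userspace pointers, one entry at a time.
 */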
static inline long
copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
{
	compat_uptr_t uptr;
	int i;

	for (i = 0; i < nr; ++i) {
		if (get_user(uptr, ptr32 + i))
			return -EFAULT;
		if (put_user(compat_ptr(uptr), ptr64 + i))
			return -EFAULT;
	}
	return 0;
}

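/*
 * Bound the temporary iocb pointer array to one page's worth of native
 * pointers; compat_sys_io_submit() silently clamps nr to this limit.
 */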
#define MAX_AIO_SUBMITS 	(PAGE_SIZE/sizeof(struct iocb *))

asmlinkage long
compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
{
	struct iocb __user * __user *iocb64; 
	long ret;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (nr > MAX_AIO_SUBMITS)
		nr = MAX_AIO_SUBMITS;
	
	iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
	ret = copy_iocb(nr, iocb, iocb64);
	if (!ret)
		ret = sys_io_submit(ctx_id, nr, iocb64);
	return ret;
}

struct compat_ncp_mount_data {
	compat_int_t version;
	compat_uint_t ncp_fd;
	__compat_uid_t mounted_uid;
	compat_pid_t wdog_pid;
	unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
	compat_uint_t time_out;
	compat_uint_t retry_count;
	compat_uint_t flags;
	__compat_uid_t uid;
	__compat_gid_t gid;
	compat_mode_t file_mode;
	compat_mode_t dir_mode;
};

struct compat_ncp_mount_data_v4 {
	compat_int_t version;
	compat_ulong_t flags;
	compat_ulong_t mounted_uid;
	compat_long_t wdog_pid;
	compat_uint_t ncp_fd;
	compat_uint_t time_out;
	compat_uint_t retry_count;
	compat_ulong_t uid;
	compat_ulong_t gid;
	compat_ulong_t file_mode;
	compat_ulong_t dir_mode;
};

static void *do_ncp_super_data_conv(void *raw_data)
{
	int version = *(unsigned int *)raw_data;

	if (version == 3) {
		struct compat_ncp_mount_data *c_n = raw_data;
		struct ncp_mount_data *n = raw_data;

		n->dir_mode = c_n->dir_mode;
		n->file_mode = c_n->file_mode;
		n->gid = c_n->gid;
		n->uid = c_n->uid;
		memmove (n->mounted_vol, c_n->mounted_vol, (sizeof (c_n->mounted_vol) + 3 * sizeof (unsigned int)));
		n->wdog_pid = c_n->wdog_pid;
		n->mounted_uid = c_n->mounted_uid;
	} else if (version == 4) {
		struct compat_ncp_mount_data_v4 *c_n = raw_data;
		struct ncp_mount_data_v4 *n = raw_data;

		n->dir_mode = c_n->dir_mode;
		n->file_mode = c_n->file_mode;
		n->gid = c_n->gid;
		n->uid = c_n->uid;
		n->retry_count = c_n->retry_count;
		n->time_out = c_n->time_out;
		n->ncp_fd = c_n->ncp_fd;
		n->wdog_pid = c_n->wdog_pid;
		n->mounted_uid = c_n->mounted_uid;
		n->flags = c_n->flags;
	} else if (version != 5) {
		return NULL;
	}

	return raw_data;
}

struct compat_smb_mount_data {
	compat_int_t version;
	__compat_uid_t mounted_uid;
	__compat_uid_t uid;
	__compat_gid_t gid;
	compat_mode_t file_mode;
	compat_mode_t dir_mode;
};

static void *do_smb_super_data_conv(void *raw_data)
{
	struct smb_mount_data *s = raw_data;
	struct compat_smb_mount_data *c_s = raw_data;

	if (c_s->version != SMB_MOUNT_OLDVERSION)
		goto out;
	s->dir_mode = c_s->dir_mode;
	s->file_mode = c_s->file_mode;
	s->gid = c_s->gid;
	s->uid = c_s->uid;
	s->mounted_uid = c_s->mounted_uid;
 out:
	return raw_data;
}

struct compat_nfs_string {
	compat_uint_t len;
	compat_uptr_t data;
};

static inline void compat_nfs_string(struct nfs_string *dst,
				     struct compat_nfs_string *src)
{
	dst->data = compat_ptr(src->data);
	dst->len = src->len;
}

struct compat_nfs4_mount_data_v1 {
	compat_int_t version;
	compat_int_t flags;
	compat_int_t rsize;
	compat_int_t wsize;
	compat_int_t timeo;
	compat_int_t retrans;
	compat_int_t acregmin;
	compat_int_t acregmax;
	compat_int_t acdirmin;
	compat_int_t acdirmax;
	struct compat_nfs_string client_addr;
	struct compat_nfs_string mnt_path;
	struct compat_nfs_string hostname;
	compat_uint_t host_addrlen;
	compat_uptr_t host_addr;
	compat_int_t proto;
	compat_int_t auth_flavourlen;
	compat_uptr_t auth_flavours;
};

static int do_nfs4_super_data_conv(void *raw_data)
{
	int version = *(compat_uint_t *) raw_data;

	if (version == 1) {
		struct compat_nfs4_mount_data_v1 *raw = raw_data;
		struct nfs4_mount_data *real = raw_data;

		/* copy the fields backwards */
		real->auth_flavours = compat_ptr(raw->auth_flavours);
		real->auth_flavourlen = raw->auth_flavourlen;
		real->proto = raw->proto;
		real->host_addr = compat_ptr(raw->host_addr);
		real->host_addrlen = raw->host_addrlen;
		compat_nfs_string(&real->hostname, &raw->hostname);
		compat_nfs_string(&real->mnt_path, &raw->mnt_path);
		compat_nfs_string(&real->client_addr, &raw->client_addr);
		real->acdirmax = raw->acdirmax;
		real->acdirmin = raw->acdirmin;
		real->acregmax = raw->acregmax;
		real->acregmin = raw->acregmin;
		real->retrans = raw->retrans;
		real->timeo = raw->timeo;
		real->wsize = raw->wsize;
		real->rsize = raw->rsize;
		real->flags = raw->flags;
		real->version = raw->version;
	}

	return 0;
}

#define SMBFS_NAME      "smbfs"
#define NCPFS_NAME      "ncpfs"
#define NFS4_NAME	"nfs4"

asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
				 char __user * type, unsigned long flags,
				 void __user * data)
{
	unsigned long type_page;
	unsigned long data_page;
	unsigned long dev_page;
	char *dir_page;
	int retval;

	retval = copy_mount_options (type, &type_page);
	if (retval < 0)
		goto out;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options (dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options (data, &data_page);
	if (retval < 0)
		goto out3;

	retval = -EINVAL;

	if (type_page && data_page) {
		if (!strcmp((char *)type_page, SMBFS_NAME)) {
			do_smb_super_data_conv((void *)data_page);
		} else if (!strcmp((char *)type_page, NCPFS_NAME)) {
			do_ncp_super_data_conv((void *)data_page);
		} else if (!strcmp((char *)type_page, NFS4_NAME)) {
			if (do_nfs4_super_data_conv((void *) data_page))
				goto out4;
		}
	}

	lock_kernel();
	retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
			flags, (void*)data_page);
	unlock_kernel();

 out4:
	free_page(data_page);
 out3:
	free_page(dev_page);
 out2:
	putname(dir_page);
 out1:
	free_page(type_page);
 out:
	return retval;
}

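/*
 * Byte offset of the d_name field within a dirent record, used below when
 * computing each record's length.
 */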
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))

struct compat_old_linux_dirent {
	compat_ulong_t	d_ino;
	compat_ulong_t	d_offset;
	unsigned short	d_namlen;
	char		d_name[1];
};

struct compat_readdir_callback {
	struct compat_old_linux_dirent __user *dirent;
	int result;
};

static int compat_fillonedir(void *__buf, const char *name, int namlen,
			loff_t offset, u64 ino, unsigned int d_type)
{
	struct compat_readdir_callback *buf = __buf;
	struct compat_old_linux_dirent __user *dirent;
	compat_ulong_t d_ino;

	if (buf->result)
		return -EINVAL;
	d_ino = ino;
	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
		return -EOVERFLOW;
	buf->result++;
	dirent = buf->dirent;
	if (!access_ok(VERIFY_WRITE, dirent,
			(unsigned long)(dirent->d_name + namlen + 1) -
				(unsigned long)dirent))
		goto efault;
	if (	__put_user(d_ino, &dirent->d_ino) ||
		__put_user(offset, &dirent->d_offset) ||
		__put_user(namlen, &dirent->d_namlen) ||
		__copy_to_user(dirent->d_name, name, namlen) ||
		__put_user(0, dirent->d_name + namlen))
		goto efault;
	return 0;
efault:
	buf->result = -EFAULT;
	return -EFAULT;
}

asmlinkage long compat_sys_old_readdir(unsigned int fd,
	struct compat_old_linux_dirent __user *dirent, unsigned int count)
{
	int error;
	struct file *file;
	struct compat_readdir_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.result = 0;
	buf.dirent = dirent;

	error = vfs_readdir(file, compat_fillonedir, &buf);
	if (error >= 0)
		error = buf.result;

	fput(file);
out:
	return error;
}

struct compat_linux_dirent {
	compat_ulong_t	d_ino;
	compat_ulong_t	d_off;
	unsigned short	d_reclen;
	char		d_name[1];
};

struct compat_getdents_callback {
	struct compat_linux_dirent __user *current_dir;
	struct compat_linux_dirent __user *previous;
	int count;
	int error;
};

static int compat_filldir(void *__buf, const char *name, int namlen,
		loff_t offset, u64 ino, unsigned int d_type)
{
	struct compat_linux_dirent __user * dirent;
	struct compat_getdents_callback *buf = __buf;
	compat_ulong_t d_ino;
	int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 2, sizeof(compat_long_t));

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	d_ino = ino;
	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
		return -EOVERFLOW;
	dirent = buf->previous;
	if (dirent) {
		if (__put_user(offset, &dirent->d_off))
			goto efault;
	}
	dirent = buf->current_dir;
	if (__put_user(d_ino, &dirent->d_ino))
		goto efault;
	if (__put_user(reclen, &dirent->d_reclen))
		goto efault;
	if (copy_to_user(dirent->d_name, name, namlen))
		goto efault;
	if (__put_user(0, dirent->d_name + namlen))
		goto efault;
	if (__put_user(d_type, (char  __user *) dirent + reclen - 1))
		goto efault;
	buf->previous = dirent;
	dirent = (void __user *)dirent + reclen;
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
efault:
	buf->error = -EFAULT;
	return -EFAULT;
}

asmlinkage long compat_sys_getdents(unsigned int fd,
		struct compat_linux_dirent __user *dirent, unsigned int count)
{
	struct file * file;
	struct compat_linux_dirent __user * lastdirent;
	struct compat_getdents_callback buf;
	int error;

	error = -EFAULT;
	if (!access_ok(VERIFY_WRITE, dirent, count))
		goto out;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, compat_filldir, &buf);
	if (error < 0)
		goto out_putf;
	error = buf.error;
	lastdirent = buf.previous;
	if (lastdirent) {
		if (put_user(file->f_pos, &lastdirent->d_off))
			error = -EFAULT;
		else
			error = count - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}

#ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64

struct compat_getdents_callback64 {
	struct linux_dirent64 __user *current_dir;
	struct linux_dirent64 __user *previous;
	int count;
	int error;
};

static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t offset,
		     u64 ino, unsigned int d_type)
{
	struct linux_dirent64 __user *dirent;
	struct compat_getdents_callback64 *buf = __buf;
	int jj = NAME_OFFSET(dirent);
	int reclen = ALIGN(jj + namlen + 1, sizeof(u64));
	u64 off;

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	dirent = buf->previous;

	if (dirent) {
		if (__put_user_unaligned(offset, &dirent->d_off))
			goto efault;
	}
	dirent = buf->current_dir;
	if (__put_user_unaligned(ino, &dirent->d_ino))
		goto efault;
	off = 0;
	if (__put_user_unaligned(off, &dirent->d_off))
		goto efault;
	if (__put_user(reclen, &dirent->d_reclen))
		goto efault;
	if (__put_user(d_type, &dirent->d_type))
		goto efault;
	if (copy_to_user(dirent->d_name, name, namlen))
		goto efault;
	if (__put_user(0, dirent->d_name + namlen))
		goto efault;
	buf->previous = dirent;
	dirent = (void __user *)dirent + reclen;
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
efault:
	buf->error = -EFAULT;
	return -EFAULT;
}

asmlinkage long compat_sys_getdents64(unsigned int fd,
		struct linux_dirent64 __user * dirent, unsigned int count)
{
	struct file * file;
	struct linux_dirent64 __user * lastdirent;
	struct compat_getdents_callback64 buf;
	int error;

	error = -EFAULT;
	if (!access_ok(VERIFY_WRITE, dirent, count))
		goto out;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, compat_filldir64, &buf);
	if (error < 0)
		goto out_putf;
	error = buf.error;
	lastdirent = buf.previous;
	if (lastdirent) {
		typeof(lastdirent->d_off) d_off = file->f_pos;
		error = -EFAULT;
		if (__put_user_unaligned(d_off, &lastdirent->d_off))
			goto out_putf;
		error = count - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}
#endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */

static ssize_t compat_do_readv_writev(int type, struct file *file,
			       const struct compat_iovec __user *uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	compat_ssize_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov=iovstack, *vector;
	ssize_t ret;
	int seg;
	io_fn_t fn;
	iov_fn_t fnv;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	ret = 0;
	if (nr_segs == 0)
		goto out;

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	ret = -EINVAL;
	if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
		goto out;
	if (!file->f_op)
		goto out;
	if (nr_segs > UIO_FASTIOV) {
		ret = -ENOMEM;
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			goto out;
	}
	ret = -EFAULT;
	if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
		goto out;

	/*
	 * Single unix specification:
	 * We should return -EINVAL if an element length is negative or does
	 * not fit in an ssize_t, and the total length must also fit in an
	 * ssize_t.
	 *
	 * Be careful here because iov_len is a size_t not an ssize_t
	 */
	tot_len = 0;
	vector = iov;
	ret = -EINVAL;
	for (seg = 0 ; seg < nr_segs; seg++) {
		compat_ssize_t tmp = tot_len;
		compat_ssize_t len;
		compat_uptr_t buf;

		if (__get_user(len, &uvector->iov_len) ||
		    __get_user(buf, &uvector->iov_base)) {
			ret = -EFAULT;
			goto out;
		}
		if (len < 0)	/* size_t not fitting a compat_ssize_t .. */
			goto out;
		tot_len += len;
		if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
			goto out;
		vector->iov_base = compat_ptr(buf);
		vector->iov_len = (compat_size_t) len;
		uvector++;
		vector++;
	}
	if (tot_len == 0) {
		ret = 0;
		goto out;
	}

	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
	}

	if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		struct dentry *dentry = file->f_path.dentry;
		if (type == READ)
			fsnotify_access(dentry);
		else
			fsnotify_modify(dentry);
	}
	return ret;
}

asmlinkage ssize_t
compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;

	file = fget(fd);
	if (!file)
		return -EBADF;

	if (!(file->f_mode & FMODE_READ))
		goto out;

	ret = -EINVAL;
	if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
		goto out;

	ret = compat_do_readv_writev(READ, file, vec, vlen, &file->f_pos);

out:
	fput(file);
	return ret;
}

asmlinkage ssize_t
compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;

	file = fget(fd);
	if (!file)
		return -EBADF;
	if (!(file->f_mode & FMODE_WRITE))
		goto out;

	ret = -EINVAL;
	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
		goto out;

	ret = compat_do_readv_writev(WRITE, file, vec, vlen, &file->f_pos);

out:
	fput(file);
	return ret;
}

asmlinkage long
compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
		    unsigned int nr_segs, unsigned int flags)
{
	unsigned i;
	struct iovec __user *iov;
	if (nr_segs > UIO_MAXIOV)
		return -EINVAL;
	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
	for (i = 0; i < nr_segs; i++) {
		struct compat_iovec v;
		if (get_user(v.iov_base, &iov32[i].iov_base) ||
		    get_user(v.iov_len, &iov32[i].iov_len) ||
		    put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
		    put_user(v.iov_len, &iov[i].iov_len))
			return -EFAULT;
	}
	return sys_vmsplice(fd, iov, nr_segs, flags);
}

/*
 * Exactly like fs/open.c:sys_open(), except that it doesn't set the
 * O_LARGEFILE flag.
 */
asmlinkage long
compat_sys_open(const char __user *filename, int flags, int mode)
{
	return do_sys_open(AT_FDCWD, filename, flags, mode);
}

/*
 * Exactly like fs/open.c:sys_openat(), except that it doesn't set the
 * O_LARGEFILE flag.
 */
asmlinkage long
compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, int mode)
{
	return do_sys_open(dfd, filename, flags, mode);
}

/*
 * compat_count() counts the number of argument/environment strings. It is basically
 * a copy of count() from fs/exec.c, except that it works with 32 bit argv
 * and envp pointers.
 */
static int compat_count(compat_uptr_t __user *argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			compat_uptr_t p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if(++i > max)
				return -E2BIG;
		}
	}
	return i;
}

/*
 * compat_copy_strings() is basically a copy of copy_strings() from fs/exec.c
 * except that it works with 32 bit argv and envp pointers.
 */
static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
				struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		compat_uptr_t str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
		    !(len = strnlen_user(compat_ptr(str), MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (len > MAX_ARG_STRLEN) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

#ifdef CONFIG_STACK_GROWSUP
				ret = expand_stack_downwards(bprm->vma, pos);
				if (ret < 0) {
					/* We've exceeded the stack rlimit. */
					ret = -E2BIG;
					goto out;
				}
#endif
				ret = get_user_pages(current, bprm->mm, pos,
						     1, 1, 1, &page, NULL);
				if (ret <= 0) {
					/* We've exceeded the stack rlimit. */
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_cache_page(bprm->vma, kpos,
						 page_to_pfn(kmapped_page));
			}
			if (copy_from_user(kaddr+offset, compat_ptr(str),
						bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_page(kmapped_page);
	}
	return ret;
}

/*
 * compat_do_execve() is mostly a copy of do_execve(), with the exception
 * that it processes 32 bit argv and envp pointers.
 */
int compat_do_execve(char * filename,
	compat_uptr_t __user *argv,
	compat_uptr_t __user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = compat_count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = compat_count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = compat_copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = compat_copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals(current);
		free_bprm(bprm);
		return retval;
	}

out:
	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_kfree:
	free_bprm(bprm);

out_ret:
	return retval;
}

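/* Number of fd bits carried in each compat_ulong_t word of a 32-bit fd_set. */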
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

/*
 * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS);
	if (ufdset) {
		unsigned long odd;

		if (!access_ok(VERIFY_WRITE, ufdset, nr*sizeof(compat_ulong_t)))
			return -EFAULT;

		odd = nr & 1UL;
		nr &= ~1UL;
		while (nr) {
			unsigned long h, l;
			if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
				return -EFAULT;
			ufdset += 2;
			*fdset++ = h << 32 | l;
			nr -= 2;
		}
		if (odd && __get_user(*fdset, ufdset))
			return -EFAULT;
	} else {
		/* Tricky: we must clear the full unsigned long at the
		 * end of the kernel fdset; this memset makes sure that
		 * actually happens.
		 */
		memset(fdset, 0, ((nr + 1) & ~1)*sizeof(compat_ulong_t));
	}
	return 0;
}
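/*
 * For example, two consecutive compat words l = 0x00000001 and h = 0x00000002
 * read from the user fd_set above are merged into the single kernel word
 * (h << 32) | l = 0x0000000200000001.
 */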

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	unsigned long odd;
	nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS);

	if (!ufdset)
		return 0;

	odd = nr & 1UL;
	nr &= ~1UL;
	while (nr) {
		unsigned long h, l;
		l = *fdset++;
		h = l >> 32;
		if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
			return -EFAULT;
		ufdset += 2;
		nr -= 2;
	}
	if (odd && __put_user(*fdset, ufdset))
		return -EFAULT;
	return 0;
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
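/*
 * Longest relative timeout, in whole seconds, that can still be expressed as
 * a jiffies count below MAX_SCHEDULE_TIMEOUT.
 */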
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)

int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fdsets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc(6 * size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, timeout);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

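/*
 * 32-bit select() entry point: convert the user's compat timeval into
 * a jiffies-based timeout, run the common core above, and copy the
 * remaining time back unless the personality asks for sticky timeouts.
 */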
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timeval __user *tvp)
{
	s64 timeout = -1;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		if (tv.tv_sec < 0 || tv.tv_usec < 0)
			return -EINVAL;

		/* Cast to u64 to make GCC stop complaining */
		if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
			timeout = -1;	/* infinite */
		else {
			timeout = DIV_ROUND_UP(tv.tv_usec, 1000000/HZ);
			timeout += tv.tv_sec * HZ;
		}
	}

	ret = compat_core_sys_select(n, inp, outp, exp, &timeout);

	if (tvp) {
		struct compat_timeval rtv;

		if (current->personality & STICKY_TIMEOUTS)
			goto sticky;
		rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
		rtv.tv_sec = timeout;
		if (compat_timeval_compare(&rtv, &tv) >= 0)
			rtv = tv;
		if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
sticky:
			/*
			 * If an application puts its timeval in read-only
			 * memory, we don't want the Linux-specific update to
			 * the timeval to cause a fault after the select has
			 * completed successfully. However, because we're not
			 * updating the timeval, we can't restart the system
			 * call.
			 */
			if (ret == -ERESTARTNOHAND)
				ret = -EINTR;
		}
	}

	return ret;
}

#ifdef HAVE_SET_RESTORE_SIGMASK
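/*
 * Common worker for the 32-bit pselect(): install the caller's signal
 * mask for the duration of the wait, run compat_core_sys_select() in
 * chunks of at most MAX_SELECT_SECONDS so a large timespec cannot
 * overflow the jiffies timeout, and defer restoring the original mask
 * until after signal delivery when the wait was interrupted.
 */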
asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t ksigmask, sigsaved;
	s64 timeout = MAX_SCHEDULE_TIMEOUT;
	struct compat_timespec ts;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		if (ts.tv_sec < 0 || ts.tv_nsec < 0)
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &ss32);

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	do {
		if (tsp) {
			if ((unsigned long)ts.tv_sec < MAX_SELECT_SECONDS) {
				timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ);
				timeout += ts.tv_sec * (unsigned long)HZ;
				ts.tv_sec = 0;
				ts.tv_nsec = 0;
			} else {
				ts.tv_sec -= MAX_SELECT_SECONDS;
				timeout = MAX_SELECT_SECONDS * HZ;
			}
		}

		ret = compat_core_sys_select(n, inp, outp, exp, &timeout);

	} while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));

	if (tsp) {
		struct compat_timespec rts;

		if (current->personality & STICKY_TIMEOUTS)
			goto sticky;

		rts.tv_sec = timeout / HZ;
		rts.tv_nsec = (timeout % HZ) * (NSEC_PER_SEC/HZ);
		if (rts.tv_nsec >= NSEC_PER_SEC) {
			rts.tv_sec++;
			rts.tv_nsec -= NSEC_PER_SEC;
		}
		if (compat_timespec_compare(&rts, &ts) >= 0)
			rts = ts;
		if (copy_to_user(tsp, &rts, sizeof(rts))) {
sticky:
			/*
			 * If an application puts its timespec in read-only
			 * memory, we don't want the Linux-specific update to
			 * the timespec to cause a fault after the select has
			 * completed successfully. However, because we're not
			 * updating the timespec, we can't restart the system
			 * call.
			 */
			if (ret == -ERESTARTNOHAND)
				ret = -EINTR;
		}
	}

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

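/*
 * The sixth argument of pselect6() points to a pair holding the sigset
 * pointer and its size.  Unpack the two fields here and hand them on
 * to compat_sys_pselect7().
 */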
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, void __user *sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
		    __get_user(up, (compat_uptr_t __user *)sig) ||
		    __get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return compat_sys_pselect7(n, inp, outp, exp, tsp, compat_ptr(up),
					sigsetsize);
}

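/*
 * 32-bit ppoll(): convert the timespec and signal mask, call
 * do_sys_poll(), arrange for the saved mask to be restored only after
 * signal delivery if we were interrupted, and write back the time
 * remaining.
 */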
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
	unsigned int nfds, struct compat_timespec __user *tsp,
	const compat_sigset_t __user *sigmask, compat_size_t sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t ksigmask, sigsaved;
	struct compat_timespec ts;
	s64 timeout = -1;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		/* We assume that ts.tv_sec is always less than
		   the number of seconds that can be expressed in
		   an s64. Otherwise the compiler complains at us. */
		timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ);
		timeout += ts.tv_sec * HZ;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &ss32);

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, &timeout);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
				sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	if (tsp && timeout >= 0) {
		struct compat_timespec rts;

		if (current->personality & STICKY_TIMEOUTS)
			goto sticky;
		/* Yes, we know it's actually an s64, but it's also positive. */
		rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
					1000;
		rts.tv_sec = timeout;
		if (compat_timespec_compare(&rts, &ts) >= 0)
			rts = ts;
		if (copy_to_user(tsp, &rts, sizeof(rts))) {
sticky:
			/*
			 * If an application puts its timespec in read-only
			 * memory, we don't want the Linux-specific update to
			 * the timespec to cause a fault after the poll has
			 * completed successfully. However, because we're not
			 * updating the timespec, we can't restart the system
			 * call.
			 */
			if (ret == -ERESTARTNOHAND && timeout >= 0)
				ret = -EINTR;
		}
	}

	return ret;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */

#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
/* Stuff for NFS server syscalls... */
struct compat_nfsctl_svc {
	u16			svc32_port;
	s32			svc32_nthreads;
};

struct compat_nfsctl_client {
	s8			cl32_ident[NFSCLNT_IDMAX+1];
	s32			cl32_naddr;
	struct in_addr		cl32_addrlist[NFSCLNT_ADDRMAX];
	s32			cl32_fhkeytype;
	s32			cl32_fhkeylen;
	u8			cl32_fhkey[NFSCLNT_KEYMAX];
};

struct compat_nfsctl_export {
	char		ex32_client[NFSCLNT_IDMAX+1];
	char		ex32_path[NFS_MAXPATHLEN+1];
	compat_dev_t	ex32_dev;
	compat_ino_t	ex32_ino;
	compat_int_t	ex32_flags;
	__compat_uid_t	ex32_anon_uid;
	__compat_gid_t	ex32_anon_gid;
};

struct compat_nfsctl_fdparm {
	struct sockaddr		gd32_addr;
	s8			gd32_path[NFS_MAXPATHLEN+1];
	compat_int_t		gd32_version;
};

struct compat_nfsctl_fsparm {
	struct sockaddr		gd32_addr;
	s8			gd32_path[NFS_MAXPATHLEN+1];
	compat_int_t		gd32_maxlen;
};

struct compat_nfsctl_arg {
	compat_int_t		ca32_version;	/* safeguard */
	union {
		struct compat_nfsctl_svc	u32_svc;
		struct compat_nfsctl_client	u32_client;
		struct compat_nfsctl_export	u32_export;
		struct compat_nfsctl_fdparm	u32_getfd;
		struct compat_nfsctl_fsparm	u32_getfs;
	} u;
#define ca32_svc	u.u32_svc
#define ca32_client	u.u32_client
#define ca32_export	u.u32_export
#define ca32_getfd	u.u32_getfd
#define ca32_getfs	u.u32_getfs
};

union compat_nfsctl_res {
	__u8			cr32_getfh[NFS_FHSIZE];
	struct knfsd_fh		cr32_getfs;
};

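/*
 * The *_trans() helpers below copy one variant of the 32-bit
 * compat_nfsctl_arg union into the native nfsctl_arg, widening the
 * fields as needed.  Each one also fetches ca_version, which is used
 * as a safeguard.
 */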
static int compat_nfs_svc_trans(struct nfsctl_arg *karg,
				struct compat_nfsctl_arg __user *arg)
{
	if (!access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)) ||
		get_user(karg->ca_version, &arg->ca32_version) ||
		__get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port) ||
		__get_user(karg->ca_svc.svc_nthreads,
				&arg->ca32_svc.svc32_nthreads))
		return -EFAULT;
	return 0;
}

static int compat_nfs_clnt_trans(struct nfsctl_arg *karg,
				struct compat_nfsctl_arg __user *arg)
{
	if (!access_ok(VERIFY_READ, &arg->ca32_client,
			sizeof(arg->ca32_client)) ||
		get_user(karg->ca_version, &arg->ca32_version) ||
		__copy_from_user(&karg->ca_client.cl_ident[0],
				&arg->ca32_client.cl32_ident[0],
				NFSCLNT_IDMAX) ||
		__get_user(karg->ca_client.cl_naddr,
				&arg->ca32_client.cl32_naddr) ||
		__copy_from_user(&karg->ca_client.cl_addrlist[0],
				&arg->ca32_client.cl32_addrlist[0],
				(sizeof(struct in_addr) * NFSCLNT_ADDRMAX)) ||
		__get_user(karg->ca_client.cl_fhkeytype,
				&arg->ca32_client.cl32_fhkeytype) ||
		__get_user(karg->ca_client.cl_fhkeylen,
				&arg->ca32_client.cl32_fhkeylen) ||
		__copy_from_user(&karg->ca_client.cl_fhkey[0],
				&arg->ca32_client.cl32_fhkey[0],
				NFSCLNT_KEYMAX))
		return -EFAULT;

	return 0;
}

static int compat_nfs_exp_trans(struct nfsctl_arg *karg,
				struct compat_nfsctl_arg __user *arg)
{
	if (!access_ok(VERIFY_READ, &arg->ca32_export,
				sizeof(arg->ca32_export)) ||
		get_user(karg->ca_version, &arg->ca32_version) ||
		__copy_from_user(&karg->ca_export.ex_client[0],
				&arg->ca32_export.ex32_client[0],
				NFSCLNT_IDMAX) ||
		__copy_from_user(&karg->ca_export.ex_path[0],
				&arg->ca32_export.ex32_path[0],
				NFS_MAXPATHLEN) ||
		__get_user(karg->ca_export.ex_dev,
				&arg->ca32_export.ex32_dev) ||
		__get_user(karg->ca_export.ex_ino,
				&arg->ca32_export.ex32_ino) ||
		__get_user(karg->ca_export.ex_flags,
				&arg->ca32_export.ex32_flags) ||
		__get_user(karg->ca_export.ex_anon_uid,
				&arg->ca32_export.ex32_anon_uid) ||
		__get_user(karg->ca_export.ex_anon_gid,
				&arg->ca32_export.ex32_anon_gid))
		return -EFAULT;
	SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid);
	SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid);

	return 0;
}

static int compat_nfs_getfd_trans(struct nfsctl_arg *karg,
				struct compat_nfsctl_arg __user *arg)
{
	if (!access_ok(VERIFY_READ, &arg->ca32_getfd,
			sizeof(arg->ca32_getfd)) ||
		get_user(karg->ca_version, &arg->ca32_version) ||
		__copy_from_user(&karg->ca_getfd.gd_addr,
				&arg->ca32_getfd.gd32_addr,
				(sizeof(struct sockaddr))) ||
		__copy_from_user(&karg->ca_getfd.gd_path,
				&arg->ca32_getfd.gd32_path,
				(NFS_MAXPATHLEN+1)) ||
		__get_user(karg->ca_getfd.gd_version,
				&arg->ca32_getfd.gd32_version))
		return -EFAULT;

	return 0;
}

static int compat_nfs_getfs_trans(struct nfsctl_arg *karg,
				struct compat_nfsctl_arg __user *arg)
{
	if (!access_ok(VERIFY_READ, &arg->ca32_getfs,
			sizeof(arg->ca32_getfs)) ||
		get_user(karg->ca_version, &arg->ca32_version) ||
		__copy_from_user(&karg->ca_getfs.gd_addr,
				&arg->ca32_getfs.gd32_addr,
				(sizeof(struct sockaddr))) ||
		__copy_from_user(&karg->ca_getfs.gd_path,
				&arg->ca32_getfs.gd32_path,
				(NFS_MAXPATHLEN+1)) ||
		__get_user(karg->ca_getfs.gd_maxlen,
				&arg->ca32_getfs.gd32_maxlen))
		return -EFAULT;

	return 0;
}

/*
 * This really doesn't need any translation; we are only passing
 * back a union which contains opaque nfs file handle data.
 */
static int compat_nfs_getfh_res_trans(union nfsctl_res *kres,
				union compat_nfsctl_res __user *res)
{
	int err;

	err = copy_to_user(res, kres, sizeof(*res));

	return (err) ? -EFAULT : 0;
}

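/*
 * Translate the 32-bit nfsctl argument for the given command into
 * native kernel structures, call sys_nfsservctl() under KERNEL_DS,
 * and copy the result back to userspace for the GETFD/GETFS commands.
 */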
asmlinkage long compat_sys_nfsservctl(int cmd,
				struct compat_nfsctl_arg __user *arg,
				union compat_nfsctl_res __user *res)
{
	struct nfsctl_arg *karg;
	union nfsctl_res *kres;
	mm_segment_t oldfs;
	int err;

	karg = kmalloc(sizeof(*karg), GFP_USER);
	kres = kmalloc(sizeof(*kres), GFP_USER);
	if (!karg || !kres) {
		err = -ENOMEM;
		goto done;
	}

	switch (cmd) {
	case NFSCTL_SVC:
		err = compat_nfs_svc_trans(karg, arg);
		break;

	case NFSCTL_ADDCLIENT:
	case NFSCTL_DELCLIENT:
		err = compat_nfs_clnt_trans(karg, arg);
		break;

	case NFSCTL_EXPORT:
	case NFSCTL_UNEXPORT:
		err = compat_nfs_exp_trans(karg, arg);
		break;

	case NFSCTL_GETFD:
		err = compat_nfs_getfd_trans(karg, arg);
		break;

	case NFSCTL_GETFS:
		err = compat_nfs_getfs_trans(karg, arg);
		break;

	default:
		err = -EINVAL;
		break;
	}

	if (err)
		goto done;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* The __user pointer casts are valid because of the set_fs() */
	err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
	set_fs(oldfs);

	if (err)
		goto done;

	if ((cmd == NFSCTL_GETFD) ||
	    (cmd == NFSCTL_GETFS))
		err = compat_nfs_getfh_res_trans(kres, res);

done:
	kfree(karg);
	kfree(kres);
	return err;
}
#else /* !NFSD */
asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
{
	return sys_ni_syscall();
}
#endif

#ifdef CONFIG_EPOLL

#ifdef HAVE_SET_RESTORE_SIGMASK
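/*
 * 32-bit epoll_pwait(): same as the native syscall except that the
 * signal mask arrives as a compat_sigset_t.
 */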
asmlinkage long compat_sys_epoll_pwait(int epfd,
			struct compat_epoll_event __user *events,
			int maxevents, int timeout,
			const compat_sigset_t __user *sigmask,
			compat_size_t sigsetsize)
{
	long err;
	compat_sigset_t csigmask;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&csigmask, sigmask, sizeof(csigmask)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &csigmask);
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	err = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (err == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return err;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */

#endif /* CONFIG_EPOLL */

#ifdef CONFIG_SIGNALFD

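/*
 * Convert the compat sigset into a native sigset_t, stage it in
 * userspace via compat_alloc_user_space(), and hand it to
 * sys_signalfd().
 */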
asmlinkage long compat_sys_signalfd(int ufd,
				    const compat_sigset_t __user *sigmask,
				    compat_size_t sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t tmp;
	sigset_t __user *ksigmask;

	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
		return -EFAULT;
	sigset_from_compat(&tmp, &ss32);
	ksigmask = compat_alloc_user_space(sizeof(sigset_t));
	if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t)))
		return -EFAULT;

	return sys_signalfd(ufd, ksigmask, sizeof(sigset_t));
}

#endif /* CONFIG_SIGNALFD */

#ifdef CONFIG_TIMERFD

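/*
 * Bounce the compat itimerspec through a native buffer obtained with
 * compat_alloc_user_space() so that sys_timerfd_settime() can be used
 * unchanged; convert the returned old value back for the caller.
 */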
asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
				   const struct compat_itimerspec __user *utmr,
				   struct compat_itimerspec __user *otmr)
{
	int error;
	struct itimerspec t;
	struct itimerspec __user *ut;

	if (get_compat_itimerspec(&t, utmr))
		return -EFAULT;
	ut = compat_alloc_user_space(2 * sizeof(struct itimerspec));
	if (copy_to_user(&ut[0], &t, sizeof(t)))
		return -EFAULT;
	error = sys_timerfd_settime(ufd, flags, &ut[0], &ut[1]);
	if (!error && otmr)
		error = (copy_from_user(&t, &ut[1], sizeof(struct itimerspec)) ||
			 put_compat_itimerspec(otmr, &t)) ? -EFAULT : 0;

	return error;
}

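/*
 * As above, but only the returned itimerspec needs converting.
 */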
asmlinkage long compat_sys_timerfd_gettime(int ufd,
				   struct compat_itimerspec __user *otmr)
{
	int error;
	struct itimerspec t;
	struct itimerspec __user *ut;

	ut = compat_alloc_user_space(sizeof(struct itimerspec));
	error = sys_timerfd_gettime(ufd, ut);
	if (!error)
		error = (copy_from_user(&t, ut, sizeof(struct itimerspec)) ||
			 put_compat_itimerspec(otmr, &t)) ? -EFAULT : 0;

	return error;
}

#endif /* CONFIG_TIMERFD */
class="hl kwb">void idle_balance(int cpu, struct rq *rq) { } #endif static inline int wake_priority_sleeper(struct rq *rq) { int ret = 0; #ifdef CONFIG_SCHED_SMT spin_lock(&rq->lock); /* * If an SMT sibling task has been put to sleep for priority * reasons reschedule the idle task to see if it can now run. */ if (rq->nr_running) { resched_task(rq->idle); ret = 1; } spin_unlock(&rq->lock); #endif return ret; } DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); /* * This is called on clock ticks and on context switches. * Bank in p->sched_time the ns elapsed since the last tick or switch. */ static inline void update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) { p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); } /* * Return current->sched_time plus any more ns on the sched_clock * that have not yet been banked. */ unsigned long long current_sched_time(const struct task_struct *p) { unsigned long long ns; unsigned long flags; local_irq_save(flags); ns = max(p->timestamp, task_rq(p)->timestamp_last_tick); ns = p->sched_time + sched_clock() - ns; local_irq_restore(flags); return ns; } /* * We place interactive tasks back into the active array, if possible. * * To guarantee that this does not starve expired tasks we ignore the * interactivity of a task if the first expired task had to wait more * than a 'reasonable' amount of time. This deadline timeout is * load-dependent, as the frequency of array switched decreases with * increasing number of running tasks. We also ignore the interactivity * if a better static_prio task has expired: */ static inline int expired_starving(struct rq *rq) { if (rq->curr->static_prio > rq->best_expired_prio) return 1; if (!STARVATION_LIMIT || !rq->expired_timestamp) return 0; if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running) return 1; return 0; } /* * Account user cpu time to a process. * @p: the process that the cpu time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in user space since the last update */ void account_user_time(struct task_struct *p, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp; p->utime = cputime_add(p->utime, cputime); /* Add user time to cpustat. */ tmp = cputime_to_cputime64(cputime); if (TASK_NICE(p) > 0) cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); } /* * Account system cpu time to a process. * @p: the process that the cpu time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in kernel space since the last update */ void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct rq *rq = this_rq(); cputime64_t tmp; p->stime = cputime_add(p->stime, cputime); /* Add system time to cpustat. */ tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else if (p != rq->idle) cpustat->system = cputime64_add(cpustat->system, tmp); else if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); /* Account for system time used */ acct_update_integrals(p); } /* * Account for involuntary wait time. 
* @p: the process from which the cpu time has been stolen * @steal: the cpu time spent in involuntary wait */ void account_steal_time(struct task_struct *p, cputime_t steal) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp = cputime_to_cputime64(steal); struct rq *rq = this_rq(); if (p == rq->idle) { p->stime = cputime_add(p->stime, steal); if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); } else cpustat->steal = cputime64_add(cpustat->steal, tmp); } /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. * * It also gets called by the fork code, when changing the parent's * timeslices. */ void scheduler_tick(void) { unsigned long long now = sched_clock(); struct task_struct *p = current; int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); update_cpu_clock(p, rq, now); rq->timestamp_last_tick = now; if (p == rq->idle) { if (wake_priority_sleeper(rq)) goto out; rebalance_tick(cpu, rq, SCHED_IDLE); return; } /* Task might have expired already, but not scheduled off yet */ if (p->array != rq->active) { set_tsk_need_resched(p); goto out; } spin_lock(&rq->lock); /* * The task was running during this tick - update the * time slice counter. Note: we do not update a thread's * priority until it either goes to sleep or uses up its * timeslice. This makes it possible for interactive tasks * to use up their timeslices at their highest priority levels. */ if (rt_task(p)) { /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. */ if ((p->policy == SCHED_RR) && !--p->time_slice) { p->time_slice = task_timeslice(p); p->first_time_slice = 0; set_tsk_need_resched(p); /* put it at the end of the queue: */ requeue_task(p, rq->active); } goto out_unlock; } if (!--p->time_slice) { dequeue_task(p, rq->active); set_tsk_need_resched(p); p->prio = effective_prio(p); p->time_slice = task_timeslice(p); p->first_time_slice = 0; if (!rq->expired_timestamp) rq->expired_timestamp = jiffies; if (!TASK_INTERACTIVE(p) || expired_starving(rq)) { enqueue_task(p, rq->expired); if (p->static_prio < rq->best_expired_prio) rq->best_expired_prio = p->static_prio; } else enqueue_task(p, rq->active); } else { /* * Prevent a too long timeslice allowing a task to monopolize * the CPU. We do this by splitting up the timeslice into * smaller pieces. * * Note: this does not mean the task's timeslices expire or * get lost in any way, they just might be preempted by * another task of equal priority. (one with higher * priority would have preempted this task already.) We * requeue this task to the end of the list on this priority * level, which is in essence a round-robin of tasks with * equal priority. * * This only applies to tasks in the interactive * delta range with at least TIMESLICE_GRANULARITY to requeue. */ if (TASK_INTERACTIVE(p) && !((task_timeslice(p) - p->time_slice) % TIMESLICE_GRANULARITY(p)) && (p->time_slice >= TIMESLICE_GRANULARITY(p)) && (p->array == rq->active)) { requeue_task(p, rq->active); set_tsk_need_resched(p); } } out_unlock: spin_unlock(&rq->lock); out: rebalance_tick(cpu, rq, NOT_IDLE); } #ifdef CONFIG_SCHED_SMT static inline void wakeup_busy_runqueue(struct rq *rq) { /* If an SMT runqueue is sleeping due to priority reasons wake it up */ if (rq->curr == rq->idle && rq->nr_running) resched_task(rq->idle); } /* * Called with interrupt disabled and this_rq's runqueue locked. 
*/ static void wake_sleeping_dependent(int this_cpu) { struct sched_domain *tmp, *sd = NULL; int i; for_each_domain(this_cpu, tmp) { if (tmp->flags & SD_SHARE_CPUPOWER) { sd = tmp; break; } } if (!sd) return; for_each_cpu_mask(i, sd->span) { struct rq *smt_rq = cpu_rq(i); if (i == this_cpu) continue; if (unlikely(!spin_trylock(&smt_rq->lock))) continue; wakeup_busy_runqueue(smt_rq); spin_unlock(&smt_rq->lock); } } /* * number of 'lost' timeslices this task wont be able to fully * utilize, if another task runs on a sibling. This models the * slowdown effect of other tasks running on siblings: */ static inline unsigned long smt_slice(struct task_struct *p, struct sched_domain *sd) { return p->time_slice * (100 - sd->per_cpu_gain) / 100; } /* * To minimise lock contention and not have to drop this_rq's runlock we only * trylock the sibling runqueues and bypass those runqueues if we fail to * acquire their lock. As we only trylock the normal locking order does not * need to be obeyed. */ static int dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) { struct sched_domain *tmp, *sd = NULL; int ret = 0, i; /* kernel/rt threads do not participate in dependent sleeping */ if (!p->mm || rt_task(p)) return 0; for_each_domain(this_cpu, tmp) { if (tmp->flags & SD_SHARE_CPUPOWER) { sd = tmp; break; } } if (!sd) return 0; for_each_cpu_mask(i, sd->span) { struct task_struct *smt_curr; struct rq *smt_rq; if (i == this_cpu) continue; smt_rq = cpu_rq(i); if (unlikely(!spin_trylock(&smt_rq->lock))) continue; smt_curr = smt_rq->curr; if (!smt_curr->mm) goto unlock; /* * If a user task with lower static priority than the * running task on the SMT sibling is trying to schedule, * delay it till there is proportionately less timeslice * left of the sibling task to prevent a lower priority * task from using an unfair proportion of the * physical cpu's resources. -ck */ if (rt_task(smt_curr)) { /* * With real time tasks we run non-rt tasks only * per_cpu_gain% of the time. */ if ((jiffies % DEF_TIMESLICE) > (sd->per_cpu_gain * DEF_TIMESLICE / 100)) ret = 1; } else { if (smt_curr->static_prio < p->static_prio && !TASK_PREEMPTS_CURR(p, smt_rq) && smt_slice(smt_curr, sd) > task_timeslice(p)) ret = 1; } unlock: spin_unlock(&smt_rq->lock); } return ret; } #else static inline void wake_sleeping_dependent(int this_cpu) { } static inline int dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) { return 0; } #endif #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) void fastcall add_preempt_count(int val) { /* * Underflow? */ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) return; preempt_count() += val; /* * Spinlock count overflowing soon? */ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); } EXPORT_SYMBOL(add_preempt_count); void fastcall sub_preempt_count(int val) { /* * Underflow? */ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) return; /* * Is the spinlock portion underflowing? */ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK))) return; preempt_count() -= val; } EXPORT_SYMBOL(sub_preempt_count); #endif static inline int interactive_sleep(enum sleep_type sleep_type) { return (sleep_type == SLEEP_INTERACTIVE || sleep_type == SLEEP_INTERRUPTED); } /* * schedule() is the main scheduler function. 
*/ asmlinkage void __sched schedule(void) { struct task_struct *prev, *next; struct prio_array *array; struct list_head *queue; unsigned long long now; unsigned long run_time; int cpu, idx, new_prio; long *switch_count; struct rq *rq; /* * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ if (unlikely(in_atomic() && !current->exit_state)) { printk(KERN_ERR "BUG: scheduling while atomic: " "%s/0x%08x/%d\n", current->comm, preempt_count(), current->pid); dump_stack(); } profile_hit(SCHED_PROFILING, __builtin_return_address(0)); need_resched: preempt_disable(); prev = current; release_kernel_lock(prev); need_resched_nonpreemptible: rq = this_rq(); /* * The idle thread is not allowed to schedule! * Remove this check after it has been exercised a bit. */ if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) { printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); } schedstat_inc(rq, sched_cnt); now = sched_clock(); if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) { run_time = now - prev->timestamp; if (unlikely((long long)(now - prev->timestamp) < 0)) run_time = 0; } else run_time = NS_MAX_SLEEP_AVG; /* * Tasks charged proportionately less run_time at high sleep_avg to * delay them losing their interactive status */ run_time /= (CURRENT_BONUS(prev) ? : 1); spin_lock_irq(&rq->lock); if (unlikely(prev->flags & PF_DEAD)) prev->state = EXIT_DEAD; switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { switch_count = &prev->nvcsw; if (unlikely((prev->state & TASK_INTERRUPTIBLE) && unlikely(signal_pending(prev)))) prev->state = TASK_RUNNING; else { if (prev->state == TASK_UNINTERRUPTIBLE) rq->nr_uninterruptible++; deactivate_task(prev, rq); } } cpu = smp_processor_id(); if (unlikely(!rq->nr_running)) { idle_balance(cpu, rq); if (!rq->nr_running) { next = rq->idle; rq->expired_timestamp = 0; wake_sleeping_dependent(cpu); goto switch_tasks; } } array = rq->active; if (unlikely(!array->nr_active)) { /* * Switch the active and expired arrays. 
*/ schedstat_inc(rq, sched_switch); rq->active = rq->expired; rq->expired = array; array = rq->active; rq->expired_timestamp = 0; rq->best_expired_prio = MAX_PRIO; } idx = sched_find_first_bit(array->bitmap); queue = array->queue + idx; next = list_entry(queue->next, struct task_struct, run_list); if (!rt_task(next) && interactive_sleep(next->sleep_type)) { unsigned long long delta = now - next->timestamp; if (unlikely((long long)(now - next->timestamp) < 0)) delta = 0; if (next->sleep_type == SLEEP_INTERACTIVE) delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128; array = next->array; new_prio = recalc_task_prio(next, next->timestamp + delta); if (unlikely(next->prio != new_prio)) { dequeue_task(next, array); next->prio = new_prio; enqueue_task(next, array); } } next->sleep_type = SLEEP_NORMAL; if (dependent_sleeper(cpu, rq, next)) next = rq->idle; switch_tasks: if (next == rq->idle) schedstat_inc(rq, sched_goidle); prefetch(next); prefetch_stack(next); clear_tsk_need_resched(prev); rcu_qsctr_inc(task_cpu(prev)); update_cpu_clock(prev, rq, now); prev->sleep_avg -= run_time; if ((long)prev->sleep_avg <= 0) prev->sleep_avg = 0; prev->timestamp = prev->last_ran = now; sched_info_switch(prev, next); if (likely(prev != next)) { next->timestamp = now; rq->nr_switches++; rq->curr = next; ++*switch_count; prepare_task_switch(rq, next); prev = context_switch(rq, prev, next); barrier(); /* * this_rq must be evaluated again because prev may have moved * CPUs since it called schedule(), thus the 'rq' on its stack * frame will be invalid. */ finish_task_switch(this_rq(), prev); } else spin_unlock_irq(&rq->lock); prev = current; if (unlikely(reacquire_kernel_lock(prev) < 0)) goto need_resched_nonpreemptible; preempt_enable_no_resched(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } EXPORT_SYMBOL(schedule); #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption * off of preempt_enable. Kernel preemptions off return from interrupt * occur there and call schedule directly. */ asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); #ifdef CONFIG_PREEMPT_BKL struct task_struct *task = current; int saved_lock_depth; #endif /* * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return.. */ if (unlikely(ti->preempt_count || irqs_disabled())) return; need_resched: add_preempt_count(PREEMPT_ACTIVE); /* * We keep the big kernel semaphore locked, but we * clear ->lock_depth so that schedule() doesnt * auto-release the semaphore: */ #ifdef CONFIG_PREEMPT_BKL saved_lock_depth = task->lock_depth; task->lock_depth = -1; #endif schedule(); #ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; #endif sub_preempt_count(PREEMPT_ACTIVE); /* we could miss a preemption opportunity between schedule and now */ barrier(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } EXPORT_SYMBOL(preempt_schedule); /* * this is the entry point to schedule() from kernel preemption * off of irq context. * Note, that this is called and return with irqs disabled. This will * protect us against recursive calling from irq. 
*/ asmlinkage void __sched preempt_schedule_irq(void) { struct thread_info *ti = current_thread_info(); #ifdef CONFIG_PREEMPT_BKL struct task_struct *task = current; int saved_lock_depth; #endif /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); need_resched: add_preempt_count(PREEMPT_ACTIVE); /* * We keep the big kernel semaphore locked, but we * clear ->lock_depth so that schedule() doesnt * auto-release the semaphore: */ #ifdef CONFIG_PREEMPT_BKL saved_lock_depth = task->lock_depth; task->lock_depth = -1; #endif local_irq_enable(); schedule(); local_irq_disable(); #ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; #endif sub_preempt_count(PREEMPT_ACTIVE); /* we could miss a preemption opportunity between schedule and now */ barrier(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } #endif /* CONFIG_PREEMPT */ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) { return try_to_wake_up(curr->private, mode, sync); } EXPORT_SYMBOL(default_wake_function); /* * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * zero in this (rare) case, and we handle it by continuing to scan the queue. */ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync, void *key) { struct list_head *tmp, *next; list_for_each_safe(tmp, next, &q->task_list) { wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); unsigned flags = curr->flags; if (curr->func(curr, mode, sync, key) && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break; } } /** * __wake_up - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * @key: is directly passed to the wakeup function */ void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) { unsigned long flags; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, 0, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(__wake_up); /* * Same as __wake_up but called with the spinlock in wait_queue_head_t held. */ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode) { __wake_up_common(q, mode, 1, 0, NULL); } /** * __wake_up_sync - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * * The sync wakeup differs that the waker knows that it will schedule * away soon, so while the target thread will be woken up, it will not * be migrated to another CPU - ie. the two threads are 'synchronized' * with each other. This can prevent needless bouncing between CPUs. * * On UP it can prevent extra preemption. 
*/ void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { unsigned long flags; int sync = 1; if (unlikely(!q)) return; if (unlikely(!nr_exclusive)) sync = 0; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, sync, NULL); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ void fastcall complete(struct completion *x) { unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); x->done++; __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); void fastcall complete_all(struct completion *x) { unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); void fastcall __sched wait_for_completion(struct completion *x) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); schedule(); spin_lock_irq(&x->wait.lock); } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; spin_unlock_irq(&x->wait.lock); } EXPORT_SYMBOL(wait_for_completion); unsigned long fastcall __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); int fastcall __sched wait_for_completion_interruptible(struct completion *x) { int ret = 0; might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { if (signal_pending(current)) { ret = -ERESTARTSYS; __remove_wait_queue(&x->wait, &wait); goto out; } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); schedule(); spin_lock_irq(&x->wait.lock); } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return ret; } EXPORT_SYMBOL(wait_for_completion_interruptible); unsigned long fastcall __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { if (signal_pending(current)) { timeout = -ERESTARTSYS; __remove_wait_queue(&x->wait, &wait); goto out; } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); 
#define SLEEP_ON_VAR \ unsigned long flags; \ wait_queue_t wait; \ init_waitqueue_entry(&wait, current); #define SLEEP_ON_HEAD \ spin_lock_irqsave(&q->lock,flags); \ __add_wait_queue(q, &wait); \ spin_unlock(&q->lock); #define SLEEP_ON_TAIL \ spin_lock_irq(&q->lock); \ __remove_wait_queue(q, &wait); \ spin_unlock_irqrestore(&q->lock, flags); void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR current->state = TASK_INTERRUPTIBLE; SLEEP_ON_HEAD schedule(); SLEEP_ON_TAIL } EXPORT_SYMBOL(interruptible_sleep_on); long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR current->state = TASK_INTERRUPTIBLE; SLEEP_ON_HEAD timeout = schedule_timeout(timeout); SLEEP_ON_TAIL return timeout; } EXPORT_SYMBOL(interruptible_sleep_on_timeout); void fastcall __sched sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR current->state = TASK_UNINTERRUPTIBLE; SLEEP_ON_HEAD schedule(); SLEEP_ON_TAIL } EXPORT_SYMBOL(sleep_on); long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR current->state = TASK_UNINTERRUPTIBLE; SLEEP_ON_HEAD timeout = schedule_timeout(timeout); SLEEP_ON_TAIL return timeout; } EXPORT_SYMBOL(sleep_on_timeout); #ifdef CONFIG_RT_MUTEXES /* * rt_mutex_setprio - set the current priority of a task * @p: task * @prio: prio value (kernel-internal form) * * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * * Used by the rt_mutex code to implement priority inheritance logic. */ void rt_mutex_setprio(struct task_struct *p, int prio) { struct prio_array *array; unsigned long flags; struct rq *rq; int oldprio; BUG_ON(prio < 0 || prio > MAX_PRIO); rq = task_rq_lock(p, &flags); oldprio = p->prio; array = p->array; if (array) dequeue_task(p, array); p->prio = prio; if (array) { /* * If changing to an RT priority then queue it * in the active array! */ if (rt_task(p)) array = rq->active; enqueue_task(p, array); /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on * this runqueue and our priority is higher than the current's */ if (task_running(rq, p)) { if (p->prio > oldprio) resched_task(rq->curr); } else if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); } task_rq_unlock(rq, &flags); } #endif void set_user_nice(struct task_struct *p, long nice) { struct prio_array *array; int old_prio, delta; unsigned long flags; struct rq *rq; if (TASK_NICE(p) == nice || nice < -20 || nice > 19) return; /* * We have to be careful, if called from sys_setpriority(), * the task might be in the middle of scheduling on another CPU. 
*/ rq = task_rq_lock(p, &flags); /* * The RT priorities are set via sched_setscheduler(), but we still * allow the 'normal' nice value to be set - but as expected * it wont have any effect on scheduling until the task is * not SCHED_NORMAL/SCHED_BATCH: */ if (has_rt_policy(p)) { p->static_prio = NICE_TO_PRIO(nice); goto out_unlock; } array = p->array; if (array) { dequeue_task(p, array); dec_raw_weighted_load(rq, p); } p->static_prio = NICE_TO_PRIO(nice); set_load_weight(p); old_prio = p->prio; p->prio = effective_prio(p); delta = p->prio - old_prio; if (array) { enqueue_task(p, array); inc_raw_weighted_load(rq, p); /* * If the task increased its priority or is running and * lowered its priority, then reschedule its CPU: */ if (delta < 0 || (delta > 0 && task_running(rq, p))) resched_task(rq->curr); } out_unlock: task_rq_unlock(rq, &flags); } EXPORT_SYMBOL(set_user_nice); /* * can_nice - check if a task can reduce its nice value * @p: task * @nice: nice value */ int can_nice(const struct task_struct *p, const int nice) { /* convert nice value [19,-20] to rlimit style value [1,40] */ int nice_rlim = 20 - nice; return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || capable(CAP_SYS_NICE)); } #ifdef __ARCH_WANT_SYS_NICE /* * sys_nice - change the priority of the current process. * @increment: priority increment * * sys_setpriority is a more generic, but much slower function that * does similar things. */ asmlinkage long sys_nice(int increment) { long nice, retval; /* * Setpriority might change our priority at the same moment. * We don't have to worry. Conceptually one call occurs first * and we have a single winner. */ if (increment < -40) increment = -40; if (increment > 40) increment = 40; nice = PRIO_TO_NICE(current->static_prio) + increment; if (nice < -20) nice = -20; if (nice > 19) nice = 19; if (increment < 0 && !can_nice(current, nice)) return -EPERM; retval = security_task_setnice(current, nice); if (retval) return retval; set_user_nice(current, nice); return 0; } #endif /** * task_prio - return the priority value of a given task. * @p: the task in question. * * This is the priority value as seen by users in /proc. * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ int task_prio(const struct task_struct *p) { return p->prio - MAX_RT_PRIO; } /** * task_nice - return the nice value of a given task. * @p: the task in question. */ int task_nice(const struct task_struct *p) { return TASK_NICE(p); } EXPORT_SYMBOL_GPL(task_nice); /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. */ int idle_cpu(int cpu) { return cpu_curr(cpu) == cpu_rq(cpu)->idle; } /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. */ struct task_struct *idle_task(int cpu) { return cpu_rq(cpu)->idle; } /** * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. */ static inline struct task_struct *find_process_by_pid(pid_t pid) { return pid ? find_task_by_pid(pid) : current; } /* Actually do priority change: must hold rq lock. 
*/ static void __setscheduler(struct task_struct *p, int policy, int prio) { BUG_ON(p->array); p->policy = policy; p->rt_priority = prio; p->normal_prio = normal_prio(p); /* we are holding p->pi_lock already */ p->prio = rt_mutex_getprio(p); /* * SCHED_BATCH tasks are treated as perpetual CPU hogs: */ if (policy == SCHED_BATCH) p->sleep_avg = 0; set_load_weight(p); } /** * sched_setscheduler - change the scheduling policy and/or RT priority of * a thread. * @p: the task in question. * @policy: new policy. * @param: structure containing the new RT priority. */ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) { int retval, oldprio, oldpolicy = -1; struct prio_array *array; unsigned long flags; struct rq *rq; /* may grab non-irq protected spin_locks */ BUG_ON(in_interrupt()); recheck: /* double check policy once rq lock held */ if (policy < 0) policy = oldpolicy = p->policy; else if (policy != SCHED_FIFO && policy != SCHED_RR && policy != SCHED_NORMAL && policy != SCHED_BATCH) return -EINVAL; /* * Valid priorities for SCHED_FIFO and SCHED_RR are * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and * SCHED_BATCH is 0. */ if (param->sched_priority < 0 || (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) return -EINVAL; if ((policy == SCHED_NORMAL || policy == SCHED_BATCH) != (param->sched_priority == 0)) return -EINVAL; /* * Allow unprivileged RT tasks to decrease priority: */ if (!capable(CAP_SYS_NICE)) { /* * can't change policy, except between SCHED_NORMAL * and SCHED_BATCH: */ if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) && (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) && !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur) return -EPERM; /* can't increase priority */ if ((policy != SCHED_NORMAL && policy != SCHED_BATCH) && param->sched_priority > p->rt_priority && param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur) return -EPERM; /* can't change other user's priorities */ if ((current->euid != p->euid) && (current->euid != p->uid)) return -EPERM; } retval = security_task_setscheduler(p, policy, param); if (retval) return retval; /* * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: */ spin_lock_irqsave(&p->pi_lock, flags); /* * To be able to change p->policy safely, the apropriate * runqueue lock must be held. 
*/ rq = __task_rq_lock(p); /* recheck policy now with rq lock held */ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); goto recheck; } array = p->array; if (array) deactivate_task(p, rq); oldprio = p->prio; __setscheduler(p, policy, param->sched_priority); if (array) { __activate_task(p, rq); /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on * this runqueue and our priority is higher than the current's */ if (task_running(rq, p)) { if (p->prio > oldprio) resched_task(rq->curr); } else if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); } __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); rt_mutex_adjust_pi(p); return 0; } EXPORT_SYMBOL_GPL(sched_setscheduler); static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { struct sched_param lparam; struct task_struct *p; int retval; if (!param || pid < 0) return -EINVAL; if (copy_from_user(&lparam, param, sizeof(struct sched_param))) return -EFAULT; read_lock_irq(&tasklist_lock); p = find_process_by_pid(pid); if (!p) { read_unlock_irq(&tasklist_lock); return -ESRCH; } get_task_struct(p); read_unlock_irq(&tasklist_lock); retval = sched_setscheduler(p, policy, &lparam); put_task_struct(p); return retval; } /** * sys_sched_setscheduler - set/change the scheduler policy and RT priority * @pid: the pid in question. * @policy: new policy. * @param: structure containing the new RT priority. */ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { /* negative values for policy are not valid */ if (policy < 0) return -EINVAL; return do_sched_setscheduler(pid, policy, param); } /** * sys_sched_setparam - set/change the RT priority of a thread * @pid: the pid in question. * @param: structure containing the new RT priority. */ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) { return do_sched_setscheduler(pid, -1, param); } /** * sys_sched_getscheduler - get the policy (scheduling class) of a thread * @pid: the pid in question. */ asmlinkage long sys_sched_getscheduler(pid_t pid) { struct task_struct *p; int retval = -EINVAL; if (pid < 0) goto out_nounlock; retval = -ESRCH; read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) { retval = security_task_getscheduler(p); if (!retval) retval = p->policy; } read_unlock(&tasklist_lock); out_nounlock: return retval; } /** * sys_sched_getscheduler - get the RT priority of a thread * @pid: the pid in question. * @param: structure containing the RT priority. */ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) { struct sched_param lp; struct task_struct *p; int retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; read_lock(&tasklist_lock); p = find_process_by_pid(pid); retval = -ESRCH; if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; lp.sched_priority = p->rt_priority; read_unlock(&tasklist_lock); /* * This one might sleep, we cannot do it with a spinlock held ... */ retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; out_nounlock: return retval; out_unlock: read_unlock(&tasklist_lock); return retval; } long sched_setaffinity(pid_t pid, cpumask_t new_mask) { cpumask_t cpus_allowed; struct task_struct *p; int retval; lock_cpu_hotplug(); read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) { read_unlock(&tasklist_lock); unlock_cpu_hotplug(); return -ESRCH; } /* * It is not safe to call set_cpus_allowed with the * tasklist_lock held. We will bump the task_struct's * usage count and then drop tasklist_lock. */ get_task_struct(p); read_unlock(&tasklist_lock); retval = -EPERM; if ((current->euid != p->euid) && (current->euid != p->uid) && !capable(CAP_SYS_NICE)) goto out_unlock; retval = security_task_setscheduler(p, 0, NULL); if (retval) goto out_unlock; cpus_allowed = cpuset_cpus_allowed(p); cpus_and(new_mask, new_mask, cpus_allowed); retval = set_cpus_allowed(p, new_mask); out_unlock: put_task_struct(p); unlock_cpu_hotplug(); return retval; } static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, cpumask_t *new_mask) { if (len < sizeof(cpumask_t)) { memset(new_mask, 0, sizeof(cpumask_t)); } else if (len > sizeof(cpumask_t)) { len = sizeof(cpumask_t); } return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; } /** * sys_sched_setaffinity - set the cpu affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to the new cpu mask */ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { cpumask_t new_mask; int retval; retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); if (retval) return retval; return sched_setaffinity(pid, new_mask); } /* * Represents all cpu's present in the system * In systems capable of hotplug, this map could dynamically grow * as new cpu's are detected in the system via any platform specific * method, such as ACPI for e.g. */ cpumask_t cpu_present_map __read_mostly; EXPORT_SYMBOL(cpu_present_map); #ifndef CONFIG_SMP cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL; cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; #endif long sched_getaffinity(pid_t pid, cpumask_t *mask) { struct task_struct *p; int retval; lock_cpu_hotplug(); read_lock(&tasklist_lock); retval = -ESRCH; p = find_process_by_pid(pid); if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; cpus_and(*mask, p->cpus_allowed, cpu_online_map); out_unlock: read_unlock(&tasklist_lock); unlock_cpu_hotplug(); if (retval) return retval; return 0; } /** * sys_sched_getaffinity - get the cpu affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to hold the current cpu mask */ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { int ret; cpumask_t mask; if (len < sizeof(cpumask_t)) return -EINVAL; ret = sched_getaffinity(pid, &mask); if (ret < 0) return ret; if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) return -EFAULT; return sizeof(cpumask_t); } /** * sys_sched_yield - yield the current processor to other threads. * * this function yields the current CPU by moving the calling thread * to the expired array. If there are no other threads running on this * CPU then this function will return. 
*/ asmlinkage long sys_sched_yield(void) { struct rq *rq = this_rq_lock(); struct prio_array *array = current->array, *target = rq->expired; schedstat_inc(rq, yld_cnt); /* * We implement yielding by moving the task into the expired * queue. * * (special rule: RT tasks will just roundrobin in the active * array.) */ if (rt_task(current)) target = rq->active; if (array->nr_active == 1) { schedstat_inc(rq, yld_act_empty); if (!rq->expired->nr_active) schedstat_inc(rq, yld_both_empty); } else if (!rq->expired->nr_active) schedstat_inc(rq, yld_exp_empty); if (array != target) { dequeue_task(current, array); enqueue_task(current, target); } else /* * requeue_task is cheaper so perform that if possible. */ requeue_task(current, array); /* * Since we are going to call schedule() anyway, there's * no need to preempt or enable interrupts: */ __release(rq->lock); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); _raw_spin_unlock(&rq->lock); preempt_enable_no_resched(); schedule(); return 0; } static inline int __resched_legal(int expected_preempt_count) { if (unlikely(preempt_count() != expected_preempt_count)) return 0; if (unlikely(system_state != SYSTEM_RUNNING)) return 0; return 1; } static void __cond_resched(void) { #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP __might_sleep(__FILE__, __LINE__); #endif /* * The BKS might be reacquired before we have dropped * PREEMPT_ACTIVE, which could trigger a second * cond_resched() call. */ do { add_preempt_count(PREEMPT_ACTIVE); schedule(); sub_preempt_count(PREEMPT_ACTIVE); } while (need_resched()); } int __sched cond_resched(void) { if (need_resched() && __resched_legal(0)) { __cond_resched(); return 1; } return 0; } EXPORT_SYMBOL(cond_resched); /* * cond_resched_lock() - if a reschedule is pending, drop the given lock, * call schedule, and on return reacquire the lock. * * This works OK both with and without CONFIG_PREEMPT. We do strange low-level * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). */ int cond_resched_lock(spinlock_t *lock) { int ret = 0; if (need_lockbreak(lock)) { spin_unlock(lock); cpu_relax(); ret = 1; spin_lock(lock); } if (need_resched() && __resched_legal(1)) { spin_release(&lock->dep_map, 1, _THIS_IP_); _raw_spin_unlock(lock); preempt_enable_no_resched(); __cond_resched(); ret = 1; spin_lock(lock); } return ret; } EXPORT_SYMBOL(cond_resched_lock); int __sched cond_resched_softirq(void) { BUG_ON(!in_softirq()); if (need_resched() && __resched_legal(0)) { raw_local_irq_disable(); _local_bh_enable(); raw_local_irq_enable(); __cond_resched(); local_bh_disable(); return 1; } return 0; } EXPORT_SYMBOL(cond_resched_softirq); /** * yield - yield the current processor to other threads. * * this is a shortcut for kernel-space yielding - it marks the * thread runnable and calls sys_sched_yield(). */ void __sched yield(void) { set_current_state(TASK_RUNNING); sys_sched_yield(); } EXPORT_SYMBOL(yield); /* * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. 
* * But don't do that if it is a deliberate, throttling IO wait (this task * has set its backing_dev_info: the queue against which it should throttle) */ void __sched io_schedule(void) { struct rq *rq = &__raw_get_cpu_var(runqueues); delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); schedule(); atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); } EXPORT_SYMBOL(io_schedule); long __sched io_schedule_timeout(long timeout) { struct rq *rq = &__raw_get_cpu_var(runqueues); long ret; delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); ret = schedule_timeout(timeout); atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); return ret; } /** * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. * * this syscall returns the maximum rt_priority that can be used * by a given scheduling class. */ asmlinkage long sys_sched_get_priority_max(int policy) { int ret = -EINVAL; switch (policy) { case SCHED_FIFO: case SCHED_RR: ret = MAX_USER_RT_PRIO-1; break; case SCHED_NORMAL: case SCHED_BATCH: ret = 0; break; } return ret; } /** * sys_sched_get_priority_min - return minimum RT priority. * @policy: scheduling class. * * this syscall returns the minimum rt_priority that can be used * by a given scheduling class. */ asmlinkage long sys_sched_get_priority_min(int policy) { int ret = -EINVAL; switch (policy) { case SCHED_FIFO: case SCHED_RR: ret = 1; break; case SCHED_NORMAL: case SCHED_BATCH: ret = 0; } return ret; } /** * sys_sched_rr_get_interval - return the default timeslice of a process. * @pid: pid of the process. * @interval: userspace pointer to the timeslice value. * * this syscall writes the default timeslice value of a given process * into the user-space timespec buffer. A value of '0' means infinity. */ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) { struct task_struct *p; int retval = -EINVAL; struct timespec t; if (pid < 0) goto out_nounlock; retval = -ESRCH; read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; jiffies_to_timespec(p->policy == SCHED_FIFO ? 0 : task_timeslice(p), &t); read_unlock(&tasklist_lock); retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; out_nounlock: return retval; out_unlock: read_unlock(&tasklist_lock); return retval; } static inline struct task_struct *eldest_child(struct task_struct *p) { if (list_empty(&p->children)) return NULL; return list_entry(p->children.next,struct task_struct,sibling); } static inline struct task_struct *older_sibling(struct task_struct *p) { if (p->sibling.prev==&p->parent->children) return NULL; return list_entry(p->sibling.prev,struct task_struct,sibling); } static inline struct task_struct *younger_sibling(struct task_struct *p) { if (p->sibling.next==&p->parent->children) return NULL; return list_entry(p->sibling.next,struct task_struct,sibling); } static const char stat_nam[] = "RSDTtZX"; static void show_task(struct task_struct *p) { struct task_struct *relative; unsigned long free = 0; unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; printk("%-13.13s %c", p->comm, state < sizeof(stat_nam) - 1 ? 
stat_nam[state] : '?'); #if (BITS_PER_LONG == 32) if (state == TASK_RUNNING) printk(" running "); else printk(" %08lX ", thread_saved_pc(p)); #else if (state == TASK_RUNNING) printk(" running task "); else printk(" %016lx ", thread_saved_pc(p)); #endif #ifdef CONFIG_DEBUG_STACK_USAGE { unsigned long *n = end_of_stack(p); while (!*n) n++; free = (unsigned long)n - (unsigned long)end_of_stack(p); } #endif printk("%5lu %5d %6d ", free, p->pid, p->parent->pid); if ((relative = eldest_child(p))) printk("%5d ", relative->pid); else printk(" "); if ((relative = younger_sibling(p))) printk("%7d", relative->pid); else printk(" "); if ((relative = older_sibling(p))) printk(" %5d", relative->pid); else printk(" "); if (!p->mm) printk(" (L-TLB)\n"); else printk(" (NOTLB)\n"); if (state != TASK_RUNNING) show_stack(p, NULL); } void show_state(void) { struct task_struct *g, *p; #if (BITS_PER_LONG == 32) printk("\n" " sibling\n"); printk(" task PC pid father child younger older\n"); #else printk("\n" " sibling\n"); printk(" task PC pid father child younger older\n"); #endif read_lock(&tasklist_lock); do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow * console might take alot of time: */ touch_nmi_watchdog(); show_task(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); debug_show_all_locks(); } /** * init_idle - set up an idle thread for a given CPU * @idle: task in question * @cpu: cpu the idle task belongs to * * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. */ void __devinit init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; idle->timestamp = sched_clock(); idle->sleep_avg = 0; idle->array = NULL; idle->prio = idle->normal_prio = MAX_PRIO; idle->state = TASK_RUNNING; idle->cpus_allowed = cpumask_of_cpu(cpu); set_task_cpu(idle, cpu); spin_lock_irqsave(&rq->lock, flags); rq->curr = rq->idle = idle; #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) idle->oncpu = 1; #endif spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); #else task_thread_info(idle)->preempt_count = 0; #endif } /* * In a system that switches off the HZ timer nohz_cpu_mask * indicates which cpus entered this state. This is used * in the rcu update to wait only for active cpus. For system * which do not switch off the HZ timer nohz_cpu_mask should * always be CPU_MASK_NONE. */ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; #ifdef CONFIG_SMP /* * This is how migration works: * * 1) we queue a struct migration_req structure in the source CPU's * runqueue and wake up that CPU's migration thread. * 2) we down() the locked semaphore => thread blocks. * 3) migration thread wakes up (implicitly it forces the migrated * thread off the CPU) * 4) it gets the migration request and checks whether the migrated * task is still in the wrong runqueue. * 5) if it's in the wrong runqueue then the migration thread removes * it and puts it into the right queue. * 6) migration thread up()s the semaphore. * 7) we wake up and the migration is done. */ /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on * is removed from the allowed bitmask. * * NOTE: the caller must have a valid reference to the task, the * task must not exit() & deallocate itself prematurely. 
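
/*
 * Illustrative userspace sketch (standalone program, not part of this file):
 * the sys_sched_get_priority_max()/min() and sys_sched_rr_get_interval()
 * syscalls implemented above are reached through their glibc wrappers.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	int max = sched_get_priority_max(SCHED_FIFO);
	int min = sched_get_priority_min(SCHED_FIFO);

	if (max < 0 || min < 0) {
		perror("sched_get_priority_{max,min}");
		return 1;
	}
	printf("SCHED_FIFO priority range: %d..%d\n", min, max);

	/* Default timeslice of the calling task; 0 means "no timeslice"
	 * (the SCHED_FIFO case handled above). */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR interval: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	else
		perror("sched_rr_get_interval");
	return 0;
}
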
The * call is not atomic; no spinlocks may be held. */ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { struct migration_req req; unsigned long flags; struct rq *rq; int ret = 0; rq = task_rq_lock(p, &flags); if (!cpus_intersects(new_mask, cpu_online_map)) { ret = -EINVAL; goto out; } p->cpus_allowed = new_mask; /* Can the task run on the task's current CPU? If so, we're done */ if (cpu_isset(task_cpu(p), new_mask)) goto out; if (migrate_task(p, any_online_cpu(new_mask), &req)) { /* Need help from migration thread: drop lock and wait. */ task_rq_unlock(rq, &flags); wake_up_process(rq->migration_thread); wait_for_completion(&req.done); tlb_migrate_finish(p->mm); return 0; } out: task_rq_unlock(rq, &flags); return ret; } EXPORT_SYMBOL_GPL(set_cpus_allowed); /* * Move (not current) task off this cpu, onto dest cpu. We're doing * this because either it can't run here any more (set_cpus_allowed() * away from this CPU, or CPU going down), or because we're * attempting to rebalance this task on exec (sched_exec). * * So we race with normal scheduler movements, but that's OK, as long * as the task is no longer on this CPU. * * Returns non-zero if task was successfully migrated. */ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) { struct rq *rq_dest, *rq_src; int ret = 0; if (unlikely(cpu_is_offline(dest_cpu))) return ret; rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); double_rq_lock(rq_src, rq_dest); /* Already moved. */ if (task_cpu(p) != src_cpu) goto out; /* Affinity changed (again). */ if (!cpu_isset(dest_cpu, p->cpus_allowed)) goto out; set_task_cpu(p, dest_cpu); if (p->array) { /* * Sync timestamp with rq_dest's before activating. * The same thing could be achieved by doing this step * afterwards, and pretending it was a local activate. * This way is cleaner and logically correct. */ p->timestamp = p->timestamp - rq_src->timestamp_last_tick + rq_dest->timestamp_last_tick; deactivate_task(p, rq_src); __activate_task(p, rq_dest); if (TASK_PREEMPTS_CURR(p, rq_dest)) resched_task(rq_dest->curr); } ret = 1; out: double_rq_unlock(rq_src, rq_dest); return ret; } /* * migration_thread - this is a highprio system thread that performs * thread migration by bumping thread off CPU then 'pushing' onto * another runqueue. */ static int migration_thread(void *data) { int cpu = (long)data; struct rq *rq; rq = cpu_rq(cpu); BUG_ON(rq->migration_thread != current); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { struct migration_req *req; struct list_head *head; try_to_freeze(); spin_lock_irq(&rq->lock); if (cpu_is_offline(cpu)) { spin_unlock_irq(&rq->lock); goto wait_to_die; } if (rq->active_balance) { active_load_balance(rq, cpu); rq->active_balance = 0; } head = &rq->migration_queue; if (list_empty(head)) { spin_unlock_irq(&rq->lock); schedule(); set_current_state(TASK_INTERRUPTIBLE); continue; } req = list_entry(head->next, struct migration_req, list); list_del_init(head->next); spin_unlock(&rq->lock); __migrate_task(req->task, cpu, req->dest_cpu); local_irq_enable(); complete(&req->done); } __set_current_state(TASK_RUNNING); return 0; wait_to_die: /* Wait for kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return 0; } #ifdef CONFIG_HOTPLUG_CPU /* Figure out where task on dead CPU should go, use force if neccessary. 
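
/*
 * Illustrative userspace sketch (standalone program, not part of this file):
 * set_cpus_allowed() above is the in-kernel primitive behind CPU affinity;
 * from userspace the same path is reached via sched_setaffinity(2), which
 * may hand the task over to the migration thread described above.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);		/* allow CPU 0 only */

	/* pid 0 == the calling task */
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("now pinned, running on CPU %d\n", sched_getcpu());
	return 0;
}
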
*/ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) { unsigned long flags; cpumask_t mask; struct rq *rq; int dest_cpu; restart: /* On same node? */ mask = node_to_cpumask(cpu_to_node(dead_cpu)); cpus_and(mask, mask, p->cpus_allowed); dest_cpu = any_online_cpu(mask); /* On any allowed CPU? */ if (dest_cpu == NR_CPUS) dest_cpu = any_online_cpu(p->cpus_allowed); /* No more Mr. Nice Guy. */ if (dest_cpu == NR_CPUS) { rq = task_rq_lock(p, &flags); cpus_setall(p->cpus_allowed); dest_cpu = any_online_cpu(p->cpus_allowed); task_rq_unlock(rq, &flags); /* * Don't tell them about moving exiting tasks or * kernel threads (both mm NULL), since they never * leave kernel. */ if (p->mm && printk_ratelimit()) printk(KERN_INFO "process %d (%s) no " "longer affine to cpu%d\n", p->pid, p->comm, dead_cpu); } if (!__migrate_task(p, dead_cpu, dest_cpu)) goto restart; } /* * While a dead CPU has no uninterruptible tasks queued at this point, * it might still have a nonzero ->nr_uninterruptible counter, because * for performance reasons the counter is not stricly tracking tasks to * their home CPUs. So we just add the counter to another CPU's counter, * to keep the global sum constant after CPU-down: */ static void migrate_nr_uninterruptible(struct rq *rq_src) { struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); unsigned long flags; local_irq_save(flags); double_rq_lock(rq_src, rq_dest); rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; rq_src->nr_uninterruptible = 0; double_rq_unlock(rq_src, rq_dest); local_irq_restore(flags); } /* Run through task list and migrate tasks from the dead cpu. */ static void migrate_live_tasks(int src_cpu) { struct task_struct *p, *t; write_lock_irq(&tasklist_lock); do_each_thread(t, p) { if (p == current) continue; if (task_cpu(p) == src_cpu) move_task_off_dead_cpu(src_cpu, p); } while_each_thread(t, p); write_unlock_irq(&tasklist_lock); } /* Schedules idle task to be the next runnable task on current CPU. * It does so by boosting its priority to highest possible and adding it to * the _front_ of the runqueue. Used by CPU offline code. */ void sched_idle_next(void) { int this_cpu = smp_processor_id(); struct rq *rq = cpu_rq(this_cpu); struct task_struct *p = rq->idle; unsigned long flags; /* cpu has to be offline */ BUG_ON(cpu_online(this_cpu)); /* * Strictly not necessary since rest of the CPUs are stopped by now * and interrupts disabled on the current cpu. */ spin_lock_irqsave(&rq->lock, flags); __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1); /* Add idle task to the _front_ of its priority queue: */ __activate_idle_task(p, rq); spin_unlock_irqrestore(&rq->lock, flags); } /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. */ void idle_task_exit(void) { struct mm_struct *mm = current->active_mm; BUG_ON(cpu_online(smp_processor_id())); if (mm != &init_mm) switch_mm(mm, &init_mm, current); mmdrop(mm); } static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) { struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); /* Cannot have done final schedule yet: would have vanished. */ BUG_ON(p->flags & PF_DEAD); get_task_struct(p); /* * Drop lock around migration; if someone else moves it, * that's OK. No task can be added to this CPU, so iteration is * fine. 
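
/*
 * Illustrative userspace sketch (standalone program, not part of this file;
 * all names and the 32-CPU toy mask are hypothetical): the three-step
 * fallback used by move_task_off_dead_cpu() above - same node first, then
 * any allowed online CPU, then ignore the affinity mask entirely.
 */
#include <stdio.h>

typedef unsigned int toy_mask_t;	/* one bit per CPU, up to 32 CPUs */

static int first_cpu_in(toy_mask_t mask)
{
	int cpu;

	for (cpu = 0; cpu < 32; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;			/* mirrors "dest_cpu == NR_CPUS" */
}

static int pick_fallback_cpu(toy_mask_t node_cpus, toy_mask_t allowed,
			     toy_mask_t online)
{
	int cpu;

	/* 1) same node, still allowed and online */
	cpu = first_cpu_in(node_cpus & allowed & online);
	if (cpu >= 0)
		return cpu;
	/* 2) any allowed online CPU */
	cpu = first_cpu_in(allowed & online);
	if (cpu >= 0)
		return cpu;
	/* 3) "No more Mr. Nice Guy": ignore the affinity mask */
	return first_cpu_in(online);
}

int main(void)
{
	/* node = {2,3}, allowed = {1}, online = {0,1}: step 1 fails,
	 * step 2 picks CPU 1. */
	printf("fallback cpu: %d\n", pick_fallback_cpu(0xc, 0x2, 0x3));
	return 0;
}
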
*/ spin_unlock_irq(&rq->lock); move_task_off_dead_cpu(dead_cpu, p); spin_lock_irq(&rq->lock); put_task_struct(p); } /* release_task() removes task from tasklist, so we won't find dead tasks. */ static void migrate_dead_tasks(unsigned int dead_cpu) { struct rq *rq = cpu_rq(dead_cpu); unsigned int arr, i; for (arr = 0; arr < 2; arr++) { for (i = 0; i < MAX_PRIO; i++) { struct list_head *list = &rq->arrays[arr].queue[i]; while (!list_empty(list)) migrate_dead(dead_cpu, list_entry(list->next, struct task_struct, run_list)); } } } #endif /* CONFIG_HOTPLUG_CPU */ /* * migration_call - callback that gets triggered when a CPU is added. * Here we can start up the necessary migration thread for the new CPU. */ static int __cpuinit migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct task_struct *p; int cpu = (long)hcpu; unsigned long flags; struct rq *rq; switch (action) { case CPU_UP_PREPARE: p = kthread_create(migration_thread, hcpu, "migration/%d",cpu); if (IS_ERR(p)) return NOTIFY_BAD; p->flags |= PF_NOFREEZE; kthread_bind(p, cpu); /* Must be high prio: stop_machine expects to yield to it. */ rq = task_rq_lock(p, &flags); __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1); task_rq_unlock(rq, &flags); cpu_rq(cpu)->migration_thread = p; break; case CPU_ONLINE: /* Strictly unneccessary, as first user will wake it. */ wake_up_process(cpu_rq(cpu)->migration_thread); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: if (!cpu_rq(cpu)->migration_thread) break; /* Unbind it from offline cpu so it can run. Fall thru. */ kthread_bind(cpu_rq(cpu)->migration_thread, any_online_cpu(cpu_online_map)); kthread_stop(cpu_rq(cpu)->migration_thread); cpu_rq(cpu)->migration_thread = NULL; break; case CPU_DEAD: migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ rq = task_rq_lock(rq->idle, &flags); deactivate_task(rq->idle, rq); rq->idle->static_prio = MAX_PRIO; __setscheduler(rq->idle, SCHED_NORMAL, 0); migrate_dead_tasks(cpu); task_rq_unlock(rq, &flags); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); /* No need to migrate the tasks: it was best-effort if * they didn't do lock_cpu_hotplug(). Just wake up * the requestors. */ spin_lock_irq(&rq->lock); while (!list_empty(&rq->migration_queue)) { struct migration_req *req; req = list_entry(rq->migration_queue.next, struct migration_req, list); list_del_init(&req->list); complete(&req->done); } spin_unlock_irq(&rq->lock); break; #endif } return NOTIFY_OK; } /* Register at highest priority so that task migration (migrate_all_tasks) * happens before everything else. 
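
/*
 * Sketch of the hotplug-notifier pattern used by migration_call() above,
 * written as a hypothetical example module (not part of this file). It
 * assumes the same 2.6-era API - struct notifier_block, register_cpu_notifier()
 * and the CPU_* action codes - that this file already relies on.
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "example: cpu%d is now online\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "example: cpu%d went away\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call	= example_cpu_callback,
};

static int __init example_init(void)
{
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");
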
*/ static struct notifier_block __cpuinitdata migration_notifier = { .notifier_call = migration_call, .priority = 10 }; int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); /* Start one for the boot CPU: */ migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); return 0; } #endif #ifdef CONFIG_SMP #undef SCHED_DOMAIN_DEBUG #ifdef SCHED_DOMAIN_DEBUG static void sched_domain_debug(struct sched_domain *sd, int cpu) { int level = 0; if (!sd) { printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); return; } printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); do { int i; char str[NR_CPUS]; struct sched_group *group = sd->groups; cpumask_t groupmask; cpumask_scnprintf(str, NR_CPUS, sd->span); cpus_clear(groupmask); printk(KERN_DEBUG); for (i = 0; i < level + 1; i++) printk(" "); printk("domain %d: ", level); if (!(sd->flags & SD_LOAD_BALANCE)) { printk("does not load-balance\n"); if (sd->parent) printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent"); break; } printk("span %s\n", str); if (!cpu_isset(cpu, sd->span)) printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); if (!cpu_isset(cpu, group->cpumask)) printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); printk(KERN_DEBUG); for (i = 0; i < level + 2; i++) printk(" "); printk("groups:"); do { if (!group) { printk("\n"); printk(KERN_ERR "ERROR: group is NULL\n"); break; } if (!group->cpu_power) { printk("\n"); printk(KERN_ERR "ERROR: domain->cpu_power not set\n"); } if (!cpus_weight(group->cpumask)) { printk("\n"); printk(KERN_ERR "ERROR: empty group\n"); } if (cpus_intersects(groupmask, group->cpumask)) { printk("\n"); printk(KERN_ERR "ERROR: repeated CPUs\n"); } cpus_or(groupmask, groupmask, group->cpumask); cpumask_scnprintf(str, NR_CPUS, group->cpumask); printk(" %s", str); group = group->next; } while (group != sd->groups); printk("\n"); if (!cpus_equal(sd->span, groupmask)) printk(KERN_ERR "ERROR: groups don't span domain->span\n"); level++; sd = sd->parent; if (sd) { if (!cpus_subset(groupmask, sd->span)) printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); } } while (sd); } #else # define sched_domain_debug(sd, cpu) do { } while (0) #endif static int sd_degenerate(struct sched_domain *sd) { if (cpus_weight(sd->span) == 1) return 1; /* Following flags need at least 2 groups */ if (sd->flags & (SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC)) { if (sd->groups != sd->groups->next) return 0; } /* Following flags don't use groups */ if (sd->flags & (SD_WAKE_IDLE | SD_WAKE_AFFINE | SD_WAKE_BALANCE)) return 0; return 1; } static int sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) { unsigned long cflags = sd->flags, pflags = parent->flags; if (sd_degenerate(parent)) return 1; if (!cpus_equal(sd->span, parent->span)) return 0; /* Does parent contain flags not in child? */ /* WAKE_BALANCE is a subset of WAKE_AFFINE */ if (cflags & SD_WAKE_AFFINE) pflags &= ~SD_WAKE_BALANCE; /* Flags needing groups don't count if only 1 group in parent */ if (parent->groups == parent->groups->next) { pflags &= ~(SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC); } if (~cflags & pflags) return 0; return 1; } /* * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. 
*/ static void cpu_attach_domain(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); struct sched_domain *tmp; /* Remove the sched domains which do not contribute to scheduling. */ for (tmp = sd; tmp; tmp = tmp->parent) { struct sched_domain *parent = tmp->parent; if (!parent) break; if (sd_parent_degenerate(tmp, parent)) tmp->parent = parent->parent; } if (sd && sd_degenerate(sd)) sd = sd->parent; sched_domain_debug(sd, cpu); rcu_assign_pointer(rq->sd, sd); } /* cpus with isolated domains */ static cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE; /* Setup the mask of cpus configured for isolated domains */ static int __init isolated_cpu_setup(char *str) { int ints[NR_CPUS], i; str = get_options(str, ARRAY_SIZE(ints), ints); cpus_clear(cpu_isolated_map); for (i = 1; i <= ints[0]; i++) if (ints[i] < NR_CPUS) cpu_set(ints[i], cpu_isolated_map); return 1; } __setup ("isolcpus=", isolated_cpu_setup); /* * init_sched_build_groups takes an array of groups, the cpumask we wish * to span, and a pointer to a function which identifies what group a CPU * belongs to. The return value of group_fn must be a valid index into the * groups[] array, and must be >= 0 and < NR_CPUS (due to the fact that we * keep track of groups covered with a cpumask_t). * * init_sched_build_groups will build a circular linked list of the groups * covered by the given span, and will set each group's ->cpumask correctly, * and ->cpu_power to 0. */ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span, int (*group_fn)(int cpu)) { struct sched_group *first = NULL, *last = NULL; cpumask_t covered = CPU_MASK_NONE; int i; for_each_cpu_mask(i, span) { int group = group_fn(i); struct sched_group *sg = &groups[group]; int j; if (cpu_isset(i, covered)) continue; sg->cpumask = CPU_MASK_NONE; sg->cpu_power = 0; for_each_cpu_mask(j, span) { if (group_fn(j) != group) continue; cpu_set(j, covered); cpu_set(j, sg->cpumask); } if (!first) first = sg; if (last) last->next = sg; last = sg; } last->next = first; } #define SD_NODES_PER_DOMAIN 16 /* * Self-tuning task migration cost measurement between source and target CPUs. * * This is done by measuring the cost of manipulating buffers of varying * sizes. For a given buffer-size here are the steps that are taken: * * 1) the source CPU reads+dirties a shared buffer * 2) the target CPU reads+dirties the same shared buffer * * We measure how long they take, in the following 4 scenarios: * * - source: CPU1, target: CPU2 | cost1 * - source: CPU2, target: CPU1 | cost2 * - source: CPU1, target: CPU1 | cost3 * - source: CPU2, target: CPU2 | cost4 * * We then calculate the cost3+cost4-cost1-cost2 difference - this is * the cost of migration. * * We then start off from a small buffer-size and iterate up to larger * buffer sizes, in 5% steps - measuring each buffer-size separately, and * doing a maximum search for the cost. (The maximum cost for a migration * normally occurs when the working set size is around the effective cache * size.) */ #define SEARCH_SCOPE 2 #define MIN_CACHE_SIZE (64*1024U) #define DEFAULT_CACHE_SIZE (5*1024*1024U) #define ITERATIONS 1 #define SIZE_THRESH 130 #define COST_THRESH 130 /* * The migration cost is a function of 'domain distance'. Domain * distance is the number of steps a CPU has to iterate down its * domain tree to share a domain with the other CPU. The farther * two CPUs are from each other, the larger the distance gets. 
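
/*
 * Illustrative userspace sketch (standalone program, not part of this file;
 * the toy_group type and helper names are hypothetical): the same pattern as
 * init_sched_build_groups() above - partition the CPUs of a span into groups
 * via a group_fn() and link the groups into a circular list through ->next.
 */
#include <stdio.h>

struct toy_group {
	unsigned int		cpumask;	/* one bit per CPU */
	struct toy_group	*next;
};

static int group_fn(int cpu)
{
	return cpu / 2;			/* e.g. two SMT siblings per group */
}

static void build_groups(struct toy_group groups[], int nr_cpus)
{
	struct toy_group *first = NULL, *last = NULL;
	unsigned int covered = 0;
	int i, j;

	for (i = 0; i < nr_cpus; i++) {
		struct toy_group *sg = &groups[group_fn(i)];

		if (covered & (1u << i))
			continue;
		sg->cpumask = 0;
		for (j = 0; j < nr_cpus; j++) {
			if (group_fn(j) != group_fn(i))
				continue;
			covered |= 1u << j;
			sg->cpumask |= 1u << j;
		}
		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;		/* close the ring */
}

int main(void)
{
	struct toy_group groups[2], *sg;

	build_groups(groups, 4);
	sg = &groups[0];
	do {
		printf("group mask 0x%x\n", sg->cpumask);
		sg = sg->next;
	} while (sg != &groups[0]);
	return 0;
}
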
* * Note that we use the distance only to cache measurement results, * the distance value is not used numerically otherwise. When two * CPUs have the same distance it is assumed that the migration * cost is the same. (this is a simplification but quite practical) */ #define MAX_DOMAIN_DISTANCE 32 static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] = { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = /* * Architectures may override the migration cost and thus avoid * boot-time calibration. Unit is nanoseconds. Mostly useful for * virtualized hardware: */ #ifdef CONFIG_DEFAULT_MIGRATION_COST CONFIG_DEFAULT_MIGRATION_COST #else -1LL #endif }; /* * Allow override of migration cost - in units of microseconds. * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs: */ static int __init migration_cost_setup(char *str) { int ints[MAX_DOMAIN_DISTANCE+1], i; str = get_options(str, ARRAY_SIZE(ints), ints); printk("#ints: %d\n", ints[0]); for (i = 1; i <= ints[0]; i++) { migration_cost[i-1] = (unsigned long long)ints[i]*1000; printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]); } return 1; } __setup ("migration_cost=", migration_cost_setup); /* * Global multiplier (divisor) for migration-cutoff values, * in percentiles. E.g. use a value of 150 to get 1.5 times * longer cache-hot cutoff times. * * (We scale it from 100 to 128 to long long handling easier.) */ #define MIGRATION_FACTOR_SCALE 128 static unsigned int migration_factor = MIGRATION_FACTOR_SCALE; static int __init setup_migration_factor(char *str) { get_option(&str, &migration_factor); migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100; return 1; } __setup("migration_factor=", setup_migration_factor); /* * Estimated distance of two CPUs, measured via the number of domains * we have to pass for the two CPUs to be in the same span: */ static unsigned long domain_distance(int cpu1, int cpu2) { unsigned long distance = 0; struct sched_domain *sd; for_each_domain(cpu1, sd) { WARN_ON(!cpu_isset(cpu1, sd->span)); if (cpu_isset(cpu2, sd->span)) return distance; distance++; } if (distance >= MAX_DOMAIN_DISTANCE) { WARN_ON(1); distance = MAX_DOMAIN_DISTANCE-1; } return distance; } static unsigned int migration_debug; static int __init setup_migration_debug(char *str) { get_option(&str, &migration_debug); return 1; } __setup("migration_debug=", setup_migration_debug); /* * Maximum cache-size that the scheduler should try to measure. * Architectures with larger caches should tune this up during * bootup. Gets used in the domain-setup code (i.e. during SMP * bootup). */ unsigned int max_cache_size; static int __init setup_max_cache_size(char *str) { get_option(&str, &max_cache_size); return 1; } __setup("max_cache_size=", setup_max_cache_size); /* * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This * is the operation that is timed, so we try to generate unpredictable * cachemisses that still end up filling the L2 cache: */ static void touch_cache(void *__cache, unsigned long __size) { unsigned long size = __size/sizeof(long), chunk1 = size/3, chunk2 = 2*size/3; unsigned long *cache = __cache; int i; for (i = 0; i < size/6; i += 8) { switch (i % 6) { case 0: cache[i]++; case 1: cache[size-1-i]++; case 2: cache[chunk1-i]++; case 3: cache[chunk1+i]++; case 4: cache[chunk2-i]++; case 5: cache[chunk2+i]++; } } } /* * Measure the cache-cost of one task migration. Returns in units of nsec. 
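
/*
 * Standalone arithmetic sketch (not part of this file) of the
 * migration_factor= scaling described above: the boot parameter is given in
 * percent, converted to a /128 fixed-point multiplier, and the cache-hot
 * cutoff ends up being twice the measured worst-case migration cost scaled
 * by that factor. The sample numbers below are made up for illustration.
 */
#include <stdio.h>

#define MIGRATION_FACTOR_SCALE	128

int main(void)
{
	unsigned int percent = 150;			/* migration_factor=150 */
	unsigned int factor = percent * MIGRATION_FACTOR_SCALE / 100;
	unsigned long long max_cost = 4000000ULL;	/* 4 ms, "measured" */
	unsigned long long cutoff =
		2 * max_cost * factor / MIGRATION_FACTOR_SCALE;

	printf("factor = %u/128, cache-hot cutoff = %llu ns\n", factor, cutoff);
	return 0;
}
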
*/ static unsigned long long measure_one(void *cache, unsigned long size, int source, int target) { cpumask_t mask, saved_mask; unsigned long long t0, t1, t2, t3, cost; saved_mask = current->cpus_allowed; /* * Flush source caches to RAM and invalidate them: */ sched_cacheflush(); /* * Migrate to the source CPU: */ mask = cpumask_of_cpu(source); set_cpus_allowed(current, mask); WARN_ON(smp_processor_id() != source); /* * Dirty the working set: */ t0 = sched_clock(); touch_cache(cache, size); t1 = sched_clock(); /* * Migrate to the target CPU, dirty the L2 cache and access * the shared buffer. (which represents the working set * of a migrated task.) */ mask = cpumask_of_cpu(target); set_cpus_allowed(current, mask); WARN_ON(smp_processor_id() != target); t2 = sched_clock(); touch_cache(cache, size); t3 = sched_clock(); cost = t1-t0 + t3-t2; if (migration_debug >= 2) printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n", source, target, t1-t0, t1-t0, t3-t2, cost); /* * Flush target caches to RAM and invalidate them: */ sched_cacheflush(); set_cpus_allowed(current, saved_mask); return cost; } /* * Measure a series of task migrations and return the average * result. Since this code runs early during bootup the system * is 'undisturbed' and the average latency makes sense. * * The algorithm in essence auto-detects the relevant cache-size, * so it will properly detect different cachesizes for different * cache-hierarchies, depending on how the CPUs are connected. * * Architectures can prime the upper limit of the search range via * max_cache_size, otherwise the search range defaults to 20MB...64K. */ static unsigned long long measure_cost(int cpu1, int cpu2, void *cache, unsigned int size) { unsigned long long cost1, cost2; int i; /* * Measure the migration cost of 'size' bytes, over an * average of 10 runs: * * (We perturb the cache size by a small (0..4k) * value to compensate size/alignment related artifacts. * We also subtract the cost of the operation done on * the same CPU.) */ cost1 = 0; /* * dry run, to make sure we start off cache-cold on cpu1, * and to get any vmalloc pagefaults in advance: */ measure_one(cache, size, cpu1, cpu2); for (i = 0; i < ITERATIONS; i++) cost1 += measure_one(cache, size - i*1024, cpu1, cpu2); measure_one(cache, size, cpu2, cpu1); for (i = 0; i < ITERATIONS; i++) cost1 += measure_one(cache, size - i*1024, cpu2, cpu1); /* * (We measure the non-migrating [cached] cost on both * cpu1 and cpu2, to handle CPUs with different speeds) */ cost2 = 0; measure_one(cache, size, cpu1, cpu1); for (i = 0; i < ITERATIONS; i++) cost2 += measure_one(cache, size - i*1024, cpu1, cpu1); measure_one(cache, size, cpu2, cpu2); for (i = 0; i < ITERATIONS; i++) cost2 += measure_one(cache, size - i*1024, cpu2, cpu2); /* * Get the per-iteration migration cost: */ do_div(cost1, 2*ITERATIONS); do_div(cost2, 2*ITERATIONS); return cost1 - cost2; } static unsigned long long measure_migration_cost(int cpu1, int cpu2) { unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0; unsigned int max_size, size, size_found = 0; long long cost = 0, prev_cost; void *cache; /* * Search from max_cache_size*5 down to 64K - the real relevant * cachesize has to lie somewhere inbetween. 
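
/*
 * Illustrative userspace sketch (standalone program, not part of this file):
 * the t0/t1 .. t2/t3 bracketing used by measure_one() above, approximated
 * with clock_gettime() and memset() as the buffer "touch". No CPU migration
 * is performed here - that part would need sched_setaffinity().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	size_t size = 1024 * 1024;
	char *buf = malloc(size);
	unsigned long long t0, t1, t2, t3;

	if (!buf)
		return 1;

	t0 = now_ns();
	memset(buf, 1, size);		/* "dirty the working set" */
	t1 = now_ns();

	t2 = now_ns();
	memset(buf, 2, size);		/* second pass, now cache-warm */
	t3 = now_ns();

	printf("cost = %llu ns (cold %llu + warm %llu)\n",
	       (t1 - t0) + (t3 - t2), t1 - t0, t3 - t2);
	free(buf);
	return 0;
}
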
*/ if (max_cache_size) { max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE); size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE); } else { /* * Since we have no estimation about the relevant * search range */ max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE; size = MIN_CACHE_SIZE; } if (!cpu_online(cpu1) || !cpu_online(cpu2)) { printk("cpu %d and %d not both online!\n", cpu1, cpu2); return 0; } /* * Allocate the working set: */ cache = vmalloc(max_size); if (!cache) { printk("could not vmalloc %d bytes for cache!\n", 2*max_size); return 1000000; /* return 1 msec on very small boxen */ } while (size <= max_size) { prev_cost = cost; cost = measure_cost(cpu1, cpu2, cache, size); /* * Update the max: */ if (cost > 0) { if (max_cost < cost) { max_cost = cost; size_found = size; } } /* * Calculate average fluctuation, we use this to prevent * noise from triggering an early break out of the loop: */ fluct = abs(cost - prev_cost); avg_fluct = (avg_fluct + fluct)/2; if (migration_debug) printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n", cpu1, cpu2, size, (long)cost / 1000000, ((long)cost / 100000) % 10, (long)max_cost / 1000000, ((long)max_cost / 100000) % 10, domain_distance(cpu1, cpu2), cost, avg_fluct); /* * If we iterated at least 20% past the previous maximum, * and the cost has dropped by more than 20% already, * (taking fluctuations into account) then we assume to * have found the maximum and break out of the loop early: */ if (size_found && (size*100 > size_found*SIZE_THRESH)) if (cost+avg_fluct <= 0 || max_cost*100 > (cost+avg_fluct)*COST_THRESH) { if (migration_debug) printk("-> found max.\n"); break; } /* * Increase the cachesize in 10% steps: */ size = size * 10 / 9; } if (migration_debug) printk("[%d][%d] working set size found: %d, cost: %Ld\n", cpu1, cpu2, size_found, max_cost); vfree(cache); /* * A task is considered 'cache cold' if at least 2 times * the worst-case cost of migration has passed. * * (this limit is only listened to if the load-balancing * situation is 'nice' - if there is a large imbalance we * ignore it for the sake of CPU utilization and * processing fairness.) */ return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE; } static void calibrate_migration_costs(const cpumask_t *cpu_map) { int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id(); unsigned long j0, j1, distance, max_distance = 0; struct sched_domain *sd; j0 = jiffies; /* * First pass - calculate the cacheflush times: */ for_each_cpu_mask(cpu1, *cpu_map) { for_each_cpu_mask(cpu2, *cpu_map) { if (cpu1 == cpu2) continue; distance = domain_distance(cpu1, cpu2); max_distance = max(max_distance, distance); /* * No result cached yet? 
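
/*
 * Standalone sketch (not part of this file) of the probe-size progression
 * used by the search loop above: "size = size * 10 / 9" grows the candidate
 * working-set size by roughly 11% per step, from MIN_CACHE_SIZE up to the
 * search limit, and the loop breaks early once it is 30% past the best size
 * found and the cost has clearly dropped off.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size = 64 * 1024;		/* MIN_CACHE_SIZE */
	unsigned int max_size = 10 * 1024 * 1024;
	int steps = 0;

	while (size <= max_size) {
		printf("step %2d: %8u bytes\n", steps, size);
		size = size * 10 / 9;
		steps++;
	}
	printf("%d probe sizes between 64K and %u bytes\n", steps, max_size);
	return 0;
}
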
*/ if (migration_cost[distance] == -1LL) migration_cost[distance] = measure_migration_cost(cpu1, cpu2); } } /* * Second pass - update the sched domain hierarchy with * the new cache-hot-time estimations: */ for_each_cpu_mask(cpu, *cpu_map) { distance = 0; for_each_domain(cpu, sd) { sd->cache_hot_time = migration_cost[distance]; distance++; } } /* * Print the matrix: */ if (migration_debug) printk("migration: max_cache_size: %d, cpu: %d MHz:\n", max_cache_size, #ifdef CONFIG_X86 cpu_khz/1000 #else -1 #endif ); if (system_state == SYSTEM_BOOTING) { printk("migration_cost="); for (distance = 0; distance <= max_distance; distance++) { if (distance) printk(","); printk("%ld", (long)migration_cost[distance] / 1000); } printk("\n"); } j1 = jiffies; if (migration_debug) printk("migration: %ld seconds\n", (j1-j0)/HZ); /* * Move back to the original CPU. NUMA-Q gets confused * if we migrate to another quad during bootup. */ if (raw_smp_processor_id() != orig_cpu) { cpumask_t mask = cpumask_of_cpu(orig_cpu), saved_mask = current->cpus_allowed; set_cpus_allowed(current, mask); set_cpus_allowed(current, saved_mask); } } #ifdef CONFIG_NUMA /** * find_next_best_node - find the next node to include in a sched_domain * @node: node whose sched_domain we're building * @used_nodes: nodes already in the sched_domain * * Find the next node to include in a given scheduling domain. Simply * finds the closest node not already in the @used_nodes map. * * Should use nodemask_t. */ static int find_next_best_node(int node, unsigned long *used_nodes) { int i, n, val, min_val, best_node = 0; min_val = INT_MAX; for (i = 0; i < MAX_NUMNODES; i++) { /* Start at @node */ n = (node + i) % MAX_NUMNODES; if (!nr_cpus_node(n)) continue; /* Skip already used nodes */ if (test_bit(n, used_nodes)) continue; /* Simple min distance search */ val = node_distance(node, n); if (val < min_val) { min_val = val; best_node = n; } } set_bit(best_node, used_nodes); return best_node; } /** * sched_domain_node_span - get a cpumask for a node's sched_domain * @node: node whose cpumask we're constructing * @size: number of nodes to include in this span * * Given a node, construct a good cpumask for its sched_domain to span. It * should be one that prevents unnecessary balancing, but also spreads tasks * out optimally. 
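
/*
 * Illustrative userspace sketch (standalone program, not part of this file;
 * the 4-node distance matrix is made up): the greedy "closest unused node"
 * selection performed by find_next_best_node() above, simplified by leaving
 * out the nr_cpus_node() check.
 */
#include <stdio.h>

#define NR_NODES 4

static const int dist[NR_NODES][NR_NODES] = {	/* toy node_distance() */
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

static int next_best_node(int node, unsigned int *used)
{
	int i, best = 0, min_val = 1 << 30;

	for (i = 0; i < NR_NODES; i++) {
		int n = (node + i) % NR_NODES;

		if (*used & (1u << n))		/* skip already used nodes */
			continue;
		if (dist[node][n] < min_val) {	/* simple min distance search */
			min_val = dist[node][n];
			best = n;
		}
	}
	*used |= 1u << best;
	return best;
}

int main(void)
{
	unsigned int used = 1u << 0;		/* start from node 0 */
	int i;

	printf("span order from node 0: 0");
	for (i = 1; i < NR_NODES; i++)
		printf(" -> %d", next_best_node(0, &used));
	printf("\n");
	return 0;
}
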
*/ static cpumask_t sched_domain_node_span(int node) { DECLARE_BITMAP(used_nodes, MAX_NUMNODES); cpumask_t span, nodemask; int i; cpus_clear(span); bitmap_zero(used_nodes, MAX_NUMNODES); nodemask = node_to_cpumask(node); cpus_or(span, span, nodemask); set_bit(node, used_nodes); for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { int next_node = find_next_best_node(node, used_nodes); nodemask = node_to_cpumask(next_node); cpus_or(span, span, nodemask); } return span; } #endif int sched_smt_power_savings = 0, sched_mc_power_savings = 0; /* * SMT sched-domains: */ #ifdef CONFIG_SCHED_SMT static DEFINE_PER_CPU(struct sched_domain, cpu_domains); static struct sched_group sched_group_cpus[NR_CPUS]; static int cpu_to_cpu_group(int cpu) { return cpu; } #endif /* * multi-core sched-domains: */ #ifdef CONFIG_SCHED_MC static DEFINE_PER_CPU(struct sched_domain, core_domains); static struct sched_group *sched_group_core_bycpu[NR_CPUS]; #endif #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) static int cpu_to_core_group(int cpu) { return first_cpu(cpu_sibling_map[cpu]); } #elif defined(CONFIG_SCHED_MC) static int cpu_to_core_group(int cpu) { return cpu; } #endif static DEFINE_PER_CPU(struct sched_domain, phys_domains); static struct sched_group *sched_group_phys_bycpu[NR_CPUS]; static int cpu_to_phys_group(int cpu) { #ifdef CONFIG_SCHED_MC cpumask_t mask = cpu_coregroup_map(cpu); return first_cpu(mask); #elif defined(CONFIG_SCHED_SMT) return first_cpu(cpu_sibling_map[cpu]); #else return cpu; #endif } #ifdef CONFIG_NUMA /* * The init_sched_build_groups can't handle what we want to do with node * groups, so roll our own. Now each node has its own list of groups which * gets dynamically allocated. */ static DEFINE_PER_CPU(struct sched_domain, node_domains); static struct sched_group **sched_group_nodes_bycpu[NR_CPUS]; static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS]; static int cpu_to_allnodes_group(int cpu) { return cpu_to_node(cpu); } static void init_numa_sched_groups_power(struct sched_group *group_head) { struct sched_group *sg = group_head; int j; if (!sg) return; next_sg: for_each_cpu_mask(j, sg->cpumask) { struct sched_domain *sd; sd = &per_cpu(phys_domains, j); if (j != first_cpu(sd->groups->cpumask)) { /* * Only add "power" once for each * physical package. 
*/ continue; } sg->cpu_power += sd->groups->cpu_power; } sg = sg->next; if (sg != group_head) goto next_sg; } #endif /* Free memory allocated for various sched_group structures */ static void free_sched_groups(const cpumask_t *cpu_map) { int cpu; #ifdef CONFIG_NUMA int i; for_each_cpu_mask(cpu, *cpu_map) { struct sched_group *sched_group_allnodes = sched_group_allnodes_bycpu[cpu]; struct sched_group **sched_group_nodes = sched_group_nodes_bycpu[cpu]; if (sched_group_allnodes) { kfree(sched_group_allnodes); sched_group_allnodes_bycpu[cpu] = NULL; } if (!sched_group_nodes) continue; for (i = 0; i < MAX_NUMNODES; i++) { cpumask_t nodemask = node_to_cpumask(i); struct sched_group *oldsg, *sg = sched_group_nodes[i]; cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) continue; if (sg == NULL) continue; sg = sg->next; next_sg: oldsg = sg; sg = sg->next; kfree(oldsg); if (oldsg != sched_group_nodes[i]) goto next_sg; } kfree(sched_group_nodes); sched_group_nodes_bycpu[cpu] = NULL; } #endif for_each_cpu_mask(cpu, *cpu_map) { if (sched_group_phys_bycpu[cpu]) { kfree(sched_group_phys_bycpu[cpu]); sched_group_phys_bycpu[cpu] = NULL; } #ifdef CONFIG_SCHED_MC if (sched_group_core_bycpu[cpu]) { kfree(sched_group_core_bycpu[cpu]); sched_group_core_bycpu[cpu] = NULL; } #endif } } /* * Build sched domains for a given set of cpus and attach the sched domains * to the individual cpus */ static int build_sched_domains(const cpumask_t *cpu_map) { int i; struct sched_group *sched_group_phys = NULL; #ifdef CONFIG_SCHED_MC struct sched_group *sched_group_core = NULL; #endif #ifdef CONFIG_NUMA struct sched_group **sched_group_nodes = NULL; struct sched_group *sched_group_allnodes = NULL; /* * Allocate the per-node list of sched groups */ sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES, GFP_KERNEL); if (!sched_group_nodes) { printk(KERN_WARNING "Can not alloc sched group node list\n"); return -ENOMEM; } sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; #endif /* * Set up domains for cpus specified by the cpu_map. 
*/ for_each_cpu_mask(i, *cpu_map) { int group; struct sched_domain *sd = NULL, *p; cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); cpus_and(nodemask, nodemask, *cpu_map); #ifdef CONFIG_NUMA if (cpus_weight(*cpu_map) > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { if (!sched_group_allnodes) { sched_group_allnodes = kmalloc(sizeof(struct sched_group) * MAX_NUMNODES, GFP_KERNEL); if (!sched_group_allnodes) { printk(KERN_WARNING "Can not alloc allnodes sched group\n"); goto error; } sched_group_allnodes_bycpu[i] = sched_group_allnodes; } sd = &per_cpu(allnodes_domains, i); *sd = SD_ALLNODES_INIT; sd->span = *cpu_map; group = cpu_to_allnodes_group(i); sd->groups = &sched_group_allnodes[group]; p = sd; } else p = NULL; sd = &per_cpu(node_domains, i); *sd = SD_NODE_INIT; sd->span = sched_domain_node_span(cpu_to_node(i)); sd->parent = p; cpus_and(sd->span, sd->span, *cpu_map); #endif if (!sched_group_phys) { sched_group_phys = kmalloc(sizeof(struct sched_group) * NR_CPUS, GFP_KERNEL); if (!sched_group_phys) { printk (KERN_WARNING "Can not alloc phys sched" "group\n"); goto error; } sched_group_phys_bycpu[i] = sched_group_phys; } p = sd; sd = &per_cpu(phys_domains, i); group = cpu_to_phys_group(i); *sd = SD_CPU_INIT; sd->span = nodemask; sd->parent = p; sd->groups = &sched_group_phys[group]; #ifdef CONFIG_SCHED_MC if (!sched_group_core) { sched_group_core = kmalloc(sizeof(struct sched_group) * NR_CPUS, GFP_KERNEL); if (!sched_group_core) { printk (KERN_WARNING "Can not alloc core sched" "group\n"); goto error; } sched_group_core_bycpu[i] = sched_group_core; } p = sd; sd = &per_cpu(core_domains, i); group = cpu_to_core_group(i); *sd = SD_MC_INIT; sd->span = cpu_coregroup_map(i); cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; sd->groups = &sched_group_core[group]; #endif #ifdef CONFIG_SCHED_SMT p = sd; sd = &per_cpu(cpu_domains, i); group = cpu_to_cpu_group(i); *sd = SD_SIBLING_INIT; sd->span = cpu_sibling_map[i]; cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; sd->groups = &sched_group_cpus[group]; #endif } #ifdef CONFIG_SCHED_SMT /* Set up CPU (sibling) groups */ for_each_cpu_mask(i, *cpu_map) { cpumask_t this_sibling_map = cpu_sibling_map[i]; cpus_and(this_sibling_map, this_sibling_map, *cpu_map); if (i != first_cpu(this_sibling_map)) continue; init_sched_build_groups(sched_group_cpus, this_sibling_map, &cpu_to_cpu_group); } #endif #ifdef CONFIG_SCHED_MC /* Set up multi-core groups */ for_each_cpu_mask(i, *cpu_map) { cpumask_t this_core_map = cpu_coregroup_map(i); cpus_and(this_core_map, this_core_map, *cpu_map); if (i != first_cpu(this_core_map)) continue; init_sched_build_groups(sched_group_core, this_core_map, &cpu_to_core_group); } #endif /* Set up physical groups */ for (i = 0; i < MAX_NUMNODES; i++) { cpumask_t nodemask = node_to_cpumask(i); cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) continue; init_sched_build_groups(sched_group_phys, nodemask, &cpu_to_phys_group); } #ifdef CONFIG_NUMA /* Set up node groups */ if (sched_group_allnodes) init_sched_build_groups(sched_group_allnodes, *cpu_map, &cpu_to_allnodes_group); for (i = 0; i < MAX_NUMNODES; i++) { /* Set up node groups */ struct sched_group *sg, *prev; cpumask_t nodemask = node_to_cpumask(i); cpumask_t domainspan; cpumask_t covered = CPU_MASK_NONE; int j; cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) { sched_group_nodes[i] = NULL; continue; } domainspan = sched_domain_node_span(i); cpus_and(domainspan, domainspan, *cpu_map); sg = kmalloc_node(sizeof(struct sched_group), 
GFP_KERNEL, i); if (!sg) { printk(KERN_WARNING "Can not alloc domain group for " "node %d\n", i); goto error; } sched_group_nodes[i] = sg; for_each_cpu_mask(j, nodemask) { struct sched_domain *sd; sd = &per_cpu(node_domains, j); sd->groups = sg; } sg->cpu_power = 0; sg->cpumask = nodemask; sg->next = sg; cpus_or(covered, covered, nodemask); prev = sg; for (j = 0; j < MAX_NUMNODES; j++) { cpumask_t tmp, notcovered; int n = (i + j) % MAX_NUMNODES; cpus_complement(notcovered, covered); cpus_and(tmp, notcovered, *cpu_map); cpus_and(tmp, tmp, domainspan); if (cpus_empty(tmp)) break; nodemask = node_to_cpumask(n); cpus_and(tmp, tmp, nodemask); if (cpus_empty(tmp)) continue; sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); if (!sg) { printk(KERN_WARNING "Can not alloc domain group for node %d\n", j); goto error; } sg->cpu_power = 0; sg->cpumask = tmp; sg->next = prev->next; cpus_or(covered, covered, tmp); prev->next = sg; prev = sg; } } #endif /* Calculate CPU power for physical packages and nodes */ #ifdef CONFIG_SCHED_SMT for_each_cpu_mask(i, *cpu_map) { struct sched_domain *sd; sd = &per_cpu(cpu_domains, i); sd->groups->cpu_power = SCHED_LOAD_SCALE; } #endif #ifdef CONFIG_SCHED_MC for_each_cpu_mask(i, *cpu_map) { int power; struct sched_domain *sd; sd = &per_cpu(core_domains, i); if (sched_smt_power_savings) power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask); else power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) * SCHED_LOAD_SCALE / 10; sd->groups->cpu_power = power; } #endif for_each_cpu_mask(i, *cpu_map) { struct sched_domain *sd; #ifdef CONFIG_SCHED_MC sd = &per_cpu(phys_domains, i); if (i != first_cpu(sd->groups->cpumask)) continue; sd->groups->cpu_power = 0; if (sched_mc_power_savings || sched_smt_power_savings) { int j; for_each_cpu_mask(j, sd->groups->cpumask) { struct sched_domain *sd1; sd1 = &per_cpu(core_domains, j); /* * for each core we will add once * to the group in physical domain */ if (j != first_cpu(sd1->groups->cpumask)) continue; if (sched_smt_power_savings) sd->groups->cpu_power += sd1->groups->cpu_power; else sd->groups->cpu_power += SCHED_LOAD_SCALE; } } else /* * This has to be < 2 * SCHED_LOAD_SCALE * Lets keep it SCHED_LOAD_SCALE, so that * while calculating NUMA group's cpu_power * we can simply do * numa_group->cpu_power += phys_group->cpu_power; * * See "only add power once for each physical pkg" * comment below */ sd->groups->cpu_power = SCHED_LOAD_SCALE; #else int power; sd = &per_cpu(phys_domains, i); if (sched_smt_power_savings) power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask); else power = SCHED_LOAD_SCALE; sd->groups->cpu_power = power; #endif } #ifdef CONFIG_NUMA for (i = 0; i < MAX_NUMNODES; i++) init_numa_sched_groups_power(sched_group_nodes[i]); if (sched_group_allnodes) { int group = cpu_to_allnodes_group(first_cpu(*cpu_map)); struct sched_group *sg = &sched_group_allnodes[group]; init_numa_sched_groups_power(sg); } #endif /* Attach the domains */ for_each_cpu_mask(i, *cpu_map) { struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i); #elif defined(CONFIG_SCHED_MC) sd = &per_cpu(core_domains, i); #else sd = &per_cpu(phys_domains, i); #endif cpu_attach_domain(sd, i); } /* * Tune cache-hot values: */ calibrate_migration_costs(cpu_map); return 0; error: free_sched_groups(cpu_map); return -ENOMEM; } /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. 
*/ static int arch_init_sched_domains(const cpumask_t *cpu_map) { cpumask_t cpu_default_map; int err; /* * Setup mask for cpus without special case scheduling requirements. * For now this just excludes isolated cpus, but could be used to * exclude other special cases in the future. */ cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); err = build_sched_domains(&cpu_default_map); return err; } static void arch_destroy_sched_domains(const cpumask_t *cpu_map) { free_sched_groups(cpu_map); } /* * Detach sched domains from a group of cpus specified in cpu_map * These cpus will now be attached to the NULL domain */ static void detach_destroy_domains(const cpumask_t *cpu_map) { int i; for_each_cpu_mask(i, *cpu_map) cpu_attach_domain(NULL, i); synchronize_sched(); arch_destroy_sched_domains(cpu_map); } /* * Partition sched domains as specified by the cpumasks below. * This attaches all cpus from the cpumasks to the NULL domain, * waits for a RCU quiescent period, recalculates sched * domain information and then attaches them back to the * correct sched domains * Call with hotplug lock held */ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) { cpumask_t change_map; int err = 0; cpus_and(*partition1, *partition1, cpu_online_map); cpus_and(*partition2, *partition2, cpu_online_map); cpus_or(change_map, *partition1, *partition2); /* Detach sched domains from all of the affected cpus */ detach_destroy_domains(&change_map); if (!cpus_empty(*partition1)) err = build_sched_domains(partition1); if (!err && !cpus_empty(*partition2)) err = build_sched_domains(partition2); return err; } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) int arch_reinit_sched_domains(void) { int err; lock_cpu_hotplug(); detach_destroy_domains(&cpu_online_map); err = arch_init_sched_domains(&cpu_online_map); unlock_cpu_hotplug(); return err; } static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) { int ret; if (buf[0] != '0' && buf[0] != '1') return -EINVAL; if (smt) sched_smt_power_savings = (buf[0] == '1'); else sched_mc_power_savings = (buf[0] == '1'); ret = arch_reinit_sched_domains(); return ret ? ret : count; } int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) { int err = 0; #ifdef CONFIG_SCHED_SMT if (smt_capable()) err = sysfs_create_file(&cls->kset.kobj, &attr_sched_smt_power_savings.attr); #endif #ifdef CONFIG_SCHED_MC if (!err && mc_capable()) err = sysfs_create_file(&cls->kset.kobj, &attr_sched_mc_power_savings.attr); #endif return err; } #endif #ifdef CONFIG_SCHED_MC static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) { return sprintf(page, "%u\n", sched_mc_power_savings); } static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 0); } SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, sched_mc_power_savings_store); #endif #ifdef CONFIG_SCHED_SMT static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) { return sprintf(page, "%u\n", sched_smt_power_savings); } static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 1); } SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, sched_smt_power_savings_store); #endif #ifdef CONFIG_HOTPLUG_CPU /* * Force a reinitialization of the sched domains hierarchy. 
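
/*
 * Illustrative userspace sketch (standalone program, not part of this file):
 * sched_power_savings_store() above accepts only '0' or '1'. The sysfs path
 * below is an assumption based on the cpu sysdev class the attribute is
 * attached to; adjust it if your system exposes the file elsewhere.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/sched_mc_power_savings";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1", f);		/* enable multi-core power-savings balancing */
	fclose(f);
	return 0;
}
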
The domains * and groups cannot be updated in place without racing with the balancing * code, so we temporarily attach all running cpus to the NULL domain * which will prevent rebalancing while the sched domains are recalculated. */ static int update_sched_domains(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action) { case CPU_UP_PREPARE: case CPU_DOWN_PREPARE: detach_destroy_domains(&cpu_online_map); return NOTIFY_OK; case CPU_UP_CANCELED: case CPU_DOWN_FAILED: case CPU_ONLINE: case CPU_DEAD: /* * Fall through and re-initialise the domains. */ break; default: return NOTIFY_DONE; } /* The hotplug lock is already held by cpu_up/cpu_down */ arch_init_sched_domains(&cpu_online_map); return NOTIFY_OK; } #endif void __init sched_init_smp(void) { lock_cpu_hotplug(); arch_init_sched_domains(&cpu_online_map); unlock_cpu_hotplug(); /* XXX: Theoretical race here - CPU may be hotplugged now */ hotcpu_notifier(update_sched_domains, 0); } #else void __init sched_init_smp(void) { } #endif /* CONFIG_SMP */ int in_sched_functions(unsigned long addr) { /* Linker adds these: start and end of __sched functions */ extern char __sched_text_start[], __sched_text_end[]; return in_lock_functions(addr) || (addr >= (unsigned long)__sched_text_start && addr < (unsigned long)__sched_text_end); } void __init sched_init(void) { int i, j, k; for_each_possible_cpu(i) { struct prio_array *array; struct rq *rq; rq = cpu_rq(i); spin_lock_init(&rq->lock); lockdep_set_class(&rq->lock, &rq->rq_lock_key); rq->nr_running = 0; rq->active = rq->arrays; rq->expired = rq->arrays + 1; rq->best_expired_prio = MAX_PRIO; #ifdef CONFIG_SMP rq->sd = NULL; for (j = 1; j < 3; j++) rq->cpu_load[j] = 0; rq->active_balance = 0; rq->push_cpu = 0; rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); #endif atomic_set(&rq->nr_iowait, 0); for (j = 0; j < 2; j++) { array = rq->arrays + j; for (k = 0; k < MAX_PRIO; k++) { INIT_LIST_HEAD(array->queue + k); __clear_bit(k, array->bitmap); } // delimiter for bitsearch __set_bit(MAX_PRIO, array->bitmap); } } set_load_weight(&init_task); /* * The boot idle thread does lazy MMU switching as well: */ atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current); /* * Make us the idle thread. Technically, schedule() should not be * called from this thread, however somewhere below it might be, * but because we are the idle thread, we just pick up running again * when this runqueue becomes "idle". 
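
/*
 * Illustrative userspace sketch (standalone program, not part of this file;
 * the helpers are simplified stand-ins for the kernel bitmap API): the
 * delimiter bit that sched_init() sets at MAX_PRIO in each prio_array bitmap
 * guarantees that the "find first set bit" search always terminates, and a
 * result of MAX_PRIO means the array holds no runnable tasks.
 */
#include <stdio.h>
#include <string.h>

#define MAX_PRIO	140
#define LONG_BITS	(8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS	((MAX_PRIO + 1 + LONG_BITS - 1) / LONG_BITS)

static void set_bit_(int nr, unsigned long *map)
{
	map[nr / LONG_BITS] |= 1UL << (nr % LONG_BITS);
}

static int find_first_bit_(const unsigned long *map, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (map[i / LONG_BITS] & (1UL << (i % LONG_BITS)))
			return i;
	return size;
}

int main(void)
{
	unsigned long bitmap[BITMAP_LONGS];

	memset(bitmap, 0, sizeof(bitmap));
	set_bit_(MAX_PRIO, bitmap);		/* delimiter for bitsearch */

	printf("empty array -> first bit %d\n",
	       find_first_bit_(bitmap, MAX_PRIO + 1));

	set_bit_(120, bitmap);			/* a task at priority 120 */
	printf("with a task -> first bit %d\n",
	       find_first_bit_(bitmap, MAX_PRIO + 1));
	return 0;
}
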
*/ init_idle(current, smp_processor_id()); } #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line) { #ifdef in_atomic static unsigned long prev_jiffy; /* ratelimiting */ if ((in_atomic() || irqs_disabled()) && system_state == SYSTEM_RUNNING && !oops_in_progress) { if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; printk(KERN_ERR "BUG: sleeping function called from invalid" " context at %s:%d\n", file, line); printk("in_atomic():%d, irqs_disabled():%d\n", in_atomic(), irqs_disabled()); dump_stack(); } #endif } EXPORT_SYMBOL(__might_sleep); #endif #ifdef CONFIG_MAGIC_SYSRQ void normalize_rt_tasks(void) { struct prio_array *array; struct task_struct *p; unsigned long flags; struct rq *rq; read_lock_irq(&tasklist_lock); for_each_process(p) { if (!rt_task(p)) continue; spin_lock_irqsave(&p->pi_lock, flags); rq = __task_rq_lock(p); array = p->array; if (array) deactivate_task(p, task_rq(p)); __setscheduler(p, SCHED_NORMAL, 0); if (array) { __activate_task(p, task_rq(p)); resched_task(rq->curr); } __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); } read_unlock_irq(&tasklist_lock); } #endif /* CONFIG_MAGIC_SYSRQ */ #ifdef CONFIG_IA64 /* * These functions are only useful for the IA64 MCA handling. * * They can only be called when the whole system has been * stopped - every CPU needs to be quiescent, and no scheduling * activity can take place. Using them for anything else would * be a serious bug, and as a result, they aren't even visible * under any other configuration. */ /** * curr_task - return the current task for a given cpu. * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ struct task_struct *curr_task(int cpu) { return cpu_curr(cpu); } /** * set_curr_task - set the current task for a given cpu. * @cpu: the processor in question. * @p: the task pointer to set. * * Description: This function must only be used when non-maskable interrupts * are serviced on a separate stack. It allows the architecture to switch the * notion of the current task on a cpu in a non-blocking manner. This function * must be called with all CPU's synchronized, and interrupts disabled, the * and caller must save the original value of the current task (see * curr_task() above) and restore that value before reenabling interrupts and * re-starting the system. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ void set_curr_task(int cpu, struct task_struct *p) { cpu_curr(cpu) = p; } #endif
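
/*
 * Illustrative userspace sketch (standalone program, not part of this file):
 * the per-task analogue of what normalize_rt_tasks() above does under
 * SysRq - pushing a task back from an RT policy to the normal scheduler
 * class with priority 0, here via sched_setscheduler(2).
 */
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct sched_param sp = { .sched_priority = 0 };
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 0;	/* 0 == self */

	if (sched_setscheduler(pid, SCHED_OTHER, &sp) != 0) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("pid %d now SCHED_OTHER\n", pid ? (int)pid : (int)getpid());
	return 0;
}
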