aboutsummaryrefslogblamecommitdiffstats
path: root/fs/inode.c
blob: bf21dc6d0dbd752d243a2d3b6d92d2d554dc0381 (plain) (tree)
1
2
3
4
5
6





                          















                              
                          
                        




                                              



























                                                                      

                                               














                                                     
                                                        









                                                       
                                                                          





                                                                         
                                  





                                 
                                                      


                                                        
                                                                
                                                  
                                                       




                                                  
                                                                                    






















                                                                      




























                                                                                        
                                        






                                           
                                         


















                                                        
                                    









                                                              

                                                
                                          
      



                               
                                                                                  



































                                                                 


                                              



                                                          
                                                    
                                 
                                                    

























                                                                        






                                                 























                                                                             
                                                                               









                                                                 











                                                                












                                                                              
                                  
                               
                                              



                                                           
                                    




                                 

































                                                                              
                                  


























                                                                               




                                                     



                                                           


                                 
                                    










                                                                             
                                                       
































                                                                                                                                   
                                                                       























                                                                                                          
                                                                       




















































































































































                                                                                                                                                                         
                                                                        


















































                                                                              
                                                        



















                                                                      
                                                                          











                                                                              
                                                  
                                                                             
                                           







                                                 

                                             




















                                                                              
                                                       
















                                                           
                                                           










                                                                            































                                                                               









                                                                      
                                              























































                                                                                
                                               































































































                                                                               
                                    


                                 





                                                                  



                                                          
                              

                                                        
                                   
         



                                       
                                          












                                                                 

                                                       
                               


                                              

                                          
                                               




                                               
                                    




                                                        
                             







                                                
                                            






                                            

                                      
































































                                                                             


                    

                                                      
                                




                                                                             
                                                             
 
                                              

                            
                             
                       




                                                                             
                       



                                                            




                                                                                












                                                                             
         

                                           




                                                  
 
                           

   

                                                                   
  





                                                                         

   
                                        
 
                                                           








                                           

                                                     
                            
         
 

                                                     
                            
         
 



                                             
                                
















                                                           




























                                                                          








                                                                      





                                                         
















                                                                          



































                                                                    









































                                                                  






                                                                          





































                                                                            
/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
#include <linux/mount.h>

/*
 * This is needed for the following functions:
 *  - inode_has_buffers
 *  - invalidate_inode_buffers
 *  - invalidate_bdev
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
/* Aliases so hash() below reads naturally; both back onto the statics. */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

/* Sized at boot by the hash-table setup code; read-only afterwards. */
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable __read_mostly;

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);

/*
 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path.  Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 */
static DEFINE_MUTEX(iprune_mutex);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

/* Slab cache backing inodes for filesystems without their own allocator. */
static struct kmem_cache * inode_cachep __read_mostly;

/*
 * Allocate and minimally initialize an inode for @sb.  Uses the
 * filesystem's own allocator when it provides ->alloc_inode, otherwise
 * falls back to the generic slab cache.  Returns NULL on allocation or
 * security-initialization failure.
 */
static struct inode *alloc_inode(struct super_block *sb)
{
	static const struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (inode) {
		struct address_space * const mapping = &inode->i_data;

		inode->i_sb = sb;
		inode->i_blkbits = sb->s_blocksize_bits;
		inode->i_flags = 0;
		/* New inode starts with one reference held by the caller. */
		atomic_set(&inode->i_count, 1);
		inode->i_op = &empty_iops;
		inode->i_fop = &empty_fops;
		inode->i_nlink = 1;
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_bytes = 0;
		inode->i_generation = 0;
#ifdef CONFIG_QUOTA
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;
		inode->i_rdev = 0;
		inode->dirtied_when = 0;
		if (security_inode_alloc(inode)) {
			/* Undo the allocation the same way it was made. */
			if (inode->i_sb->s_op->destroy_inode)
				inode->i_sb->s_op->destroy_inode(inode);
			else
				kmem_cache_free(inode_cachep, (inode));
			return NULL;
		}

		mapping->a_ops = &empty_aops;
 		mapping->host = inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = &default_backing_dev_info;

		/*
		 * If the block_device provides a backing_dev_info for client
		 * inodes then use that.  Otherwise the inode share the bdev's
		 * backing_dev_info.
		 */
		if (sb->s_bdev) {
			struct backing_dev_info *bdi;

			bdi = sb->s_bdev->bd_inode_backing_dev_info;
			if (!bdi)
				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
			mapping->backing_dev_info = bdi;
		}
		inode->i_private = NULL;
		inode->i_mapping = mapping;
	}
	return inode;
}

/*
 * Free an inode, dispatching to the filesystem's ->destroy_inode when
 * present, otherwise back to the generic slab cache (mirrors the two
 * allocation paths in alloc_inode).  The inode must have no attached
 * buffers at this point.
 */
void destroy_inode(struct inode *inode) 
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, (inode));
}


/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab aware of that.
 *
 * Zeroes the whole inode and then sets up the embedded locks, list
 * heads and the address_space radix tree / prio tree roots.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	mutex_init(&inode->i_mutex);
	init_rwsem(&inode->i_alloc_sem);
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	rwlock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	spin_lock_init(&inode->i_lock);
	i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
	INIT_LIST_HEAD(&inode->inotify_watches);
	mutex_init(&inode->inotify_mutex);
#endif
}

EXPORT_SYMBOL(inode_init_once);

/*
 * Slab constructor callback: only run the one-time inode init when the
 * slab layer is actually constructing the object (not verifying it).
 */
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct inode * inode = (struct inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(inode);
}

/*
 * inode_lock must be held
 *
 * Take a reference on @inode.  If the count was already non-zero this
 * is a plain increment.  If it was zero the inode was sitting on the
 * unused list: move it back to inode_in_use (unless it is dirty or
 * locked, in which case it stays on its sb's dirty list) and account
 * for it leaving the unused pool.
 */
void __iget(struct inode * inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_LOCK)))
		list_move(&inode->i_list, &inode_in_use);
	inodes_stat.nr_unused--;
}

/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	might_sleep();
	invalidate_inode_buffers(inode);
       
	/* All pagecache must be gone and I_FREEING set by the caller. */
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* Wait for any in-flight I_LOCK holder before tearing down. */
	wait_on_inode(inode);
	DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	/* Detach from the block/char device maps before final teardown. */
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);
	inode->i_state = I_CLEAR;
}

EXPORT_SYMBOL(clear_inode);

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 *
 * For each inode: truncate remaining pagecache, clear_inode() it, then
 * briefly take inode_lock to unhash it and remove it from its sb list
 * before destroying it.  nr_inodes is adjusted once at the end.
 */
static void dispose_list(struct list_head *head)
{
	int nr_disposed = 0;

	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_entry(head->next, struct inode, i_list);
		list_del(&inode->i_list);

		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);

		spin_lock(&inode_lock);
		hlist_del_init(&inode->i_hash);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_lock);

		/* Wake anyone in __wait_on_freeing_inode() for this inode. */
		wake_up_inode(inode);
		destroy_inode(inode);
		nr_disposed++;
	}
	spin_lock(&inode_lock);
	inodes_stat.nr_inodes -= nr_disposed;
	spin_unlock(&inode_lock);
}

/*
 * Invalidate all inodes for a device.
 *
 * Walks the per-sb inode list @head; unreferenced inodes are marked
 * I_FREEING and moved to @dispose for the caller to free, referenced
 * ones make the function report busy.  Returns 1 if any inode was
 * still in use, 0 otherwise.  Called with inode_lock held.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		/*
		 * We can reschedule here without worrying about the list's
		 * consistency because the per-sb list of inodes must not
		 * change during umount anymore, and because iprune_mutex keeps
		 * shrink_icache_memory() away.
		 */
		cond_resched_lock(&inode_lock);

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_sb_list);
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}

/**
 *	invalidate_inodes	- discard the inodes on a device
 *	@sb: superblock
 *
 *	Discard all of the inodes for a given superblock. If the discard
 *	fails because there are busy inodes then a non zero value is returned.
 *	If the discard is successful all the inodes have been discarded.
 *
 *	iprune_mutex is taken to exclude concurrent icache shrinking (see
 *	the comment at its definition); inotify watches are removed first.
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	/* Free the collected inodes outside inode_lock. */
	dispose_list(&throw_away);
	mutex_unlock(&iprune_mutex);

	return busy;
}

EXPORT_SYMBOL(invalidate_inodes);

/*
 * Report whether an unused-list inode may still be freed after the
 * pagecache/buffer stripping in prune_icache: it must carry no state
 * bits, no attached buffers, no references and no cached pages.
 * Returns 1 when freeable, 0 otherwise.
 */
static int can_unuse(struct inode *inode)
{
	return !inode->i_state &&
	       !inode_has_buffers(inode) &&
	       !atomic_read(&inode->i_count) &&
	       !inode->i_data.nrpages;
}

/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to
 * a temporary list and then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  We expect the final iput() on that inode to add it to
 * the front of the inode_unused list.  So look for it there and if the
 * inode is still freeable, proceed.  The right inode is found 99.9% of the
 * time in testing on a 4-way.
 *
 * If the inode has metadata buffers attached to mapping->private_list then
 * try to remove them.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_pruned = 0;
	int nr_scanned;
	unsigned long reap = 0;

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_unused))
			break;

		/* Scan from the cold (tail) end of the LRU. */
		inode = list_entry(inode_unused.prev, struct inode, i_list);

		if (inode->i_state || atomic_read(&inode->i_count)) {
			/* Still pinned: rotate to the head and move on. */
			list_move(&inode->i_list, &inode_unused);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			/*
			 * Pin the inode and drop the lock so we can strip
			 * its buffers and pagecache (which may sleep).
			 */
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_inode_pages(&inode->i_data);
			iput(inode);
			spin_lock(&inode_lock);

			/* iput() should have put it back at the list head. */
			if (inode != list_entry(inode_unused.next,
						struct inode, i_list))
				continue;	/* wrong inode or list_empty */
			if (!can_unuse(inode))
				continue;
		}
		list_move(&inode->i_list, &freeable);
		inode->i_state |= I_FREEING;
		nr_pruned++;
	}
	inodes_stat.nr_unused -= nr_pruned;
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
	mutex_unlock(&iprune_mutex);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
	 	 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	/* Shrinker contract: report remaining reclaimable objects, scaled. */
	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 *
 * Walks hash chain @head for an inode of @sb matching @test(@data).
 * If a match is being freed, wait for it to go away and rescan.
 * Returns the matching inode or NULL.  @test runs under inode_lock,
 * so it must not sleep.
 */
static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
{
	struct hlist_node *node;
	struct inode * inode = NULL;

repeat:
	hlist_for_each (node, head) { 
		inode = hlist_entry(node, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			/* Drops and retakes inode_lock; chain may change. */
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	/* node is NULL iff the loop ran off the end without a match. */
	return node ? inode : NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 *
 * Matches purely on (sb, ino); same freeing-wait/rescan and refcount
 * caveats as find_inode.  Called with inode_lock held.
 */
static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode * inode = NULL;

repeat:
	hlist_for_each (node, head) {
		inode = hlist_entry(node, struct inode, i_hash);
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.
 *
 *	The inode is put on the in-use and per-sb lists and given the next
 *	value of the (inode_lock-protected) last_ino counter; it is NOT
 *	hashed.  Returns NULL if allocation fails.
 */
struct inode *new_inode(struct super_block *sb)
{
	static unsigned long last_ino;
	struct inode * inode;

	spin_lock_prefetch(&inode_lock);
	
	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		inodes_stat.nr_inodes++;
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_sb_list, &sb->s_inodes);
		inode->i_ino = ++last_ino;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}

EXPORT_SYMBOL(new_inode);

/*
 * Clear I_LOCK|I_NEW on a freshly created inode and wake any waiters.
 */
void unlock_new_inode(struct inode *inode)
{
	/*
	 * This is special!  We do not need the spinlock
	 * when clearing I_LOCK, because we're guaranteed
	 * that nobody else tries to do anything about the
	 * state of the inode when it is locked, as we
	 * just created it (so there can be no old holders
	 * that haven't tested I_LOCK).
	 */
	inode->i_state &= ~(I_LOCK|I_NEW);
	wake_up_inode(inode);
}

EXPORT_SYMBOL(unlock_new_inode);

/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 *
 * Allocate an inode, then under inode_lock recheck the hash: if nobody
 * raced us, initialize via @set, hash it and return it locked (I_NEW).
 * If somebody did, drop ours and return the existing inode after
 * waiting for it to be unlocked.  Returns NULL on allocation or @set
 * failure.
 */
static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
{
	struct inode * inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_lock);
	destroy_inode(inode);
	return NULL;
}

/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 *
 * Same allocate-then-recheck dance as get_new_inode, but keyed purely
 * on @ino with no test/set callbacks.
 */
static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
{
	struct inode * inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}

/*
 * Map (@sb, @hashval) to a bucket index in inode_hashtable.  Mixes the
 * superblock pointer into the hash so equal inode numbers on different
 * filesystems land in different chains, then folds with the golden
 * ratio prime and masks down to table size.
 */
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long h;

	h = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	h ^= (h ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS;
	return h & I_HASHMASK;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter;
	struct inode *inode;
	struct hlist_head *head;
	ino_t res;

	spin_lock(&inode_lock);
	for (;;) {
		/* Skip (or wrap) the counter past the reserved range. */
		if (counter <= max_reserved) {
			counter = max_reserved + 1;
			continue;
		}
		res = counter++;
		/* Candidate is usable only if no inode already has it. */
		head = inode_hashtable + hash(sb, res);
		inode = find_inode_fast(sb, head, res);
		if (!inode)
			break;
	}
	spin_unlock(&inode_lock);
	return res;
}

EXPORT_SYMBOL(iunique);

/*
 * Take a reference on @inode unless it is already on its way to being
 * freed, in which case return NULL.
 */
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode is not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	return inode;
}

EXPORT_SYMBOL(igrab);

/**
 * ifind - internal function, you want ilookup5() or iget5().
 * @sb:		super block of file system to search
 * @head:       the head of the list to search
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 * @wait:	if true wait for the inode to be unlocked, if false do not
 *
 * ifind() searches for the inode specified by @data in the inode
 * cache. This is a generalized version of ifind_fast() for file systems where
 * the inode number is not sufficient for unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
/*
 * Core cache lookup for ilookup5()/iget5(): search chain @head for an
 * inode of @sb matching @test(@data).  On a hit the inode is returned
 * with an extra reference; when @wait is set we also wait for I_LOCK
 * to clear first.  Returns NULL on a miss.  @test runs under
 * inode_lock and must not sleep.
 */
static struct inode *ifind(struct super_block *sb,
		struct hlist_head *head, int (*test)(struct inode *, void *),
		void *data, const int wait)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, head, test, data);
	if (inode)
		__iget(inode);
	spin_unlock(&inode_lock);

	if (inode && likely(wait))
		wait_on_inode(inode);
	return inode;
}

/**
 * ifind_fast - internal function, you want ilookup() or iget().
 * @sb:		super block of file system to search
 * @head:       head of the list to search
 * @ino:	inode number to search for
 *
 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 * file systems where the inode number is sufficient for unique identification
 * of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count, after waiting for any pending I_LOCK to clear.
 *
 * Otherwise NULL is returned.
 */
static struct inode *ifind_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode)
		__iget(inode);
	spin_unlock(&inode_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.  Note, the inode lock is not waited upon so you have to be
 * very careful what you do with the returned inode.  You probably should be
 * using ilookup5() instead.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	/* Non-waiting lookup: caller must cope with a still-locked inode. */
	return ifind(sb, inode_hashtable + hash(sb, hashval), test, data, 0);
}

EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode lock is waited upon and the inode is
 * returned with an incremented reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	/* Last argument 1: wait until any I_LOCK holder is done. */
	return ifind(sb, inode_hashtable + hash(sb, hashval), test, data, 1);
}

EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
 * This is for file systems where the inode number is sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	/* Fast path: the inode number alone identifies the inode. */
	return ifind_fast(sb, inode_hashtable + hash(sb, ino), ino);
}

EXPORT_SYMBOL(ilookup);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * This is iget() without the read_inode() portion of get_new_inode().
 *
 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache and if present it is returned with an increased
 * reference count. This is a generalized version of iget_locked() for file
 * systems where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, get_new_inode() is called to allocate a new
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_lock held, so can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *cached;

	/* Cache hit: ifind() returns the inode with a reference held. */
	cached = ifind(sb, head, test, data, 1);
	if (cached)
		return cached;

	/*
	 * Cache miss.  get_new_inode() re-runs the search itself, so a
	 * racing insert while we blocked above is handled correctly.
	 */
	return get_new_inode(sb, head, test, set, data);
}

EXPORT_SYMBOL(iget5_locked);

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * This is iget() without the read_inode() portion of get_new_inode_fast().
 *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 * The file system gets to fill it in before unlocking it via
 * unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *cached;

	/* Cache hit: returned with an elevated reference count. */
	cached = ifind_fast(sb, head, ino);
	if (cached)
		return cached;

	/*
	 * Cache miss.  get_new_inode_fast() repeats the lookup under the
	 * lock, so a concurrent insert while we slept is not a problem.
	 */
	return get_new_inode_fast(sb, head, ino);
}

EXPORT_SYMBOL(iget_locked);

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *head;

	/* Pick the chain from @hashval, then link under inode_lock. */
	head = inode_hashtable + hash(inode->i_sb, hashval);
	spin_lock(&inode_lock);
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	/*
	 * hlist_del_init() (rather than hlist_del()) leaves i_hash in a
	 * state where hlist_unhashed() is true, which the forget/delete
	 * paths rely on.
	 */
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(remove_inode_hash);

/*
 * Tell the filesystem that this inode is no longer of any interest and should
 * be completely destroyed.
 *
 * We leave the inode in the inode hash table until *after* the filesystem's
 * ->delete_inode completes.  This ensures that an iget (such as nfsd might
 * instigate) will always find up-to-date information either in the hash or on
 * disk.
 *
 * I_FREEING is set so that no-one will take a new reference to the inode while
 * it is being deleted.
 */
void generic_delete_inode(struct inode *inode)
{
	struct super_operations *op = inode->i_sb->s_op;

	/*
	 * NOTE(lock protocol): we are entered with inode_lock held (see
	 * iput_final) and drop it below; I_FREEING is set first so no new
	 * reference can be taken once the lock is released.
	 */
	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);

	security_inode_delete(inode);

	if (op->delete_inode) {
		void (*delete)(struct inode *) = op->delete_inode;
		if (!is_bad_inode(inode))
			DQUOT_INIT(inode);
		/* Filesystems implementing their own
		 * s_op->delete_inode are required to call
		 * truncate_inode_pages and clear_inode()
		 * internally */
		delete(inode);
	} else {
		/* Default: drop the pagecache and clear the inode here. */
		truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
	}
	/*
	 * Only now unhash: keeps the inode findable (e.g. by nfsd) until
	 * ->delete_inode has finished, per the comment above this function.
	 */
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
	/* Wake anyone in __wait_on_freeing_inode() before freeing. */
	wake_up_inode(inode);
	/* clear_inode() must have left exactly I_CLEAR set. */
	BUG_ON(inode->i_state != I_CLEAR);
	destroy_inode(inode);
}

EXPORT_SYMBOL(generic_delete_inode);

/*
 * Last reference dropped on a still-linked inode: keep it cached if the
 * superblock is active, otherwise write it back and destroy it.
 * NOTE(lock protocol): called with inode_lock held; releases it.
 */
static void generic_forget_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!hlist_unhashed(&inode->i_hash)) {
		if (!(inode->i_state & (I_DIRTY|I_LOCK)))
			list_move(&inode->i_list, &inode_unused);
		inodes_stat.nr_unused++;
		if (!sb || (sb->s_flags & MS_ACTIVE)) {
			/* Filesystem still mounted: leave inode cached. */
			spin_unlock(&inode_lock);
			return;
		}
		/*
		 * Superblock is going away: flush the inode.  I_WILL_FREE
		 * marks it for the writeback code while we drop the lock
		 * around the (sleeping) write_inode_now() call.
		 */
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode_lock);
		inode->i_state &= ~I_WILL_FREE;
		inodes_stat.nr_unused--;
		hlist_del_init(&inode->i_hash);
	}
	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);
	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	/* Wake waiters in __wait_on_freeing_inode() before freeing. */
	wake_up_inode(inode);
	destroy_inode(inode);
}

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
void generic_drop_inode(struct inode *inode)
{
	/* Unlinked inodes get deleted; linked ones are merely forgotten. */
	if (inode->i_nlink)
		generic_forget_inode(inode);
	else
		generic_delete_inode(inode);
}

EXPORT_SYMBOL_GPL(generic_drop_inode);

/*
 * Called when we're dropping the last reference
 * to an inode. 
 *
 * Call the FS "drop()" function, defaulting to
 * the legacy UNIX filesystem behaviour..
 *
 * NOTE! NOTE! NOTE! We're called with the inode lock
 * held, and the drop function is supposed to release
 * the lock!
 */
static inline void iput_final(struct inode *inode)
{
	struct super_operations *op = inode->i_sb->s_op;

	/* Prefer the filesystem's drop_inode, fall back to the generic one. */
	if (op && op->drop_inode)
		op->drop_inode(inode);
	else
		generic_drop_inode(inode);
}

/**
 *	iput	- put an inode 
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	struct super_operations *op;

	if (!inode)
		return;

	op = inode->i_sb->s_op;

	/* A fully-cleared inode must not be iput() again. */
	BUG_ON(inode->i_state == I_CLEAR);

	if (op && op->put_inode)
		op->put_inode(inode);

	/* On the last reference we end up holding inode_lock. */
	if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
		iput_final(inode);
}

EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the 
 *	file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	struct address_space *mapping = inode->i_mapping;

	/* Filesystems without ->bmap report block 0. */
	if (!mapping->a_ops->bmap)
		return 0;
	return mapping->a_ops->bmap(mapping, block);
}
EXPORT_SYMBOL(bmap);

/**
 *	touch_atime	-	update the access time
 *	@mnt: mount the inode is accessed on
 *	@dentry: dentry accessed
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;
	int is_dir = S_ISDIR(inode->i_mode);

	/* Read-only media and explicit noatime markers suppress updates. */
	if (IS_RDONLY(inode))
		return;
	if (inode->i_flags & S_NOATIME)
		return;
	if (inode->i_sb->s_flags & MS_NOATIME)
		return;
	if (is_dir && (inode->i_sb->s_flags & MS_NODIRATIME))
		return;

	/* nfsd may call in without a vfsmount. */
	if (mnt) {
		if (mnt->mnt_flags & MNT_NOATIME)
			return;
		if (is_dir && (mnt->mnt_flags & MNT_NODIRATIME))
			return;
		if (mnt->mnt_flags & MNT_RELATIME) {
			/*
			 * relatime: only touch atime when it is older than
			 * both mtime and ctime.
			 */
			if (timespec_compare(&inode->i_mtime,
					     &inode->i_atime) < 0 &&
			    timespec_compare(&inode->i_ctime,
					     &inode->i_atime) < 0)
				return;
		}
	}

	now = current_fs_time(inode->i_sb);
	/* Avoid dirtying the inode when atime is already current. */
	if (timespec_equal(&inode->i_atime, &now))
		return;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL(touch_atime);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore update via this function with the
 *	S_NOCTIME inode flag, e.g. for network filesystem where these
 *	timestamps are handled by the server.
 */

void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int dirty = 0;

	/* S_NOCMTIME (e.g. server-managed timestamps) and RO media opt out. */
	if (IS_NOCMTIME(inode) || IS_RDONLY(inode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!timespec_equal(&inode->i_mtime, &now)) {
		inode->i_mtime = now;
		dirty = 1;
	}
	if (!timespec_equal(&inode->i_ctime, &now)) {
		inode->i_ctime = now;
		dirty = 1;
	}

	/* Only dirty the inode when a timestamp actually changed. */
	if (dirty)
		mark_inode_dirty_sync(inode);
}

EXPORT_SYMBOL(file_update_time);

/* Returns 1 if updates to this inode must hit stable storage synchronously. */
int inode_needs_sync(struct inode *inode)
{
	return IS_SYNC(inode) ||
	       (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode));
}

EXPORT_SYMBOL(inode_needs_sync);

/*
 *	Quota functions that want to walk the inode lists..
 */
#ifdef CONFIG_QUOTA

void remove_dquot_ref(struct super_block *sb, int type,
			struct list_head *tofree_head)
{
	struct inode *inode;

	if (!sb->dq_op)
		return;	/* nothing to do */
	spin_lock(&inode_lock);	/* This lock is for inodes code */

	/*
	 * We don't have to lock against quota code - test IS_QUOTAINIT is
	 * just for speedup...
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
		if (!IS_NOQUOTA(inode))
			remove_inode_dquot_ref(inode, type, tofree_head);

	spin_unlock(&inode_lock);
}

#endif

/*
 * wait_on_bit() action routine used by the inode code: just give up the
 * CPU until woken.  Always returns 0 (never interrupted).
 */
int inode_wait(void *word)
{
	schedule();
	return 0;
}

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_LOCK is not set initially, a call to
 * wake_up_inode() after removing from the hash list will DTRT.
 *
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
	/*
	 * Queue ourselves on the __I_LOCK waitqueue *before* dropping
	 * inode_lock, so a wake_up_inode() that runs in the window between
	 * the unlock and schedule() still finds us and we cannot miss it.
	 */
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	/* Re-acquire for the caller, which expects inode_lock held. */
	spin_lock(&inode_lock);
}

/*
 * Wake anyone sleeping on this inode's __I_LOCK bit (wait_on_inode(),
 * __wait_on_freeing_inode()).
 */
void wake_up_inode(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LOCK);
}

/*
 * We rarely want to lock two inodes that do not have a parent/child
 * relationship (such as directory, child inode) simultaneously. The
 * vast majority of file systems should be able to get along fine
 * without this. Do not use these functions except as a last resort.
 */
void inode_double_lock(struct inode *inode1, struct inode *inode2)
{
	struct inode *first, *second;

	/* Degenerate cases: at most one distinct inode to lock. */
	if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
		struct inode *only = inode1 ? inode1 : inode2;

		if (only)
			mutex_lock(&only->i_mutex);
		return;
	}

	/* Lock in address order to avoid ABBA deadlocks. */
	if (inode1 < inode2) {
		first = inode1;
		second = inode2;
	} else {
		first = inode2;
		second = inode1;
	}
	mutex_lock_nested(&first->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&second->i_mutex, I_MUTEX_CHILD);
}
EXPORT_SYMBOL(inode_double_lock);

void inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		mutex_unlock(&inode1->i_mutex);

	if (inode2 && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(inode_double_unlock);

static __initdata unsigned long ihash_entries;
/* Parse the "ihash_entries=" boot parameter into ihash_entries. */
static int __init set_ihash_entries(char *str)
{
	if (str == NULL)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int i;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	/* Start every hash chain out empty. */
	for (i = 0; i < (1 << i_hash_shift); i++)
		INIT_HLIST_HEAD(&inode_hashtable[i]);
}

void __init inode_init(unsigned long mempages)
{
	int i;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once,
					 NULL);
	/* Let the VM reclaim cached inodes under memory pressure. */
	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	/* Start every hash chain out empty. */
	for (i = 0; i < (1 << i_hash_shift); i++)
		INIT_HLIST_HEAD(&inode_hashtable[i]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	/* Wire up the file operations matching the special-file type. */
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode)) {
		inode->i_fop = &def_fifo_fops;
	} else if (S_ISSOCK(mode)) {
		inode->i_fop = &bad_sock_fops;
	} else {
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
		       mode);
	}
}
EXPORT_SYMBOL(init_special_inode);
j/boardobj.c?id=8340d234d78a7d0f46c11a584de538148b78b7cb#n14'>14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <nvgpu/kmem.h> 24#include <nvgpu/gk20a.h> 25 26#include "boardobj.h" 27#include "ctrl/ctrlboardobj.h" 28 29int boardobj_construct_super(struct gk20a *g, struct boardobj **ppboardobj, 30 u16 size, void *args) 31{ 32 struct boardobj *pboardobj = NULL; 33 struct boardobj *devtmp = (struct boardobj *)args; 34 35 nvgpu_log_info(g, " "); 36 37 if (devtmp == NULL) { 38 return -EINVAL; 39 } 40 41 if (*ppboardobj == NULL) { 42 *ppboardobj = nvgpu_kzalloc(g, size); 43 if (*ppboardobj == NULL) { 44 return -ENOMEM; 45 } 46 (*ppboardobj)->allocated = true; 47 } 48 49 pboardobj = *ppboardobj; 50 pboardobj->g = g; 51 pboardobj->type = devtmp->type; 52 pboardobj->idx = CTRL_BOARDOBJ_IDX_INVALID; 53 pboardobj->type_mask = BIT(pboardobj->type) | devtmp->type_mask; 54 55 pboardobj->implements = boardobj_implements_super; 56 pboardobj->destruct = boardobj_destruct_super; 57 pboardobj->pmudatainit = boardobj_pmudatainit_super; 58 59 nvgpu_list_add(&pboardobj->node, &g->boardobj_head); 60 61 return 0; 62} 63 64int boardobj_destruct_super(struct boardobj *pboardobj) 65{ 66 struct gk20a *g = pboardobj->g; 67 68 nvgpu_log_info(g, " "); 69 if (pboardobj == NULL) { 70 return -EINVAL; 71 } 72 73 nvgpu_list_del(&pboardobj->node); 74 if (pboardobj->allocated) { 75 nvgpu_kfree(pboardobj->g, pboardobj); 76 } 77 78 return 0; 79} 80 81bool boardobj_implements_super(struct gk20a *g, struct boardobj *pboardobj, 82 u8 type) 83{ 84 
nvgpu_log_info(g, " "); 85 86 return (0 != (pboardobj->type_mask & BIT(type))); 87} 88 89int boardobj_pmudatainit_super(struct gk20a *g, struct boardobj *pboardobj, 90 struct nv_pmu_boardobj *pmudata) 91{ 92 nvgpu_log_info(g, " "); 93 if (pboardobj == NULL) { 94 return -EINVAL; 95 } 96 if (pmudata == NULL) { 97 return -EINVAL; 98 } 99 pmudata->type = pboardobj->type; 100 nvgpu_log_info(g, " Done"); 101 return 0; 102}
diff --git a/include/boardobj/boardobj.h b/include/boardobj/boardobj.h
deleted file mode 100644
index b1be9bd..0000000
--- a/include/boardobj/boardobj.h
+++ /dev/null
@@ -1,104 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21*/ 22 23#ifndef NVGPU_BOARDOBJ_H 24#define NVGPU_BOARDOBJ_H 25 26#include <nvgpu/list.h> 27#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 28 29#include "ctrl/ctrlboardobj.h" 30 31struct boardobj; 32struct nvgpu_list_node; 33 34/* 35* check whether the specified BOARDOBJ object implements the queried 36* type/class enumeration. 37*/ 38typedef bool boardobj_implements(struct gk20a *g, struct boardobj *pboardobj, 39 u8 type); 40 41/* 42* Fills out the appropriate the nv_pmu_xxxx_device_desc_<xyz> driver->PMU 43* description structure, describing this BOARDOBJ board device to the PMU. 44* 45*/ 46typedef int boardobj_pmudatainit(struct gk20a *g, struct boardobj *pboardobj, 47 struct nv_pmu_boardobj *pmudata); 48 49/* 50* Constructor for the base Board Object. 
Called by each device-specific 51* implementation of the BOARDOBJ interface to initialize the board object. 52*/ 53typedef int boardobj_construct(struct gk20a *g, struct boardobj **pboardobj, 54 u16 size, void *args); 55 56/* 57* Destructor for the base board object. Called by each device-Specific 58* implementation of the BOARDOBJ interface to destroy the board object. 59* This has to be explicitly set by each device that extends from the 60* board object. 61*/ 62typedef int boardobj_destruct(struct boardobj *pboardobj); 63 64/* 65* Base Class for all physical or logical device on the PCB. 66* Contains fields common to all devices on the board. Specific types of 67* devices may extend this object adding any details specific to that 68* device or device-type. 69*/ 70 71struct boardobj { 72 struct gk20a *g; 73 74 u8 type; /*type of the device*/ 75 u8 idx; /*index of boardobj within in its group*/ 76 /* true if allocated in constructor. destructor should free */ 77 u8 allocated; 78 u32 type_mask; /*mask of types this boardobjimplements*/ 79 boardobj_implements *implements; 80 boardobj_destruct *destruct; 81 /* 82 * Access interface apis which will be overridden by the devices 83 * that inherit from BOARDOBJ 84 */ 85 boardobj_pmudatainit *pmudatainit; 86 struct nvgpu_list_node node; 87}; 88 89boardobj_construct boardobj_construct_super; 90boardobj_destruct boardobj_destruct_super; 91boardobj_implements boardobj_implements_super; 92boardobj_pmudatainit boardobj_pmudatainit_super; 93 94#define BOARDOBJ_GET_TYPE(pobj) (((struct boardobj *)(pobj))->type) 95#define BOARDOBJ_GET_IDX(pobj) (((struct boardobj *)(pobj))->idx) 96 97static inline struct boardobj * 98boardobj_from_node(struct nvgpu_list_node *node) 99{ 100 return (struct boardobj *) 101 ((uintptr_t)node - offsetof(struct boardobj, node)); 102}; 103 104#endif /* NVGPU_BOARDOBJ_H */
diff --git a/include/boardobj/boardobjgrp.c b/include/boardobj/boardobjgrp.c
deleted file mode 100644
index 6832070..0000000
--- a/include/boardobj/boardobjgrp.c
+++ /dev/null
@@ -1,1046 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22#include <nvgpu/bug.h> 23#include <nvgpu/gk20a.h> 24 25#include "boardobjgrp.h" 26#include "ctrl/ctrlboardobj.h" 27#include "boardobj.h" 28 29static boardobjgrp_objinsert boardobjgrp_objinsert_final; 30static boardobjgrp_objgetbyidx boardobjgrp_objgetbyidx_final; 31static boardobjgrp_objgetnext boardobjgrp_objgetnext_final; 32static boardobjgrp_objremoveanddestroy boardobjgrp_objremoveanddestroy_final; 33static boardobjgrp_pmudatainstget boardobjgrp_pmudatainstget_stub; 34static boardobjgrp_pmustatusinstget boardobjgrp_pmustatusinstget_stub; 35static int boardobjgrp_pmucmdsend(struct gk20a *g, 36 struct boardobjgrp *pboardobjgrp, 37 struct boardobjgrp_pmu_cmd *pcmd); 38static int boardobjgrp_pmucmdsend_rpc(struct gk20a *g, 39 struct boardobjgrp *pboardobjgrp, 40 struct boardobjgrp_pmu_cmd *pcmd, 41 bool copy_out); 42struct boardobjgrp_pmucmdhandler_params { 43 /* Pointer to the BOARDOBJGRP associated with this CMD */ 44 struct boardobjgrp *pboardobjgrp; 45 /* Pointer to structure representing this NV_PMU_BOARDOBJ_CMD_GRP */ 46 struct boardobjgrp_pmu_cmd *pcmd; 47 /* Boolean indicating whether the PMU successfully handled the CMD */ 48 u32 success; 49}; 50 51int boardobjgrp_construct_super(struct gk20a *g, 52 struct boardobjgrp *pboardobjgrp) 53{ 54 nvgpu_log_info(g, " "); 55 56 if (pboardobjgrp == NULL) { 57 return -EINVAL; 58 } 59 60 if (pboardobjgrp->ppobjects == NULL) { 61 return -EINVAL; 62 } 63 64 if (pboardobjgrp->mask == NULL) { 65 return -EINVAL; 66 } 67 68 pboardobjgrp->g = g; 69 pboardobjgrp->objmask = 0; 70 71 pboardobjgrp->classid = 0; 72 pboardobjgrp->pmu.unitid = BOARDOBJGRP_UNIT_ID_INVALID; 73 pboardobjgrp->pmu.classid = BOARDOBJGRP_GRP_CLASS_ID_INVALID; 74 pboardobjgrp->pmu.bset = false; 75 pboardobjgrp->pmu.rpc_func_id = BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID; 76 pboardobjgrp->pmu.set.id = BOARDOBJGRP_GRP_CMD_ID_INVALID; 77 pboardobjgrp->pmu.getstatus.id = BOARDOBJGRP_GRP_CMD_ID_INVALID; 78 79 /* Initialize basic interfaces */ 80 
pboardobjgrp->destruct = boardobjgrp_destruct_super; 81 pboardobjgrp->objinsert = boardobjgrp_objinsert_final; 82 pboardobjgrp->objgetbyidx = boardobjgrp_objgetbyidx_final; 83 pboardobjgrp->objgetnext = boardobjgrp_objgetnext_final; 84 pboardobjgrp->objremoveanddestroy = 85 boardobjgrp_objremoveanddestroy_final; 86 87 pboardobjgrp->pmuinithandle = boardobjgrp_pmuinithandle_impl; 88 pboardobjgrp->pmuhdrdatainit = boardobjgrp_pmuhdrdatainit_super; 89 pboardobjgrp->pmudatainit = boardobjgrp_pmudatainit_super; 90 pboardobjgrp->pmuset = 91 g->ops.pmu_ver.boardobj.boardobjgrp_pmuset_impl; 92 pboardobjgrp->pmugetstatus = 93 g->ops.pmu_ver.boardobj.boardobjgrp_pmugetstatus_impl; 94 95 pboardobjgrp->pmudatainstget = boardobjgrp_pmudatainstget_stub; 96 pboardobjgrp->pmustatusinstget = boardobjgrp_pmustatusinstget_stub; 97 98 pboardobjgrp->objmaxidx = CTRL_BOARDOBJ_IDX_INVALID; 99 pboardobjgrp->bconstructed = true; 100 101 nvgpu_list_add(&pboardobjgrp->node, &g->boardobjgrp_head); 102 103 return 0; 104} 105 106int boardobjgrp_destruct_impl(struct boardobjgrp *pboardobjgrp) 107{ 108 struct gk20a *g = pboardobjgrp->g; 109 110 nvgpu_log_info(g, " "); 111 112 if (pboardobjgrp == NULL) { 113 return -EINVAL; 114 } 115 116 if (!pboardobjgrp->bconstructed) { 117 return 0; 118 } 119 120 return pboardobjgrp->destruct(pboardobjgrp); 121} 122 123int boardobjgrp_destruct_super(struct boardobjgrp *pboardobjgrp) 124{ 125 struct boardobj *pboardobj; 126 struct gk20a *g = pboardobjgrp->g; 127 int status = 0; 128 int stat; 129 u8 index; 130 131 nvgpu_log_info(g, " "); 132 133 if (pboardobjgrp->mask == NULL) { 134 return -EINVAL; 135 } 136 if (pboardobjgrp->ppobjects == NULL) { 137 return -EINVAL; 138 } 139 140 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct boardobj*, pboardobj, index) { 141 stat = pboardobjgrp->objremoveanddestroy(pboardobjgrp, index); 142 if (status == 0) { 143 status = stat; 144 } 145 146 pboardobjgrp->ppobjects[index] = NULL; 147 pboardobjgrp->objmask &= ~BIT(index); 148 } 149 
150 pboardobjgrp->objmask = 0; 151 152 if (pboardobjgrp->objmaxidx != CTRL_BOARDOBJ_IDX_INVALID) { 153 if (status == 0) { 154 status = -EINVAL; 155 } 156 157 WARN_ON(true); 158 } 159 160 /* Destroy the PMU CMD data */ 161 stat = boardobjgrp_pmucmd_destroy_impl(g, &pboardobjgrp->pmu.set); 162 if (status == 0) { 163 status = stat; 164 } 165 166 stat = boardobjgrp_pmucmd_destroy_impl(g, &pboardobjgrp->pmu.getstatus); 167 if (status == 0) { 168 status = stat; 169 } 170 171 nvgpu_list_del(&pboardobjgrp->node); 172 173 pboardobjgrp->bconstructed = false; 174 175 return status; 176} 177 178int boardobjgrp_pmucmd_construct_impl(struct gk20a *g, struct boardobjgrp 179 *pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid, 180 u16 hdrsize, u16 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id) 181{ 182 nvgpu_log_info(g, " "); 183 184 /* Copy the parameters into the CMD*/ 185 cmd->id = id; 186 cmd->msgid = msgid; 187 cmd->hdrsize = (u8) hdrsize; 188 cmd->entrysize = (u8) entrysize; 189 cmd->fbsize = fbsize; 190 191 return 0; 192} 193 194int boardobjgrp_pmucmd_construct_impl_v1(struct gk20a *g, struct boardobjgrp 195 *pboardobjgrp, struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid, 196 u16 hdrsize, u16 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id) 197{ 198 nvgpu_log_fn(g, " "); 199 200 /* Copy the parameters into the CMD*/ 201 cmd->dmem_buffer_size = ((hdrsize > entrysize) ? 
hdrsize : entrysize); 202 cmd->super_surface_offset = ss_offset; 203 pboardobjgrp->pmu.rpc_func_id = rpc_func_id; 204 cmd->fbsize = fbsize; 205 206 nvgpu_log_fn(g, "DONE"); 207 return 0; 208} 209 210int boardobjgrp_pmucmd_destroy_impl(struct gk20a *g, 211 struct boardobjgrp_pmu_cmd *cmd) 212{ 213 struct nvgpu_mem *mem = &cmd->surf.sysmem_desc; 214 215 nvgpu_pmu_surface_free(g, mem); 216 return 0; 217} 218 219int is_boardobjgrp_pmucmd_id_valid_v0(struct gk20a *g, 220 struct boardobjgrp *pboardobjgrp, 221 struct boardobjgrp_pmu_cmd *pcmd) 222{ 223 int err = 0; 224 225 if (pcmd->id == BOARDOBJGRP_GRP_CMD_ID_INVALID) { 226 err = -EINVAL; 227 } 228 229 return err; 230} 231 232int is_boardobjgrp_pmucmd_id_valid_v1(struct gk20a *g, 233 struct boardobjgrp *pboardobjgrp, 234 struct boardobjgrp_pmu_cmd *cmd) 235{ 236 int err = 0; 237 238 if (pboardobjgrp->pmu.rpc_func_id == 239 BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID) { 240 err = -EINVAL; 241 } 242 243 return err; 244} 245 246int boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g, 247 struct boardobjgrp *pboardobjgrp, 248 struct boardobjgrp_pmu_cmd *pcmd) 249{ 250 int status = 0; 251 struct nvgpu_mem *sysmem_desc = &pcmd->surf.sysmem_desc; 252 253 nvgpu_log_info(g, " "); 254 255 if (g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g, 256 pboardobjgrp, pcmd)) { 257 goto boardobjgrp_pmucmd_pmuinithandle_exit; 258 } 259 260 if (!pcmd->fbsize) { 261 goto boardobjgrp_pmucmd_pmuinithandle_exit; 262 } 263 264 nvgpu_pmu_sysmem_surface_alloc(g, sysmem_desc, pcmd->fbsize); 265 /* we only have got sysmem later this will get copied to vidmem 266 surface*/ 267 pcmd->surf.vidmem_desc.size = 0; 268 269 pcmd->buf = (struct nv_pmu_boardobjgrp_super *)sysmem_desc->cpu_va; 270 271boardobjgrp_pmucmd_pmuinithandle_exit: 272 return status; 273} 274 275int boardobjgrp_pmuinithandle_impl(struct gk20a *g, 276 struct boardobjgrp *pboardobjgrp) 277{ 278 int status = 0; 279 280 nvgpu_log_info(g, " "); 281 282 status = 
boardobjgrp_pmucmd_pmuinithandle_impl(g, pboardobjgrp, 283 &pboardobjgrp->pmu.set); 284 if (status) { 285 nvgpu_err(g, "failed to init pmu set cmd"); 286 goto boardobjgrp_pmuinithandle_exit; 287 } 288 289 status = boardobjgrp_pmucmd_pmuinithandle_impl(g, pboardobjgrp, 290 &pboardobjgrp->pmu.getstatus); 291 if (status) { 292 nvgpu_err(g, "failed to init get status command"); 293 goto boardobjgrp_pmuinithandle_exit; 294 } 295 296 /* If the GRP_SET CMD has not been allocated, nothing left to do. */ 297 if ((g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g, 298 pboardobjgrp, &pboardobjgrp->pmu.set))|| 299 (BOARDOBJGRP_IS_EMPTY(pboardobjgrp))) { 300 goto boardobjgrp_pmuinithandle_exit; 301 } 302 303 /* Send the BOARDOBJGRP to the pmu via RM_PMU_BOARDOBJ_CMD_GRP. */ 304 status = pboardobjgrp->pmuset(g, pboardobjgrp); 305 if (status) { 306 nvgpu_err(g, "failed to send boardobg grp to PMU"); 307 } 308 309boardobjgrp_pmuinithandle_exit: 310 return status; 311} 312 313 314int boardobjgrp_pmuhdrdatainit_super(struct gk20a *g, struct boardobjgrp 315 *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu, 316 struct boardobjgrpmask *mask) 317{ 318 nvgpu_log_info(g, " "); 319 320 if (pboardobjgrp == NULL) { 321 return -EINVAL; 322 } 323 if (pboardobjgrppmu == NULL) { 324 return -EINVAL; 325 } 326 pboardobjgrppmu->type = pboardobjgrp->type; 327 pboardobjgrppmu->class_id = pboardobjgrp->classid; 328 pboardobjgrppmu->obj_slots = BOARDOBJGRP_PMU_SLOTS_GET(pboardobjgrp); 329 pboardobjgrppmu->flags = 0; 330 331 nvgpu_log_info(g, " Done"); 332 return 0; 333} 334 335static int boardobjgrp_pmudatainstget_stub(struct gk20a *g, 336 struct nv_pmu_boardobjgrp *boardobjgrppmu, 337 struct nv_pmu_boardobj **ppboardobjpmudata, u8 idx) 338{ 339 nvgpu_log_info(g, " "); 340 return -EINVAL; 341} 342 343 344static int boardobjgrp_pmustatusinstget_stub(struct gk20a *g, 345 void *pboardobjgrppmu, 346 struct nv_pmu_boardobj_query **ppBoardobjpmustatus, u8 idx) 347{ 348 nvgpu_log_info(g, 
" "); 349 return -EINVAL; 350} 351 352int boardobjgrp_pmudatainit_legacy(struct gk20a *g, 353 struct boardobjgrp *pboardobjgrp, 354 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 355{ 356 int status = 0; 357 struct boardobj *pboardobj = NULL; 358 struct nv_pmu_boardobj *ppmudata = NULL; 359 u8 index; 360 361 nvgpu_log_info(g, " "); 362 363 if (pboardobjgrp == NULL) { 364 return -EINVAL; 365 } 366 if (pboardobjgrppmu == NULL) { 367 return -EINVAL; 368 } 369 370 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)pboardobjgrppmu, 371 pboardobjgrp->objmask); 372 373 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, index, pboardobjgrp->objmask) { 374 /* Obtain pointer to the current instance of the Object from the Group */ 375 pboardobj = pboardobjgrp->objgetbyidx(pboardobjgrp, index); 376 if (NULL == pboardobj) { 377 nvgpu_err(g, "could not get object instance"); 378 status = -EINVAL; 379 goto boardobjgrppmudatainit_legacy_done; 380 } 381 382 status = pboardobjgrp->pmudatainstget(g, 383 (struct nv_pmu_boardobjgrp *)pboardobjgrppmu, 384 &ppmudata, index); 385 if (status) { 386 nvgpu_err(g, "could not get object instance"); 387 goto boardobjgrppmudatainit_legacy_done; 388 } 389 390 /* Initialize the PMU Data */ 391 status = pboardobj->pmudatainit(g, pboardobj, ppmudata); 392 if (status) { 393 nvgpu_err(g, 394 "could not parse pmu for device %d", index); 395 goto boardobjgrppmudatainit_legacy_done; 396 } 397 } 398 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END 399 400boardobjgrppmudatainit_legacy_done: 401 nvgpu_log_info(g, " Done"); 402 return status; 403} 404 405int boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp 406 *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 407{ 408 int status = 0; 409 struct boardobj *pboardobj = NULL; 410 struct nv_pmu_boardobj *ppmudata = NULL; 411 u8 index; 412 413 nvgpu_log_info(g, " "); 414 415 if (pboardobjgrp == NULL) { 416 return -EINVAL; 417 } 418 if (pboardobjgrppmu == NULL) { 419 return -EINVAL; 420 } 421 422 /* 
Initialize the PMU HDR data.*/ 423 status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp, pboardobjgrppmu, 424 pboardobjgrp->mask); 425 if (status) { 426 nvgpu_err(g, "unable to init boardobjgrp pmuhdr data"); 427 goto boardobjgrppmudatainit_super_done; 428 } 429 430 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct boardobj*, pboardobj, index) { 431 status = pboardobjgrp->pmudatainstget(g, 432 (struct nv_pmu_boardobjgrp *)pboardobjgrppmu, 433 &ppmudata, index); 434 if (status) { 435 nvgpu_err(g, "could not get object instance"); 436 goto boardobjgrppmudatainit_super_done; 437 } 438 439 /* Initialize the PMU Data and send to PMU */ 440 status = pboardobj->pmudatainit(g, pboardobj, ppmudata); 441 if (status) { 442 nvgpu_err(g, 443 "could not parse pmu for device %d", index); 444 goto boardobjgrppmudatainit_super_done; 445 } 446 } 447 448boardobjgrppmudatainit_super_done: 449 nvgpu_log_info(g, " Done"); 450 return status; 451} 452 453static int check_boardobjgrp_param(struct gk20a *g, 454 struct boardobjgrp *pboardobjgrp) 455{ 456 if (pboardobjgrp == NULL) { 457 return -EINVAL; 458 } 459 460 if (!pboardobjgrp->bconstructed) { 461 return -EINVAL; 462 } 463 464 if (pboardobjgrp->pmu.unitid == BOARDOBJGRP_UNIT_ID_INVALID) { 465 return -EINVAL; 466 } 467 468 if (pboardobjgrp->pmu.classid == BOARDOBJGRP_GRP_CLASS_ID_INVALID) { 469 return -EINVAL; 470 } 471 472 /* If no objects in the group, return early */ 473 if (BOARDOBJGRP_IS_EMPTY(pboardobjgrp)) { 474 return -EINVAL; 475 } 476 477 return 0; 478} 479 480int boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp) 481{ 482 int status = 0; 483 struct boardobjgrp_pmu_cmd *pcmd = 484 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); 485 486 nvgpu_log_info(g, " "); 487 488 if (check_boardobjgrp_param(g, pboardobjgrp)) { 489 return -EINVAL; 490 } 491 492 if (pboardobjgrp->pmu.set.id == BOARDOBJGRP_GRP_CMD_ID_INVALID) { 493 return -EINVAL; 494 } 495 496 if ((pcmd->hdrsize == 0) || 497 (pcmd->entrysize == 0) || 
498 (pcmd->buf == NULL)) { 499 return -EINVAL; 500 } 501 502 /* Initialize PMU buffer with BOARDOBJGRP data. */ 503 memset(pcmd->buf, 0x0, pcmd->fbsize); 504 status = pboardobjgrp->pmudatainit(g, pboardobjgrp, 505 pcmd->buf); 506 if (status) { 507 nvgpu_err(g, "could not parse pmu data"); 508 goto boardobjgrp_pmuset_exit; 509 } 510 511 /* 512 * Reset the boolean that indicates set status for most recent 513 * instance of BOARDOBJGRP. 514 */ 515 pboardobjgrp->pmu.bset = false; 516 517 /* 518 * alloc mem in vidmem & copy constructed pmu boardobjgrp data from 519 * sysmem to vidmem 520 */ 521 if (pcmd->surf.vidmem_desc.size == 0) { 522 nvgpu_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc, 523 pcmd->fbsize); 524 } 525 nvgpu_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize); 526 527 /* Send the SET PMU CMD to the PMU */ 528 status = boardobjgrp_pmucmdsend(g, pboardobjgrp, 529 pcmd); 530 if (status) { 531 nvgpu_err(g, "could not send SET CMD to PMU"); 532 goto boardobjgrp_pmuset_exit; 533 } 534 535 pboardobjgrp->pmu.bset = true; 536 537boardobjgrp_pmuset_exit: 538 return status; 539} 540 541int boardobjgrp_pmuset_impl_v1(struct gk20a *g, 542 struct boardobjgrp *pboardobjgrp) 543{ 544 struct nvgpu_pmu *pmu = &g->pmu; 545 int status = 0; 546 struct boardobjgrp_pmu_cmd *pcmd = 547 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); 548 549 nvgpu_log_info(g, " "); 550 551 if (check_boardobjgrp_param(g, pboardobjgrp)) { 552 return -EINVAL; 553 } 554 555 if ((pcmd->buf == NULL) && 556 (pboardobjgrp->pmu.rpc_func_id == 557 BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID)) { 558 return -EINVAL; 559 } 560 561 /* Initialize PMU buffer with BOARDOBJGRP data. 
*/ 562 memset(pcmd->buf, 0x0, pcmd->fbsize); 563 status = pboardobjgrp->pmudatainit(g, pboardobjgrp, 564 pcmd->buf); 565 if (status) { 566 nvgpu_err(g, "could not parse pmu data"); 567 goto boardobjgrp_pmuset_exit; 568 } 569 570 /* 571 * Reset the boolean that indicates set status 572 * for most recent instance of BOARDOBJGRP. 573 */ 574 pboardobjgrp->pmu.bset = false; 575 576 /* 577 * copy constructed pmu boardobjgrp data from 578 * sysmem to pmu super surface present in FB 579 */ 580 nvgpu_mem_wr_n(g, &pmu->super_surface_buf, 581 pcmd->super_surface_offset, pcmd->buf, 582 pcmd->fbsize); 583 584 /* Send the SET PMU CMD to the PMU using RPC*/ 585 status = boardobjgrp_pmucmdsend_rpc(g, pboardobjgrp, 586 pcmd, false); 587 if (status) { 588 nvgpu_err(g, "could not send SET CMD to PMU"); 589 goto boardobjgrp_pmuset_exit; 590 } 591 592 pboardobjgrp->pmu.bset = true; 593 594boardobjgrp_pmuset_exit: 595 return status; 596} 597 598int 599boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp, 600 struct boardobjgrpmask *mask) 601{ 602 int status = 0; 603 struct boardobjgrp_pmu_cmd *pcmd = 604 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus); 605 struct boardobjgrp_pmu_cmd *pset = 606 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); 607 608 nvgpu_log_info(g, " "); 609 610 if (check_boardobjgrp_param(g, pboardobjgrp)) { 611 return -EINVAL; 612 } 613 614 if (pset->id == BOARDOBJGRP_GRP_CMD_ID_INVALID) { 615 return -EINVAL; 616 } 617 618 if ((pcmd->hdrsize == 0) || 619 (pcmd->entrysize == 0) || 620 (pcmd->buf == NULL)) { 621 return -EINVAL; 622 } 623 624 /* 625 * Can only GET_STATUS if the BOARDOBJGRP has been previously SET to the 626 * PMU 627 */ 628 if (!pboardobjgrp->pmu.bset) { 629 return -EINVAL; 630 } 631 632 /* 633 * alloc mem in vidmem & copy constructed pmu boardobjgrp data from 634 * sysmem to vidmem 635 */ 636 if (pcmd->surf.vidmem_desc.size == 0) { 637 nvgpu_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc, 638 
pcmd->fbsize); 639 } 640 641 /* 642 * Initialize PMU buffer with the mask of BOARDOBJGRPs for which to 643 * retrieve status 644 */ 645 646 memset(pcmd->buf, 0x0, pcmd->fbsize); 647 status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp, 648 pcmd->buf, mask); 649 if (status) { 650 nvgpu_err(g, "could not init PMU HDR data"); 651 goto boardobjgrp_pmugetstatus_exit; 652 } 653 654 nvgpu_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pset->buf, pset->hdrsize); 655 /* Send the GET_STATUS PMU CMD to the PMU */ 656 status = boardobjgrp_pmucmdsend(g, pboardobjgrp, 657 &pboardobjgrp->pmu.getstatus); 658 if (status) { 659 nvgpu_err(g, "could not send GET_STATUS cmd to PMU"); 660 goto boardobjgrp_pmugetstatus_exit; 661 } 662 663 /*copy the data back to sysmem buffer that belongs to command*/ 664 nvgpu_mem_rd_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize); 665 666boardobjgrp_pmugetstatus_exit: 667 return status; 668} 669 670int 671boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjgrp, 672 struct boardobjgrpmask *mask) 673{ 674 struct nvgpu_pmu *pmu = &g->pmu; 675 int status = 0; 676 struct boardobjgrp_pmu_cmd *pcmd = 677 (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus); 678 679 nvgpu_log_info(g, " "); 680 681 if (check_boardobjgrp_param(g, pboardobjgrp)) { 682 return -EINVAL; 683 } 684 685 if ((pcmd->buf == NULL) && 686 (pboardobjgrp->pmu.rpc_func_id == 687 BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID)) { 688 return -EINVAL; 689 } 690 691 /* 692 * Can only GET_STATUS if the BOARDOBJGRP has been 693 * previously SET to the PMU 694 */ 695 if (!pboardobjgrp->pmu.bset) { 696 return -EINVAL; 697 } 698 699 /* 700 * Initialize PMU buffer with the mask of 701 * BOARDOBJGRPs for which to retrieve status 702 */ 703 memset(pcmd->buf, 0x0, pcmd->fbsize); 704 status = pboardobjgrp->pmuhdrdatainit(g, pboardobjgrp, 705 pcmd->buf, mask); 706 if (status) { 707 nvgpu_err(g, "could not init PMU HDR data"); 708 goto boardobjgrp_pmugetstatus_exit; 709 } 710 711 
/* 712 * copy constructed pmu boardobjgrp data from 713 * sysmem to pmu super surface present in FB 714 */ 715 nvgpu_mem_wr_n(g, &pmu->super_surface_buf, pcmd->super_surface_offset, 716 pcmd->buf, pcmd->fbsize); 717 /* Send the GET_STATUS PMU CMD to the PMU */ 718 status = boardobjgrp_pmucmdsend_rpc(g, pboardobjgrp, 719 pcmd, true); 720 if (status) { 721 nvgpu_err(g, "could not send GET_STATUS cmd to PMU"); 722 goto boardobjgrp_pmugetstatus_exit; 723 } 724 725 /*copy the data back to sysmem buffer that belongs to command*/ 726 nvgpu_mem_rd_n(g, &pmu->super_surface_buf,pcmd->super_surface_offset, 727 pcmd->buf, pcmd->fbsize); 728 729boardobjgrp_pmugetstatus_exit: 730 return status; 731} 732 733static int 734boardobjgrp_objinsert_final(struct boardobjgrp *pboardobjgrp, 735 struct boardobj *pboardobj, u8 index) 736{ 737 struct gk20a *g = pboardobjgrp->g; 738 739 nvgpu_log_info(g, " "); 740 741 if (pboardobjgrp == NULL) { 742 return -EINVAL; 743 } 744 745 if (pboardobj == NULL) { 746 return -EINVAL; 747 } 748 749 if (index > pboardobjgrp->objslots) { 750 return -EINVAL; 751 } 752 753 if (pboardobjgrp->ppobjects[index] != NULL) { 754 return -EINVAL; 755 } 756 757 /* 758 * Check that this BOARDOBJ has not already been added to a 759 * BOARDOBJGRP 760 */ 761 if (pboardobj->idx != CTRL_BOARDOBJ_IDX_INVALID) { 762 return -EINVAL; 763 } 764 765 pboardobjgrp->ppobjects[index] = pboardobj; 766 pboardobjgrp->objmaxidx = (u8)(BOARDOBJGRP_IS_EMPTY(pboardobjgrp) ? 
767 index : max(pboardobjgrp->objmaxidx, index)); 768 pboardobj->idx = index; 769 770 pboardobjgrp->objmask |= BIT(index); 771 772 nvgpu_log_info(g, " Done"); 773 774 return boardobjgrpmask_bitset(pboardobjgrp->mask, index); 775} 776 777static struct boardobj *boardobjgrp_objgetbyidx_final( 778 struct boardobjgrp *pboardobjgrp, u8 index) 779{ 780 if (!boardobjgrp_idxisvalid(pboardobjgrp, index)) { 781 return NULL; 782 } 783 return pboardobjgrp->ppobjects[index]; 784} 785 786static struct boardobj *boardobjgrp_objgetnext_final( 787 struct boardobjgrp *pboardobjgrp, u8 *currentindex, 788 struct boardobjgrpmask *mask) 789{ 790 struct boardobj *pboardobjnext = NULL; 791 u8 objmaxidx; 792 u8 index; 793 794 if (currentindex == NULL) { 795 return NULL; 796 } 797 798 if (pboardobjgrp == NULL) { 799 return NULL; 800 } 801 802 /* Search from next element unless first object was requested */ 803 index = (*currentindex != CTRL_BOARDOBJ_IDX_INVALID) ? 804 (*currentindex + 1) : 0; 805 806 /* For the cases below in which we have to return NULL */ 807 *currentindex = CTRL_BOARDOBJ_IDX_INVALID; 808 809 810 /* Validate provided mask */ 811 if (mask != NULL) { 812 if (!(boardobjgrpmask_sizeeq(pboardobjgrp->mask, mask))) { 813 return NULL; 814 } 815 } 816 817 objmaxidx = pboardobjgrp->objmaxidx; 818 819 if (objmaxidx != CTRL_BOARDOBJ_IDX_INVALID) { 820 for (; index <= objmaxidx; index++) { 821 pboardobjnext = pboardobjgrp->ppobjects[index]; 822 if (pboardobjnext != NULL) { 823 /* Filter results using client provided mask.*/ 824 if (mask != NULL) { 825 if (!boardobjgrpmask_bitget(mask, 826 index)) { 827 pboardobjnext = NULL; 828 continue; 829 } 830 } 831 *currentindex = index; 832 break; 833 } 834 } 835 } 836 837 return pboardobjnext; 838} 839 840static int boardobjgrp_objremoveanddestroy_final( 841 struct boardobjgrp *pboardobjgrp, 842 u8 index) 843{ 844 int status = 0; 845 int stat; 846 struct gk20a *g = pboardobjgrp->g; 847 848 nvgpu_log_info(g, " "); 849 850 if 
(!boardobjgrp_idxisvalid(pboardobjgrp, index)) { 851 return -EINVAL; 852 } 853 854 if (pboardobjgrp->objmaxidx == CTRL_BOARDOBJ_IDX_INVALID) { 855 return -EINVAL; 856 } 857 858 status = pboardobjgrp->ppobjects[index]->destruct( 859 pboardobjgrp->ppobjects[index]); 860 861 pboardobjgrp->ppobjects[index] = NULL; 862 863 pboardobjgrp->objmask &= ~BIT(index); 864 865 stat = boardobjgrpmask_bitclr(pboardobjgrp->mask, index); 866 if (stat) { 867 if (status == 0) { 868 status = stat; 869 } 870 } 871 872 /* objmaxidx requires update only if that very object was removed */ 873 if (pboardobjgrp->objmaxidx == index) { 874 pboardobjgrp->objmaxidx = 875 boardobjgrpmask_bitidxhighest(pboardobjgrp->mask); 876 } 877 878 return status; 879} 880 881void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask) 882{ 883 u32 slots = objmask; 884 885 HIGHESTBITIDX_32(slots); 886 slots++; 887 888 hdr->super.type = CTRL_BOARDOBJGRP_TYPE_E32; 889 hdr->super.class_id = 0; 890 hdr->super.obj_slots = (u8)slots; 891 hdr->obj_mask = objmask; 892} 893 894static void boardobjgrp_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, 895 void *param, u32 handle, u32 status) 896{ 897 struct nv_pmu_boardobj_msg_grp *pgrpmsg; 898 struct boardobjgrp_pmucmdhandler_params *phandlerparams = 899 (struct boardobjgrp_pmucmdhandler_params *)param; 900 struct boardobjgrp *pboardobjgrp = phandlerparams->pboardobjgrp; 901 struct boardobjgrp_pmu_cmd *pgrpcmd = phandlerparams->pcmd; 902 903 nvgpu_log_info(g, " "); 904 905 pgrpmsg = &msg->msg.boardobj.grp; 906 907 if (pgrpmsg->class_id != pboardobjgrp->pmu.classid) { 908 nvgpu_err(g, 909 "Unrecognized GRP type: unit %x class id=0x%02x cmd id %x", 910 msg->hdr.unit_id, pboardobjgrp->pmu.classid, 911 pgrpcmd->id); 912 return; 913 } 914 915 if (msg->msg.boardobj.msg_type != pgrpcmd->msgid) { 916 nvgpu_err(g, 917 "unsupported msg for unit %x class %x cmd id %x msg %x", 918 msg->hdr.unit_id, pboardobjgrp->pmu.classid, 919 pgrpcmd->id, msg->msg.boardobj.msg_type); 
920 return; 921 } 922 923 if (msg->msg.boardobj.grp_set.flcn_status != 0) { 924 nvgpu_err(g, 925 "cmd abort for unit %x class %x cmd id %x status %x", 926 msg->hdr.unit_id, pboardobjgrp->pmu.classid, 927 pgrpcmd->id, 928 msg->msg.boardobj.grp_set.flcn_status); 929 return; 930 } 931 932 phandlerparams->success = pgrpmsg->b_success ? 1 : 0; 933 934 if (!pgrpmsg->b_success) { 935 nvgpu_err(g, 936 "failed GRPCMD: msgtype=0x%x, classid=0x%x, cmd id %x", 937 pgrpmsg->msg_type, pgrpmsg->class_id, 938 pgrpcmd->id); 939 return; 940 } 941} 942 943static int boardobjgrp_pmucmdsend(struct gk20a *g, 944 struct boardobjgrp *pboardobjgrp, 945 struct boardobjgrp_pmu_cmd *pcmd) 946{ 947 struct boardobjgrp_pmucmdhandler_params handlerparams; 948 struct pmu_payload payload; 949 struct nv_pmu_boardobj_cmd_grp *pgrpcmd; 950 struct pmu_cmd cmd; 951 u32 seqdesc; 952 int status = 0; 953 954 nvgpu_log_info(g, " "); 955 956 memset(&payload, 0, sizeof(payload)); 957 memset(&handlerparams, 0, sizeof(handlerparams)); 958 memset(&cmd, 0, sizeof(struct pmu_cmd)); 959 cmd.hdr.unit_id = pboardobjgrp->pmu.unitid; 960 cmd.hdr.size = sizeof(struct nv_pmu_boardobj_cmd_grp) + 961 sizeof(struct pmu_hdr); 962 963 pgrpcmd = &cmd.cmd.boardobj.grp; 964 pgrpcmd->cmd_type = pcmd->id; 965 pgrpcmd->class_id = pboardobjgrp->pmu.classid; 966 pgrpcmd->grp.hdr_size = pcmd->hdrsize; 967 pgrpcmd->grp.entry_size = pcmd->entrysize; 968 969 /* 970 * copy vidmem information to boardobj_cmd_grp 971 */ 972 nvgpu_pmu_surface_describe(g, &pcmd->surf.vidmem_desc, 973 &pgrpcmd->grp.fb); 974 975 /* 976 * PMU reads command from sysmem so assigned 977 * "payload.in.buf = pcmd->buf" 978 * but PMU access pmu boardobjgrp data from vidmem copied above 979 */ 980 payload.in.buf = pcmd->buf; 981 payload.in.size = max(pcmd->hdrsize, pcmd->entrysize); 982 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 983 payload.in.offset = offsetof(struct nv_pmu_boardobj_cmd_grp, grp); 984 985 /* Setup the handler params to 
communicate back results.*/ 986 handlerparams.pboardobjgrp = pboardobjgrp; 987 handlerparams.pcmd = pcmd; 988 handlerparams.success = 0; 989 990 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, 991 PMU_COMMAND_QUEUE_LPQ, 992 boardobjgrp_pmucmdhandler, 993 (void *)&handlerparams, 994 &seqdesc, ~0); 995 if (status) { 996 nvgpu_err(g, 997 "unable to post boardobj grp cmd for unit %x cmd id %x", 998 cmd.hdr.unit_id, pcmd->id); 999 goto boardobjgrp_pmucmdsend_exit; 1000 } 1001 pmu_wait_message_cond(&g->pmu, 1002 gk20a_get_gr_idle_timeout(g), 1003 &handlerparams.success, 1); 1004 if (handlerparams.success == 0) { 1005 nvgpu_err(g, "could not process cmd"); 1006 status = -ETIMEDOUT; 1007 goto boardobjgrp_pmucmdsend_exit; 1008 } 1009 1010boardobjgrp_pmucmdsend_exit: 1011 return status; 1012} 1013 1014static int boardobjgrp_pmucmdsend_rpc(struct gk20a *g, 1015 struct boardobjgrp *pboardobjgrp, 1016 struct boardobjgrp_pmu_cmd *pcmd, 1017 bool copy_out) 1018{ 1019 struct nvgpu_pmu *pmu = &g->pmu; 1020 struct nv_pmu_rpc_struct_board_obj_grp_cmd rpc; 1021 int status = 0; 1022 1023 nvgpu_log_fn(g, " "); 1024 1025 memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_board_obj_grp_cmd)); 1026 1027 rpc.class_id = pboardobjgrp->pmu.classid; 1028 rpc.command_id = copy_out ? 1029 NV_PMU_BOARDOBJGRP_CMD_GET_STATUS : 1030 NV_PMU_BOARDOBJGRP_CMD_SET; 1031 1032 rpc.hdr.unit_id = pboardobjgrp->pmu.unitid; 1033 rpc.hdr.function = pboardobjgrp->pmu.rpc_func_id; 1034 rpc.hdr.flags = 0x0; 1035 1036 status = nvgpu_pmu_rpc_execute(pmu, &(rpc.hdr), 1037 (sizeof(rpc) - sizeof(rpc.scratch)), 1038 pcmd->dmem_buffer_size, 1039 NULL, NULL, copy_out); 1040 1041 if (status) { 1042 nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); 1043 } 1044 1045 return status; 1046}
diff --git a/include/boardobj/boardobjgrp.h b/include/boardobj/boardobjgrp.h
deleted file mode 100644
index cd13b85..0000000
--- a/include/boardobj/boardobjgrp.h
+++ /dev/null
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_BOARDOBJGRP_H
#define NVGPU_BOARDOBJGRP_H

struct boardobjgrp;
struct gk20a;
struct nvgpu_list_node;
struct pmu_surface;


/* ------------------------ Includes ----------------------------------------*/
#include "ctrl/ctrlboardobj.h"
#include "boardobj.h"
#include "boardobjgrpmask.h"
#include <nvgpu/list.h>
#include <nvgpu/pmu.h>

/*
 * Board Object Group destructor.
 */
typedef int boardobjgrp_destruct(struct boardobjgrp *pboardobjgrp);

/*
 * Inserts a previously constructed Board Object into a Board Object Group for
 * tracking. Objects are inserted in the array based on the given index.
 */
typedef int boardobjgrp_objinsert(struct boardobjgrp *pboardobjgrp,
				struct boardobj *pboardobj, u8 index);

/*
 * Retrieves a Board Object from a Board Object Group using the group's index.
 */
typedef struct boardobj *boardobjgrp_objgetbyidx(
				struct boardobjgrp *pBobrdobjgrp, u8 index);

/*
 * Retrieve Board Object immediately following one pointed by @ref pcurrentindex
 * filtered out by the provided mask. If (pMask == NULL) => no filtering.
 */
typedef struct boardobj *boardobjgrp_objgetnext(
				struct boardobjgrp *pboardobjgrp,
				u8 *currentindex, struct boardobjgrpmask *mask);

/*
 * Board Object Group Remover and destructor. This is used to remove and
 * destruct specific entry from the Board Object Group.
 */
typedef int boardobjgrp_objremoveanddestroy(struct boardobjgrp *pboardobjgrp,
				u8 index);

/*
 * BOARDOBJGRP handler for PMU_UNIT_INIT.  Calls the PMU_UNIT_INIT handlers
 * for the constructed PMU CMDs, and then sets the object via the
 * PMU_BOARDOBJ_CMD_GRP interface (if constructed).
 */
typedef int boardobjgrp_pmuinithandle(struct gk20a *g,
				struct boardobjgrp *pboardobjGrp);

/*
 * Fills out the appropriate the PMU_BOARDOBJGRP_<xyz> driver<->PMU description
 * header structure, more specifically a mask of BOARDOBJs.
 */
typedef int boardobjgrp_pmuhdrdatainit(struct gk20a *g,
				struct boardobjgrp *pboardobjgrp,
				struct nv_pmu_boardobjgrp_super *pboardobjgrppmu,
				struct boardobjgrpmask *mask);

/*
 * Fills out the appropriate the PMU_BOARDOBJGRP_<xyz> driver->PMU description
 * structure, describing the BOARDOBJGRP and all of its BOARDOBJs to the PMU.
 */
typedef int boardobjgrp_pmudatainit(struct gk20a *g,
				struct boardobjgrp *pboardobjgrp,
				struct nv_pmu_boardobjgrp_super *pboardobjgrppmu);

/*
 * Sends a BOARDOBJGRP to the PMU via the PMU_BOARDOBJ_CMD_GRP interface.
 * This interface leverages @ref boardobjgrp_pmudatainit to populate the
 * structure.
 */
typedef int boardobjgrp_pmuset(struct gk20a *g,
				struct boardobjgrp *pboardobjgrp);

/*
 * Gets the dynamic status of the PMU BOARDOBJGRP via the
 * PMU_BOARDOBJ_CMD_GRP GET_STATUS interface.
 */
typedef int boardobjgrp_pmugetstatus(struct gk20a *g,
				struct boardobjgrp *pboardobjgrp,
				struct boardobjgrpmask *mask);

/* Fetches the per-object PMU "set" payload pointer at index @ref idx. */
typedef int boardobjgrp_pmudatainstget(struct gk20a *g,
				struct nv_pmu_boardobjgrp *boardobjgrppmu,
				struct nv_pmu_boardobj **ppboardobjpmudata, u8 idx);

/* Fetches the per-object PMU "status" payload pointer at index @ref idx. */
typedef int boardobjgrp_pmustatusinstget(struct gk20a *g, void *pboardobjgrppmu,
				struct nv_pmu_boardobj_query **ppBoardobjpmustatus, u8 idx);

/*
 * Structure describing an PMU CMD for interacting with the representation
 * of this BOARDOBJGRP within the PMU.
 */
struct boardobjgrp_pmu_cmd {
	u8 id;
	u8 msgid;
	u8 hdrsize;
	u8 entrysize;
	u16 dmem_buffer_size;
	u32 super_surface_offset;
	u32 fbsize;
	struct nv_pmu_boardobjgrp_super *buf;
	struct pmu_surface surf;
};

/*
 * Structure of state describing how to communicate with representation of this
 * BOARDOBJGRP in the PMU.
 */
struct boardobjgrp_pmu {
	u8 unitid;
	u8 classid;
	bool bset;
	u8 rpc_func_id;
	struct boardobjgrp_pmu_cmd set;
	struct boardobjgrp_pmu_cmd getstatus;
};

/*
 * Function by which a class which implements BOARDOBJGRP can construct a PMU
 * CMD.  This provides the various information describing the PMU CMD including
 * the CMD and MSG ID and the size of the various structures in the payload.
 */
typedef int boardobjgrp_pmucmd_construct(struct gk20a *g,
			struct boardobjgrp *pboardobjgrp,
			struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid,
			u16 hdrsize, u16 entrysize, u16 fbsize, u32 ss_offset, u8 rpc_func_id);

/*
 * Destroys BOARDOBJGRP PMU SW state.  CMD.
 */
typedef int boardobjgrp_pmucmd_destroy(struct gk20a *g,
			struct boardobjgrp_pmu_cmd *cmd);

/*
 * init handler for the BOARDOBJGRP PMU CMD. Allocates and maps the
 * PMU CMD payload within both the PMU and driver so that it can be referenced
 * at run-time.
 */
typedef int boardobjgrp_pmucmd_pmuinithandle(struct gk20a *g,
			struct boardobjgrp *pboardobjgrp,
			struct boardobjgrp_pmu_cmd *cmd);

/*
 * Base Class Group for all physical or logical device on the PCB.
 * Contains fields common to all devices on the board. Specific types of
 * devices groups may extend this object adding any details specific to that
 * device group or device-type.
 */
struct boardobjgrp {
	struct gk20a *g;
	u32 objmask;
	bool bconstructed;
	u8 type;
	u8 classid;
	struct boardobj **ppobjects;
	struct boardobjgrpmask *mask;
	u8 objslots;
	u8 objmaxidx;
	struct boardobjgrp_pmu pmu;

	/* Basic interfaces */
	boardobjgrp_destruct *destruct;
	boardobjgrp_objinsert *objinsert;
	boardobjgrp_objgetbyidx *objgetbyidx;
	boardobjgrp_objgetnext *objgetnext;
	boardobjgrp_objremoveanddestroy *objremoveanddestroy;

	/* PMU interfaces */
	boardobjgrp_pmuinithandle *pmuinithandle;
	boardobjgrp_pmuhdrdatainit *pmuhdrdatainit;
	boardobjgrp_pmudatainit *pmudatainit;
	boardobjgrp_pmuset *pmuset;
	boardobjgrp_pmugetstatus *pmugetstatus;

	boardobjgrp_pmudatainstget *pmudatainstget;
	boardobjgrp_pmustatusinstget *pmustatusinstget;
	struct nvgpu_list_node node;
};

/*
 * Macro test whether a specified index into the BOARDOBJGRP is valid.
 */
#define boardobjgrp_idxisvalid(_pboardobjgrp, _idx) \
	(((_idx) < (_pboardobjgrp)->objslots) && \
	((_pboardobjgrp)->ppobjects[(_idx)] != NULL))

/*
 * Macro test whether a specified BOARDOBJGRP is empty.
 */
#define BOARDOBJGRP_IS_EMPTY(_pboardobjgrp) \
	((!((_pboardobjgrp)->bconstructed)) || \
	((_pboardobjgrp)->objmaxidx == CTRL_BOARDOBJ_IDX_INVALID))

#define boardobjgrp_objinsert(_pboardobjgrp, _pboardobj, _idx) \
	((_pboardobjgrp)->objinsert((_pboardobjgrp), (_pboardobj), (_idx)))

/*
 * Helper macro to determine the "next" open/empty index after all allocated
 * objects.  This is intended to be used to find the index at which objects can
 * be inserted contiguously (i.e. w/o fear of colliding with existing objects).
 */
#define BOARDOBJGRP_NEXT_EMPTY_IDX(_pboardobjgrp) \
	((CTRL_BOARDOBJ_IDX_INVALID == (_pboardobjgrp)->objmaxidx) ? 0U : \
	((((_pboardobjgrp)->objmaxidx + 1U) >= (_pboardobjgrp)->objslots) ? \
	(u8)CTRL_BOARDOBJ_IDX_INVALID : (u8)((_pboardobjgrp)->objmaxidx + 1U)))

/*
 * Helper macro to determine the number of @ref BOARDOBJ pointers
 * that are required to be allocated in PMU @ref ppObjects.
 */
#define BOARDOBJGRP_PMU_SLOTS_GET(_pboardobjgrp) \
	((CTRL_BOARDOBJ_IDX_INVALID == (_pboardobjgrp)->objmaxidx) ? 0U : \
	(u8)((_pboardobjgrp)->objmaxidx + 1U))

#define BOARDOBJGRP_OBJ_GET_BY_IDX(_pboardobjgrp, _idx) \
	((_pboardobjgrp)->objgetbyidx((_pboardobjgrp), (_idx)))

/*
 * macro to look-up next object while tolerating error if
 * Board Object Group is not constructed.
 */

#define boardobjgrpobjgetnextsafe(_pgrp, _pindex, _pmask) \
	((_pgrp)->bconstructed ? \
	(_pgrp)->objgetnext((_pgrp), (_pindex), (_pmask)) : NULL)

/*
 * Used to traverse all Board Objects stored within @ref _pgrp in the increasing
 * index order.
 * If @ref _pmask is provided only objects specified by the mask are traversed.
 */
#define BOARDOBJGRP_ITERATOR(_pgrp, _ptype, _pobj, _index, _pmask) \
	for (_index = CTRL_BOARDOBJ_IDX_INVALID, \
	_pobj = (_ptype)boardobjgrpobjgetnextsafe((_pgrp), &_index, (_pmask));\
	_pobj != NULL; \
	_pobj = (_ptype)boardobjgrpobjgetnextsafe((_pgrp), &_index, (_pmask)))
#define BOARDOBJGRP_FOR_EACH(_pgrp, _ptype, _pobj, _index) \
	BOARDOBJGRP_ITERATOR(_pgrp, _ptype, _pobj, _index, NULL)

/*
 * Opens a block that visits each set bit index of a mask; must be closed
 * with BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END (the braces pair across the
 * two macros).
 */
#define BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(mask_width, index, mask) \
{ \
	u##mask_width lcl_msk = (u##mask_width)(mask); \
	for (index = 0; lcl_msk != 0U; index++, lcl_msk >>= 1U) { \
		if (((u##mask_width)((u64)1) & lcl_msk) == 0U) { \
			continue; \
		}

#define BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END \
	} \
}


/*!
 * Invalid UNIT_ID.  Used to indicate that the implementing class has not set
 * @ref BOARDOBJGRP::unitId and, thus, certain BOARDOBJGRP PMU interfaces are
 * not supported.
 */
#define BOARDOBJGRP_UNIT_ID_INVALID 255U

/*!
 * Invalid CLASS_ID.  Used to indicate that the implementing class has not set
 * @ref BOARDOBJGRP::grpType and, thus, certain BOARDOBJGRP PMU interfaces are
 * not supported.
 */
#define BOARDOBJGRP_GRP_CLASS_ID_INVALID 255U

/*!
 * Invalid CMD_ID.  Used to indicate that the implementing class has not set
 * @ref BOARDOBJGRP::grpSetCmdId and, thus, certain BOARDOBJGRP PMU interfaces
 * are not supported.
 */
#define BOARDOBJGRP_GRP_CMD_ID_INVALID 255U
#define BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID 255U

/*!
 * Helper macro to construct a BOARDOBJGRP's PMU SW state.
 *
 * @param[out] pboardobjgrp BOARDOBJGRP pointer
 * @param[in] _eng
 *     Implementing engine/unit which manages the BOARDOBJGRP.
 * @param[in] _class
 *     Class ID of BOARDOBJGRP.
 */
#define BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, _ENG, _CLASS) \
do { \
	(pboardobjgrp)->pmu.unitid = PMU_UNIT_##_ENG; \
	(pboardobjgrp)->pmu.classid = \
	NV_PMU_##_ENG##_BOARDOBJGRP_CLASS_ID_##_CLASS; \
} while (0)

#define BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, eng, ENG, \
	class, CLASS) \
	g->ops.pmu_ver.boardobj.boardobjgrp_pmucmd_construct_impl( \
	g,                                                 /* pgpu */ \
	pboardobjgrp,                                      /* pboardobjgrp */ \
	&((pboardobjgrp)->pmu.set),                        /* pcmd */ \
	NV_PMU_##ENG##_CMD_ID_BOARDOBJ_GRP_SET,            /* id */ \
	NV_PMU_##ENG##_MSG_ID_BOARDOBJ_GRP_SET,            /* msgid */ \
	(u32)sizeof(union nv_pmu_##eng##_##class##_boardobjgrp_set_header_aligned), \
	(u32)sizeof(union nv_pmu_##eng##_##class##_boardobj_set_union_aligned), \
	(u32)sizeof(struct nv_pmu_##eng##_##class##_boardobj_grp_set), \
	(u32)offsetof(struct nv_pmu_super_surface, eng.class##_grp_set), \
	NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD)

#define BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, pboardobjgrp, \
	eng, ENG, class, CLASS) \
	g->ops.pmu_ver.boardobj.boardobjgrp_pmucmd_construct_impl( \
	g,                                                 /* pGpu */ \
	pboardobjgrp,                                      /* pBoardObjGrp */ \
	&((pboardobjgrp)->pmu.getstatus),                  /* pCmd */ \
	NV_PMU_##ENG##_CMD_ID_BOARDOBJ_GRP_GET_STATUS,     /* id */ \
	NV_PMU_##ENG##_MSG_ID_BOARDOBJ_GRP_GET_STATUS,     /* msgid */ \
	(u32)sizeof(union nv_pmu_##eng##_##class##_boardobjgrp_get_status_header_aligned), \
	(u32)sizeof(union nv_pmu_##eng##_##class##_boardobj_get_status_union_aligned), \
	(u32)sizeof(struct nv_pmu_##eng##_##class##_boardobj_grp_get_status), \
	(u32)offsetof(struct nv_pmu_super_surface, eng.class##_grp_get_status), \
	NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD)

/* ------------------------ Function Prototypes ----------------------------- */
/* Constructor and destructor */
int boardobjgrp_construct_super(struct gk20a *g,
	struct boardobjgrp *pboardobjgrp);
boardobjgrp_destruct boardobjgrp_destruct_impl;
boardobjgrp_destruct boardobjgrp_destruct_super;

/* PMU_CMD interfaces */
boardobjgrp_pmucmd_construct boardobjgrp_pmucmd_construct_impl;
boardobjgrp_pmucmd_destroy boardobjgrp_pmucmd_destroy_impl;
boardobjgrp_pmucmd_pmuinithandle boardobjgrp_pmucmd_pmuinithandle_impl;

boardobjgrp_pmucmd_construct boardobjgrp_pmucmd_construct_impl_v1;

/* BOARDOBJGRP interfaces */
boardobjgrp_pmuinithandle boardobjgrp_pmuinithandle_impl;
boardobjgrp_pmuhdrdatainit boardobjgrp_pmuhdrdatainit_super;
boardobjgrp_pmudatainit boardobjgrp_pmudatainit_super;

boardobjgrp_pmudatainit boardobjgrp_pmudatainit_legacy;
boardobjgrp_pmuset boardobjgrp_pmuset_impl;
boardobjgrp_pmugetstatus boardobjgrp_pmugetstatus_impl;
boardobjgrp_pmuset boardobjgrp_pmuset_impl_v1;
boardobjgrp_pmugetstatus boardobjgrp_pmugetstatus_impl_v1;

void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask);

/* Replaces n32 with the index of its highest set bit (in place). */
#define HIGHESTBITIDX_32(n32) \
{ \
	u32 count = 0U; \
	while (n32 >>= 1U) { \
		count++; \
	} \
	n32 = count; \
}

/* Lowest set bit of x (0 if x == 0). */
#define LOWESTBIT(x) ((x) & (((x)-1U) ^ (x)))

/* Replaces n32 with a mask containing only its highest set bit. */
#define HIGHESTBIT(n32) \
{ \
	HIGHESTBITIDX_32(n32); \
	n32 = NVBIT(n32); \
}

/* True when x is non-zero and a power of two (exactly one bit set). */
#define ONEBITSET(x) ((x) && (((x) & ((x)-1U)) == 0U))

/* Replaces n32 with the index of its lowest set bit (in place). */
#define LOWESTBITIDX_32(n32) \
{ \
	n32 = LOWESTBIT(n32); \
	IDX_32(n32); \
}

/* Replaces n32 with its population count (parallel bit-count). */
#define NUMSETBITS_32(n32) \
{ \
	n32 = n32 - ((n32 >> 1U) & 0x55555555U); \
	n32 = (n32 & 0x33333333U) + ((n32 >> 2U) & 0x33333333U); \
	n32 = (((n32 + (n32 >> 4U)) & 0x0F0F0F0FU) * 0x01010101U) >> 24U; \
}

/* Replaces a single-bit n32 with the index of that bit (binary search). */
#define IDX_32(n32) \
{ \
	u32 idx = 0U; \
	if ((n32) & 0xFFFF0000U) \
		idx += 16U; \
	if ((n32) & 0xFF00FF00U) \
		idx += 8U; \
	if ((n32) & 0xF0F0F0F0U) \
		idx += 4U; \
	if ((n32) & 0xCCCCCCCCU) \
		idx += 2U; \
	if ((n32) & 0xAAAAAAAAU) \
		idx += 1U; \
	(n32) = idx; \
}

/* container_of-style back-pointer from the embedded list node. */
static inline struct boardobjgrp *
boardobjgrp_from_node(struct nvgpu_list_node *node)
{
	return (struct boardobjgrp *)
		((uintptr_t)node - offsetof(struct boardobjgrp, node));
};

int is_boardobjgrp_pmucmd_id_valid_v0(struct gk20a *g,
	struct boardobjgrp *pboardobjgrp,
	struct boardobjgrp_pmu_cmd *cmd);
int is_boardobjgrp_pmucmd_id_valid_v1(struct gk20a *g,
	struct boardobjgrp *pboardobjgrp,
	struct boardobjgrp_pmu_cmd *cmd);
#endif /* NVGPU_BOARDOBJGRP_H */
diff --git a/include/boardobj/boardobjgrp_e255.c b/include/boardobj/boardobjgrp_e255.c
deleted file mode 100644
index 63546a9..0000000
--- a/include/boardobj/boardobjgrp_e255.c
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22 23#include <nvgpu/gk20a.h> 24 25#include "boardobj.h" 26#include "boardobjgrp_e255.h" 27#include "ctrl/ctrlboardobj.h" 28#include "boardobjgrp.h" 29#include "boardobjgrpmask.h" 30 31int boardobjgrpconstruct_e255(struct gk20a *g, 32 struct boardobjgrp_e255 *pboardobjgrp_e255) 33{ 34 int status = 0; 35 u8 objslots; 36 37 nvgpu_log_info(g, " "); 38 39 objslots = 255; 40 status = boardobjgrpmask_e255_init(&pboardobjgrp_e255->mask, NULL); 41 if (status) { 42 goto boardobjgrpconstruct_e255_exit; 43 } 44 45 pboardobjgrp_e255->super.type = CTRL_BOARDOBJGRP_TYPE_E255; 46 pboardobjgrp_e255->super.ppobjects = pboardobjgrp_e255->objects; 47 pboardobjgrp_e255->super.objslots = objslots; 48 pboardobjgrp_e255->super.mask = &(pboardobjgrp_e255->mask.super); 49 50 status = boardobjgrp_construct_super(g, &pboardobjgrp_e255->super); 51 if (status) { 52 goto boardobjgrpconstruct_e255_exit; 53 } 54 55 pboardobjgrp_e255->super.pmuhdrdatainit = 56 boardobjgrp_pmuhdrdatainit_e255; 57 58boardobjgrpconstruct_e255_exit: 59 return status; 60} 61 62int boardobjgrp_pmuhdrdatainit_e255(struct gk20a *g, 63 struct boardobjgrp *pboardobjgrp, 64 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu, 65 struct boardobjgrpmask *mask) 66{ 67 struct nv_pmu_boardobjgrp_e255 *pgrpe255 = 68 (struct nv_pmu_boardobjgrp_e255 *)pboardobjgrppmu; 69 int status; 70 71 nvgpu_log_info(g, " "); 72 73 if (pboardobjgrp == NULL) { 74 return -EINVAL; 75 } 76 77 if (pboardobjgrppmu == NULL) { 78 return -EINVAL; 79 } 80 81 status = boardobjgrpmask_export(mask, 82 mask->bitcount, 83 &pgrpe255->obj_mask.super); 84 if (status) { 85 nvgpu_err(g, "e255 init:failed export grpmask"); 86 return status; 87 } 88 89 return boardobjgrp_pmuhdrdatainit_super(g, 90 pboardobjgrp, pboardobjgrppmu, mask); 91}
diff --git a/include/boardobj/boardobjgrp_e255.h b/include/boardobj/boardobjgrp_e255.h
deleted file mode 100644
index bc40541..0000000
--- a/include/boardobj/boardobjgrp_e255.h
+++ /dev/null
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_BOARDOBJGRP_E255_H
#define NVGPU_BOARDOBJGRP_E255_H

#include "ctrl/ctrlboardobj.h"
#include "boardobj.h"
#include "boardobjgrpmask.h"
#include "boardobj/boardobjgrp.h"

/*
 * boardobjgrp_e255 is @ref BOARDOBJGRP child class allowing storage of up
 * to 255 @ref BOARDOBJ object pointers with single static 255-bit mask denoting
 * valid object pointers.
 */
struct boardobjgrp_e255 {
	/* BOARDOBJGRP super-class; must remain the first member so the
	 * struct can be cast to/from boardobjgrp. */
	struct boardobjgrp super;
	/* Statically allocated object-pointer slots referenced by
	 * super::ppobjects. */
	struct boardobj *objects[CTRL_BOARDOBJGRP_E255_MAX_OBJECTS];
	/* Statically allocated 255-bit mask referenced by super::mask. */
	struct boardobjgrpmask_e255 mask;
};

/*
 * Wrapper to the _SUPER implementation.  Provided for the child classes which
 * implement this interface.
 */
#define boardobjgrp_pmudatainit_e255(g, pboardpbjgrp, pboardobjgrppmu) \
	boardobjgrp_pmudatainit_super(g, pboardpbjgrp, pboardobjgrppmu)

/* Constructor and destructor */
int boardobjgrpconstruct_e255(struct gk20a *g,
	struct boardobjgrp_e255 *pboardobjgrp);
boardobjgrp_destruct boardobjgrpdestruct_e255;
boardobjgrp_pmuhdrdatainit boardobjgrp_pmuhdrdatainit_e255;

#endif /* NVGPU_BOARDOBJGRP_E255_H */
diff --git a/include/boardobj/boardobjgrp_e32.c b/include/boardobj/boardobjgrp_e32.c
deleted file mode 100644
index d72e8cb..0000000
--- a/include/boardobj/boardobjgrp_e32.c
+++ /dev/null
@@ -1,89 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22#include <nvgpu/gk20a.h> 23 24#include "boardobj.h" 25#include "boardobjgrp.h" 26#include "boardobjgrp_e32.h" 27#include "ctrl/ctrlboardobj.h" 28#include "boardobjgrpmask.h" 29 30 31int boardobjgrpconstruct_e32(struct gk20a *g, 32 struct boardobjgrp_e32 *pboardobjgrp_e32) 33{ 34 int status; 35 u8 objslots; 36 37 nvgpu_log_info(g, " "); 38 objslots = 32; 39 40 status = boardobjgrpmask_e32_init(&pboardobjgrp_e32->mask, NULL); 41 if (status) { 42 goto boardobjgrpconstruct_e32_exit; 43 } 44 45 pboardobjgrp_e32->super.type = CTRL_BOARDOBJGRP_TYPE_E32; 46 pboardobjgrp_e32->super.ppobjects = pboardobjgrp_e32->objects; 47 pboardobjgrp_e32->super.objslots = objslots; 48 pboardobjgrp_e32->super.mask = &(pboardobjgrp_e32->mask.super); 49 50 status = boardobjgrp_construct_super(g, &pboardobjgrp_e32->super); 51 if (status) { 52 goto boardobjgrpconstruct_e32_exit; 53 } 54 55 pboardobjgrp_e32->super.pmuhdrdatainit = boardobjgrp_pmuhdrdatainit_e32; 56 57boardobjgrpconstruct_e32_exit: 58 return status; 59} 60 61int boardobjgrp_pmuhdrdatainit_e32(struct gk20a *g, 62 struct boardobjgrp *pboardobjgrp, 63 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu, 64 struct boardobjgrpmask *mask) 65{ 66 struct nv_pmu_boardobjgrp_e32 *pgrpe32 = 67 (struct nv_pmu_boardobjgrp_e32 *)pboardobjgrppmu; 68 int status; 69 70 nvgpu_log_info(g, " "); 71 72 if (pboardobjgrp == NULL) { 73 return -EINVAL; 74 } 75 76 if (pboardobjgrppmu == NULL) { 77 return -EINVAL; 78 } 79 status = boardobjgrpmask_export(mask, 80 mask->bitcount, 81 &pgrpe32->obj_mask.super); 82 if (status) { 83 nvgpu_err(g, "e32 init:failed export grpmask"); 84 return status; 85 } 86 87 return boardobjgrp_pmuhdrdatainit_super(g, 88 pboardobjgrp, pboardobjgrppmu, mask); 89}
diff --git a/include/boardobj/boardobjgrp_e32.h b/include/boardobj/boardobjgrp_e32.h
deleted file mode 100644
index d4beb47..0000000
--- a/include/boardobj/boardobjgrp_e32.h
+++ /dev/null
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_BOARDOBJGRP_E32_H
#define NVGPU_BOARDOBJGRP_E32_H

#include "ctrl/ctrlboardobj.h"
#include "boardobj.h"
#include "boardobjgrp.h"
#include "boardobjgrpmask.h"
#include "boardobj/boardobjgrp.h"

/*
 * boardobjgrp_e32 is @ref BOARDOBJGRP child class allowing storage of up to 32
 * @ref BOARDOBJ object pointers with single static 32-bit mask denoting valid
 * object pointers.
 */
struct boardobjgrp_e32 {
	/*
	 * BOARDOBJGRP super-class. Must be first element of the structure.
	 */
	struct boardobjgrp super;
	/*
	 * Statically allocated array of PBOARDOBJ-s
	 */
	struct boardobj *objects[CTRL_BOARDOBJGRP_E32_MAX_OBJECTS];

	/*
	 * Statically allocated mask structure referenced by super::pMask.
	 */
	struct boardobjgrpmask_e32 mask;
};

/*
 * Wrapper to the _SUPER implementation.  Provided for the child classes which
 * implement this interface.
 */
#define boardobjgrp_pmudatainit_e32(g, pboardpbjgrp, pboardobjgrppmu) \
	boardobjgrp_pmudatainit_super(g, pboardpbjgrp, pboardobjgrppmu)

/* Constructor and destructor */
int boardobjgrpconstruct_e32(struct gk20a *g,
	struct boardobjgrp_e32 *pboardobjgrp);
boardobjgrp_destruct boardobjgrpdestruct_e32;
boardobjgrp_pmuhdrdatainit boardobjgrp_pmuhdrdatainit_e32;

#endif /* NVGPU_BOARDOBJGRP_E32_H */
diff --git a/include/boardobj/boardobjgrpmask.c b/include/boardobj/boardobjgrpmask.c
deleted file mode 100644
index a1dcd6d..0000000
--- a/include/boardobj/boardobjgrpmask.c
+++ /dev/null
@@ -1,411 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#include <nvgpu/gk20a.h> 23 24#include "boardobjgrp.h" 25#include "ctrl/ctrlboardobj.h" 26 27/* 28* Assures that unused bits (size .. (maskDataCount * 32 - 1)) are always zero. 
29*/ 30#define BOARDOBJGRPMASK_NORMALIZE(_pmask) \ 31 ((_pmask)->data[(_pmask)->maskdatacount-1] &= (_pmask)->lastmaskfilter) 32 33u32 boardobjgrpmask_init(struct boardobjgrpmask *mask, u8 bitsize, 34 struct ctrl_boardobjgrp_mask *extmask) 35{ 36 if (mask == NULL) { 37 return -EINVAL; 38 } 39 if ((bitsize != CTRL_BOARDOBJGRP_E32_MAX_OBJECTS) && 40 (bitsize != CTRL_BOARDOBJGRP_E255_MAX_OBJECTS)) { 41 return -EINVAL; 42 } 43 44 mask->bitcount = bitsize; 45 mask->maskdatacount = CTRL_BOARDOBJGRP_MASK_DATA_SIZE(bitsize); 46 mask->lastmaskfilter = bitsize % 47 CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE; 48 49 mask->lastmaskfilter = (mask->lastmaskfilter == 0) ? 50 0xFFFFFFFF : (u32)(BIT(mask->lastmaskfilter) - 1); 51 52 return (extmask == NULL) ? 53 boardobjgrpmask_clr(mask) : 54 boardobjgrpmask_import(mask, bitsize, extmask); 55} 56 57u32 boardobjgrpmask_import(struct boardobjgrpmask *mask, u8 bitsize, 58 struct ctrl_boardobjgrp_mask *extmask) 59{ 60 u8 index; 61 62 if (mask == NULL) { 63 return -EINVAL; 64 } 65 if (extmask == NULL) { 66 return -EINVAL; 67 } 68 if (mask->bitcount != bitsize) { 69 return -EINVAL; 70 } 71 72 for (index = 0; index < mask->maskdatacount; index++) { 73 mask->data[index] = extmask->data[index]; 74 } 75 76 BOARDOBJGRPMASK_NORMALIZE(mask); 77 78 return 0; 79} 80 81u32 boardobjgrpmask_export(struct boardobjgrpmask *mask, u8 bitsize, 82 struct ctrl_boardobjgrp_mask *extmask) 83{ 84 u8 index; 85 86 if (mask == NULL) { 87 return -EINVAL; 88 } 89 if (extmask == NULL) { 90 return -EINVAL; 91 } 92 if (mask->bitcount != bitsize) { 93 return -EINVAL; 94 } 95 96 for (index = 0; index < mask->maskdatacount; index++) { 97 extmask->data[index] = mask->data[index]; 98 } 99 100 return 0; 101} 102 103u32 boardobjgrpmask_clr(struct boardobjgrpmask *mask) 104{ 105 u8 index; 106 107 if (mask == NULL) { 108 return -EINVAL; 109 } 110 for (index = 0; index < mask->maskdatacount; index++) { 111 mask->data[index] = 0; 112 } 113 114 return 0; 115} 116 117u32 
boardobjgrpmask_set(struct boardobjgrpmask *mask) 118{ 119 u8 index; 120 121 if (mask == NULL) { 122 return -EINVAL; 123 } 124 for (index = 0; index < mask->maskdatacount; index++) { 125 mask->data[index] = 0xFFFFFFFF; 126 } 127 BOARDOBJGRPMASK_NORMALIZE(mask); 128 return 0; 129} 130 131u32 boardobjgrpmask_inv(struct boardobjgrpmask *mask) 132{ 133 u8 index; 134 135 if (mask == NULL) { 136 return -EINVAL; 137 } 138 for (index = 0; index < mask->maskdatacount; index++) { 139 mask->data[index] = ~mask->data[index]; 140 } 141 BOARDOBJGRPMASK_NORMALIZE(mask); 142 return 0; 143} 144 145bool boardobjgrpmask_iszero(struct boardobjgrpmask *mask) 146{ 147 u8 index; 148 149 if (mask == NULL) { 150 return true; 151 } 152 for (index = 0; index < mask->maskdatacount; index++) { 153 if (mask->data[index] != 0) { 154 return false; 155 } 156 } 157 return true; 158} 159 160u8 boardobjgrpmask_bitsetcount(struct boardobjgrpmask *mask) 161{ 162 u8 index; 163 u8 result = 0; 164 165 if (mask == NULL) { 166 return result; 167 } 168 169 for (index = 0; index < mask->maskdatacount; index++) { 170 u32 m = mask->data[index]; 171 172 NUMSETBITS_32(m); 173 result += (u8)m; 174 } 175 176 return result; 177} 178 179u8 boardobjgrpmask_bitidxlowest(struct boardobjgrpmask *mask) 180{ 181 u8 index; 182 u8 result = CTRL_BOARDOBJ_IDX_INVALID; 183 184 if (mask == NULL) { 185 return result; 186 } 187 188 for (index = 0; index < mask->maskdatacount; index++) { 189 u32 m = mask->data[index]; 190 191 if (m != 0) { 192 LOWESTBITIDX_32(m); 193 result = (u8)m + index * 194 CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE; 195 break; 196 } 197 } 198 199 return result; 200} 201 202u8 boardobjgrpmask_bitidxhighest(struct boardobjgrpmask *mask) 203{ 204 u8 index; 205 u8 result = CTRL_BOARDOBJ_IDX_INVALID; 206 207 if (mask == NULL) { 208 return result; 209 } 210 211 for (index = 0; index < mask->maskdatacount; index++) { 212 u32 m = mask->data[index]; 213 214 if (m != 0) { 215 HIGHESTBITIDX_32(m); 216 result = (u8)m + 
index * 217 CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE; 218 break; 219 } 220 } 221 222 return result; 223} 224 225int boardobjgrpmask_bitclr(struct boardobjgrpmask *mask, u8 bitidx) 226{ 227 u8 index; 228 u8 offset; 229 230 if (mask == NULL) { 231 return -EINVAL; 232 } 233 if (bitidx >= mask->bitcount) { 234 return -EINVAL; 235 } 236 237 index = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(bitidx); 238 offset = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(bitidx); 239 240 mask->data[index] &= ~BIT(offset); 241 242 return 0; 243} 244 245int boardobjgrpmask_bitset(struct boardobjgrpmask *mask, u8 bitidx) 246{ 247 u8 index; 248 u8 offset; 249 250 if (mask == NULL) { 251 return -EINVAL; 252 } 253 if (bitidx >= mask->bitcount) { 254 return -EINVAL; 255 } 256 257 index = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(bitidx); 258 offset = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(bitidx); 259 260 mask->data[index] |= BIT(offset); 261 262 return 0; 263} 264 265u32 boardobjgrpmask_bitinv(struct boardobjgrpmask *mask, u8 bitidx) 266{ 267 u8 index; 268 u8 offset; 269 270 if (mask == NULL) { 271 return -EINVAL; 272 } 273 if (bitidx >= mask->bitcount) { 274 return -EINVAL; 275 } 276 277 index = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(bitidx); 278 offset = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(bitidx); 279 280 mask->data[index] ^= ~BIT(offset); 281 282 return 0; 283} 284 285bool boardobjgrpmask_bitget(struct boardobjgrpmask *mask, u8 bitidx) 286{ 287 u8 index; 288 u8 offset; 289 290 if (mask == NULL) { 291 return false; 292 } 293 if (bitidx >= mask->bitcount) { 294 return false; 295 } 296 297 index = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(bitidx); 298 offset = CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(bitidx); 299 300 return (mask->data[index] & BIT(offset)) != 0; 301} 302 303u32 boardobjgrpmask_and(struct boardobjgrpmask *dst, 304 struct boardobjgrpmask *op1, 305 struct boardobjgrpmask *op2) 306{ 307 u8 index; 308 309 if (!boardobjgrpmask_sizeeq(dst, op1)) { 310 return -EINVAL; 311 
} 312 if (!boardobjgrpmask_sizeeq(dst, op2)) { 313 return -EINVAL; 314 } 315 316 for (index = 0; index < dst->maskdatacount; index++) { 317 dst->data[index] = op1->data[index] & op2->data[index]; 318 } 319 320 return 0; 321} 322 323u32 boardobjgrpmask_or(struct boardobjgrpmask *dst, 324 struct boardobjgrpmask *op1, 325 struct boardobjgrpmask *op2) 326{ 327 u8 index; 328 329 if (!boardobjgrpmask_sizeeq(dst, op1)) { 330 return -EINVAL; 331 } 332 if (!boardobjgrpmask_sizeeq(dst, op2)) { 333 return -EINVAL; 334 } 335 336 for (index = 0; index < dst->maskdatacount; index++) { 337 dst->data[index] = op1->data[index] | op2->data[index]; 338 } 339 340 return 0; 341} 342 343u32 boardobjgrpmask_xor(struct boardobjgrpmask *dst, 344 struct boardobjgrpmask *op1, 345 struct boardobjgrpmask *op2) 346{ 347 u8 index; 348 349 if (!boardobjgrpmask_sizeeq(dst, op1)) { 350 return -EINVAL; 351 } 352 if (!boardobjgrpmask_sizeeq(dst, op2)) { 353 return -EINVAL; 354 } 355 356 for (index = 0; index < dst->maskdatacount; index++) { 357 dst->data[index] = op1->data[index] ^ op2->data[index]; 358 } 359 360 return 0; 361} 362 363u32 boardobjgrpmask_copy(struct boardobjgrpmask *dst, 364 struct boardobjgrpmask *src) 365{ 366 u8 index; 367 368 if (!boardobjgrpmask_sizeeq(dst, src)) { 369 return -EINVAL; 370 } 371 372 for (index = 0; index < dst->maskdatacount; index++) { 373 dst->data[index] = src->data[index]; 374 } 375 376 return 0; 377} 378 379bool boardobjgrpmask_sizeeq(struct boardobjgrpmask *op1, 380 struct boardobjgrpmask *op2) 381{ 382 if (op1 == NULL) { 383 return false; 384 } 385 if (op2 == NULL) { 386 return false; 387 } 388 389 return op1->bitcount == op2->bitcount; 390} 391 392bool boardobjgrpmask_issubset(struct boardobjgrpmask *op1, 393 struct boardobjgrpmask *op2) 394{ 395 u8 index; 396 397 if (!boardobjgrpmask_sizeeq(op2, op1)) { 398 return false; 399 } 400 401 for (index = 0; index < op1->maskdatacount; index++) { 402 u32 op_1 = op1->data[index]; 403 u32 op_2 = op2->data[index]; 
404 405 if ((op_1 & op_2) != op_1) { 406 return false; 407 } 408 } 409 410 return true; 411}
diff --git a/include/boardobj/boardobjgrpmask.h b/include/boardobj/boardobjgrpmask.h
deleted file mode 100644
index f4ed0af..0000000
--- a/include/boardobj/boardobjgrpmask.h
+++ /dev/null
@@ -1,119 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_BOARDOBJGRPMASK_H 24#define NVGPU_BOARDOBJGRPMASK_H 25 26#include "ctrl/ctrlboardobj.h" 27 28 29/* 30* Board Object Group Mask super-structure. 31* Used to unify access to all BOARDOBJGRPMASK_E** child classes 32*/ 33struct boardobjgrpmask { 34 /* Number of bits supported by the mask */ 35 u8 bitcount; 36 /* Number of 32-bit words required to store all @ref bitCount bits */ 37 u8 maskdatacount; 38 /* 39 * Bit-mask of used-bits within last 32-bit word. Used to 40 * normalize data 41 */ 42 u32 lastmaskfilter; 43 /* 44 * Start of the array of 32-bit words representing the bit-mask 45 * Must be the last element of the structure. 46 */ 47 u32 data[CTRL_BOARDOBJGRP_MASK_ARRAY_START_SIZE]; 48}; 49 50struct boardobjgrpmask_e32 { 51 /* 52 * BOARDOBJGRPMASK super-class. 
Must be the first element of the 53 * structure. 54 */ 55 struct boardobjgrpmask super; 56 /*u32 data_e32[1]; */ 57}; 58 59struct boardobjgrpmask_e255 { 60 /* 61 * BOARDOBJGRPMASK super-class. Must be the first element of the 62 * structure. 63 */ 64 struct boardobjgrpmask super; 65 u32 data_e255[254]; 66}; 67 68/* Init and I/O operations.*/ 69u32 boardobjgrpmask_init(struct boardobjgrpmask *mask, u8 bitsize, 70 struct ctrl_boardobjgrp_mask *extmask); 71u32 boardobjgrpmask_import(struct boardobjgrpmask *mask, u8 bitsize, 72 struct ctrl_boardobjgrp_mask *extmask); 73u32 boardobjgrpmask_export(struct boardobjgrpmask *mask, u8 bitsize, 74 struct ctrl_boardobjgrp_mask *extmask); 75 76/* Operations on all bits of a single mask.*/ 77u32 boardobjgrpmask_clr(struct boardobjgrpmask *mask); 78u32 boardobjgrpmask_set(struct boardobjgrpmask *mask); 79u32 boardobjgrpmask_inv(struct boardobjgrpmask *mask); 80bool boardobjgrpmask_iszero(struct boardobjgrpmask *mask); 81u8 boardobjgrpmask_bitsetcount(struct boardobjgrpmask *mask); 82u8 boardobjgrpmask_bitidxlowest(struct boardobjgrpmask *mask); 83u8 boardobjgrpmask_bitidxhighest(struct boardobjgrpmask *mask); 84 85/* Operations on a single bit of a single mask */ 86int boardobjgrpmask_bitclr(struct boardobjgrpmask *mask, u8 bitidx); 87int boardobjgrpmask_bitset(struct boardobjgrpmask *mask, u8 bitidx); 88u32 boardobjgrpmask_bitinv(struct boardobjgrpmask *mask, u8 bitidx); 89bool boardobjgrpmask_bitget(struct boardobjgrpmask *mask, u8 bitidx); 90 91/* Operations on a multiple masks */ 92u32 boardobjgrpmask_and(struct boardobjgrpmask *dst, 93 struct boardobjgrpmask *op1, 94 struct boardobjgrpmask *op2); 95u32 boardobjgrpmask_or(struct boardobjgrpmask *dst, struct boardobjgrpmask *op1, 96 struct boardobjgrpmask *op2); 97u32 boardobjgrpmask_xor(struct boardobjgrpmask *dst, 98 struct boardobjgrpmask *op1, 99 struct boardobjgrpmask *op2); 100 101/* Special interfaces */ 102u32 boardobjgrpmask_copy(struct boardobjgrpmask *dst, 103 struct 
boardobjgrpmask *src); 104bool boardobjgrpmask_sizeeq(struct boardobjgrpmask *op1, 105 struct boardobjgrpmask *op2); 106bool boardobjgrpmask_issubset(struct boardobjgrpmask *op1, 107 struct boardobjgrpmask *op2); 108 109/* init boardobjgrpmask_e32 structure */ 110#define boardobjgrpmask_e32_init(pmaske32, pextmask) \ 111 boardobjgrpmask_init(&(pmaske32)->super, \ 112 CTRL_BOARDOBJGRP_E32_MAX_OBJECTS, (pextmask)) 113 114/* init boardobjgrpmask_e255 structure */ 115#define boardobjgrpmask_e255_init(pmaske255, pextmask) \ 116 boardobjgrpmask_init(&(pmaske255)->super, \ 117 CTRL_BOARDOBJGRP_E255_MAX_OBJECTS, (pextmask)) 118 119#endif /* NVGPU_BOARDOBJGRPMASK_H */
diff --git a/include/clk/clk.c b/include/clk/clk.c
deleted file mode 100644
index d8e30c4..0000000
--- a/include/clk/clk.c
+++ /dev/null
@@ -1,942 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/pmu.h> 24#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 25#include <nvgpu/gk20a.h> 26 27#include "clk.h" 28#include "ctrl/ctrlclk.h" 29#include "ctrl/ctrlvolt.h" 30#include "volt/volt.h" 31 32#define BOOT_GPC2CLK_MHZ 2581 33#define BOOT_MCLK_MHZ 3003 34 35struct clkrpc_pmucmdhandler_params { 36 struct nv_pmu_clk_rpc *prpccall; 37 u32 success; 38}; 39 40static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, 41 void *param, u32 handle, u32 status) 42{ 43 struct clkrpc_pmucmdhandler_params *phandlerparams = 44 (struct clkrpc_pmucmdhandler_params *)param; 45 46 nvgpu_log_info(g, " "); 47 48 if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) { 49 nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x", 50 msg->msg.clk.msg_type); 51 return; 52 } 53 54 if (phandlerparams->prpccall->b_supported) { 55 phandlerparams->success = 1; 56 } 57} 58 59 60int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload) 61{ 62 struct pmu_cmd cmd; 63 struct pmu_payload payload; 64 u32 status; 65 u32 seqdesc; 66 struct nv_pmu_clk_rpc rpccall; 67 struct clkrpc_pmucmdhandler_params handler; 68 struct nv_pmu_clk_load *clkload; 69 70 memset(&payload, 0, sizeof(struct pmu_payload)); 71 memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); 72 memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); 73 memset(&cmd, 0, sizeof(struct pmu_cmd)); 74 75 rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; 76 clkload = &rpccall.params.clk_load; 77 clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG; 78 clkload->action_mask = bload ? 
79 NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES : 80 NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO; 81 82 cmd.hdr.unit_id = PMU_UNIT_CLK; 83 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + 84 (u32)sizeof(struct pmu_hdr); 85 86 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; 87 88 payload.in.buf = (u8 *)&rpccall; 89 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); 90 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 91 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; 92 93 payload.out.buf = (u8 *)&rpccall; 94 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); 95 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 96 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; 97 98 handler.prpccall = &rpccall; 99 handler.success = 0; 100 101 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, 102 PMU_COMMAND_QUEUE_LPQ, 103 clkrpc_pmucmdhandler, (void *)&handler, 104 &seqdesc, ~0); 105 if (status) { 106 nvgpu_err(g, "unable to post clk RPC cmd %x", 107 cmd.cmd.clk.cmd_type); 108 goto done; 109 } 110 111 pmu_wait_message_cond(&g->pmu, 112 gk20a_get_gr_idle_timeout(g), 113 &handler.success, 1); 114 if (handler.success == 0) { 115 nvgpu_err(g, "rpc call to load Effective avg clk domain freq failed"); 116 status = -EINVAL; 117 } 118 119done: 120 return status; 121} 122 123u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask) { 124 125 struct pmu_cmd cmd; 126 struct pmu_payload payload; 127 u32 status; 128 u32 seqdesc; 129 struct nv_pmu_clk_rpc rpccall; 130 struct clkrpc_pmucmdhandler_params handler; 131 struct nv_pmu_clk_freq_effective_avg *clk_freq_effective_avg; 132 133 memset(&payload, 0, sizeof(struct pmu_payload)); 134 memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); 135 memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); 136 memset(&cmd, 0, sizeof(struct pmu_cmd)); 137 138 rpccall.function = NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG; 139 clk_freq_effective_avg = 
&rpccall.params.clk_freq_effective_avg; 140 clk_freq_effective_avg->clkDomainMask = clkDomainMask; 141 142 cmd.hdr.unit_id = PMU_UNIT_CLK; 143 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + 144 (u32)sizeof(struct pmu_hdr); 145 146 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; 147 148 payload.in.buf = (u8 *)&rpccall; 149 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); 150 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 151 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; 152 153 payload.out.buf = (u8 *)&rpccall; 154 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); 155 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 156 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; 157 158 handler.prpccall = &rpccall; 159 handler.success = 0; 160 161 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, 162 PMU_COMMAND_QUEUE_LPQ, 163 clkrpc_pmucmdhandler, (void *)&handler, 164 &seqdesc, ~0); 165 if (status) { 166 nvgpu_err(g, "unable to post clk RPC cmd %x", 167 cmd.cmd.clk.cmd_type); 168 goto done; 169 } 170 171 pmu_wait_message_cond(&g->pmu, 172 gk20a_get_gr_idle_timeout(g), 173 &handler.success, 1); 174 if (handler.success == 0) { 175 nvgpu_err(g, "rpc call to get clk frequency average failed"); 176 status = -EINVAL; 177 goto done; 178 } 179 180 return rpccall.params.clk_freq_effective_avg.freqkHz[clkDomainMask]; 181 182done: 183 return status; 184} 185 186int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx) 187{ 188 struct pmu_cmd cmd; 189 struct pmu_payload payload; 190 u32 status; 191 u32 seqdesc; 192 struct nv_pmu_clk_rpc rpccall; 193 struct clkrpc_pmucmdhandler_params handler; 194 struct nv_pmu_clk_load *clkload; 195 struct clk_freq_controllers *pclk_freq_controllers; 196 struct ctrl_boardobjgrp_mask_e32 *load_mask; 197 struct boardobjgrpmask_e32 isolate_cfc_mask; 198 199 memset(&payload, 0, sizeof(struct pmu_payload)); 200 memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); 201 
memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); 202 203 pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers; 204 rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; 205 clkload = &rpccall.params.clk_load; 206 clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER; 207 clkload->action_mask = bload ? 208 NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_YES : 209 NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_NO; 210 211 load_mask = &rpccall.params.clk_load.payload.freq_controllers.load_mask; 212 213 status = boardobjgrpmask_e32_init(&isolate_cfc_mask, NULL); 214 215 if (bit_idx == CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL) { 216 status = boardobjgrpmask_export( 217 &pclk_freq_controllers-> 218 freq_ctrl_load_mask.super, 219 pclk_freq_controllers-> 220 freq_ctrl_load_mask.super.bitcount, 221 &load_mask->super); 222 223 224 } else { 225 status = boardobjgrpmask_bitset(&isolate_cfc_mask.super, 226 bit_idx); 227 status = boardobjgrpmask_export(&isolate_cfc_mask.super, 228 isolate_cfc_mask.super.bitcount, 229 &load_mask->super); 230 if (bload) { 231 status = boardobjgrpmask_bitset( 232 &pclk_freq_controllers-> 233 freq_ctrl_load_mask.super, 234 bit_idx); 235 } else { 236 status = boardobjgrpmask_bitclr( 237 &pclk_freq_controllers-> 238 freq_ctrl_load_mask.super, 239 bit_idx); 240 } 241 } 242 243 if (status) { 244 nvgpu_err(g, "Error in generating mask used to select CFC"); 245 goto done; 246 } 247 248 cmd.hdr.unit_id = PMU_UNIT_CLK; 249 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + 250 (u32)sizeof(struct pmu_hdr); 251 252 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; 253 254 payload.in.buf = (u8 *)&rpccall; 255 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); 256 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 257 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; 258 259 payload.out.buf = (u8 *)&rpccall; 260 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); 261 payload.out.fb_size = 
PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 262 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; 263 264 handler.prpccall = &rpccall; 265 handler.success = 0; 266 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, 267 PMU_COMMAND_QUEUE_LPQ, 268 clkrpc_pmucmdhandler, (void *)&handler, 269 &seqdesc, ~0); 270 271 if (status) { 272 nvgpu_err(g, "unable to post clk RPC cmd %x", 273 cmd.cmd.clk.cmd_type); 274 goto done; 275 } 276 277 pmu_wait_message_cond(&g->pmu, 278 gk20a_get_gr_idle_timeout(g), 279 &handler.success, 1); 280 281 if (handler.success == 0) { 282 nvgpu_err(g, "rpc call to load freq cntlr cal failed"); 283 status = -EINVAL; 284 } 285 286done: 287 return status; 288} 289 290u32 clk_pmu_vin_load(struct gk20a *g) 291{ 292 struct pmu_cmd cmd; 293 struct pmu_payload payload; 294 u32 status; 295 u32 seqdesc; 296 struct nv_pmu_clk_rpc rpccall; 297 struct clkrpc_pmucmdhandler_params handler; 298 struct nv_pmu_clk_load *clkload; 299 300 memset(&payload, 0, sizeof(struct pmu_payload)); 301 memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); 302 memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); 303 304 rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; 305 clkload = &rpccall.params.clk_load; 306 clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_VIN; 307 clkload->action_mask = NV_NV_PMU_CLK_LOAD_ACTION_MASK_VIN_HW_CAL_PROGRAM_YES << 4; 308 309 cmd.hdr.unit_id = PMU_UNIT_CLK; 310 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + 311 (u32)sizeof(struct pmu_hdr); 312 313 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; 314 cmd.cmd.clk.generic.b_perf_daemon_cmd =false; 315 316 payload.in.buf = (u8 *)&rpccall; 317 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); 318 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 319 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; 320 321 payload.out.buf = (u8 *)&rpccall; 322 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); 323 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 
324 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; 325 326 handler.prpccall = &rpccall; 327 handler.success = 0; 328 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, 329 PMU_COMMAND_QUEUE_LPQ, 330 clkrpc_pmucmdhandler, (void *)&handler, 331 &seqdesc, ~0); 332 333 if (status) { 334 nvgpu_err(g, "unable to post clk RPC cmd %x", 335 cmd.cmd.clk.cmd_type); 336 goto done; 337 } 338 339 pmu_wait_message_cond(&g->pmu, 340 gk20a_get_gr_idle_timeout(g), 341 &handler.success, 1); 342 343 if (handler.success == 0) { 344 nvgpu_err(g, "rpc call to load vin cal failed"); 345 status = -EINVAL; 346 } 347 348done: 349 return status; 350} 351 352u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g, 353 struct nv_pmu_clk_rpc *rpccall, 354 struct set_fll_clk *setfllclk) 355{ 356 struct nv_pmu_clk_vf_change_inject *vfchange; 357 358 vfchange = &rpccall->params.clk_vf_change_inject; 359 vfchange->flags = 0; 360 vfchange->clk_list.num_domains = 3; 361 vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPC2CLK; 362 vfchange->clk_list.clk_domains[0].clk_freq_khz = 363 setfllclk->gpc2clkmhz * 1000; 364 vfchange->clk_list.clk_domains[0].clk_flags = 0; 365 vfchange->clk_list.clk_domains[0].current_regime_id = 366 setfllclk->current_regime_id_gpc; 367 vfchange->clk_list.clk_domains[0].target_regime_id = 368 setfllclk->target_regime_id_gpc; 369 vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBAR2CLK; 370 vfchange->clk_list.clk_domains[1].clk_freq_khz = 371 setfllclk->xbar2clkmhz * 1000; 372 vfchange->clk_list.clk_domains[1].clk_flags = 0; 373 vfchange->clk_list.clk_domains[1].current_regime_id = 374 setfllclk->current_regime_id_xbar; 375 vfchange->clk_list.clk_domains[1].target_regime_id = 376 setfllclk->target_regime_id_xbar; 377 vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYS2CLK; 378 vfchange->clk_list.clk_domains[2].clk_freq_khz = 379 setfllclk->sys2clkmhz * 1000; 380 vfchange->clk_list.clk_domains[2].clk_flags = 0; 381 
vfchange->clk_list.clk_domains[2].current_regime_id = 382 setfllclk->current_regime_id_sys; 383 vfchange->clk_list.clk_domains[2].target_regime_id = 384 setfllclk->target_regime_id_sys; 385 vfchange->volt_list.num_rails = 1; 386 vfchange->volt_list.rails[0].volt_domain = CTRL_VOLT_DOMAIN_LOGIC; 387 vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv; 388 vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv = 389 setfllclk->voltuv; 390 391 return 0; 392} 393 394u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g, 395 struct nv_pmu_clk_rpc *rpccall, 396 struct set_fll_clk *setfllclk) 397{ 398 struct nv_pmu_clk_vf_change_inject_v1 *vfchange; 399 400 vfchange = &rpccall->params.clk_vf_change_inject_v1; 401 vfchange->flags = 0; 402 vfchange->clk_list.num_domains = 4; 403 vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPCCLK; 404 vfchange->clk_list.clk_domains[0].clk_freq_khz = 405 setfllclk->gpc2clkmhz * 1000; 406 407 vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBARCLK; 408 vfchange->clk_list.clk_domains[1].clk_freq_khz = 409 setfllclk->xbar2clkmhz * 1000; 410 411 vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYSCLK; 412 vfchange->clk_list.clk_domains[2].clk_freq_khz = 413 setfllclk->sys2clkmhz * 1000; 414 415 vfchange->clk_list.clk_domains[3].clk_domain = CTRL_CLK_DOMAIN_NVDCLK; 416 vfchange->clk_list.clk_domains[3].clk_freq_khz = 855 * 1000; 417 418 vfchange->volt_list.num_rails = 1; 419 vfchange->volt_list.rails[0].rail_idx = 0; 420 vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv; 421 vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv = 422 setfllclk->voltuv; 423 424 return 0; 425} 426 427static u32 clk_pmu_vf_inject(struct gk20a *g, struct set_fll_clk *setfllclk) 428{ 429 struct pmu_cmd cmd; 430 struct pmu_payload payload; 431 u32 status; 432 u32 seqdesc; 433 struct nv_pmu_clk_rpc rpccall; 434 struct clkrpc_pmucmdhandler_params handler; 435 436 memset(&payload, 0, 
sizeof(struct pmu_payload)); 437 memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); 438 memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); 439 memset(&cmd, 0, sizeof(struct pmu_cmd)); 440 441 if ((setfllclk->gpc2clkmhz == 0) || (setfllclk->xbar2clkmhz == 0) || 442 (setfllclk->sys2clkmhz == 0) || (setfllclk->voltuv == 0)) { 443 return -EINVAL; 444 } 445 446 if ((setfllclk->target_regime_id_gpc > CTRL_CLK_FLL_REGIME_ID_FR) || 447 (setfllclk->target_regime_id_sys > CTRL_CLK_FLL_REGIME_ID_FR) || 448 (setfllclk->target_regime_id_xbar > CTRL_CLK_FLL_REGIME_ID_FR)) { 449 return -EINVAL; 450 } 451 452 rpccall.function = NV_PMU_CLK_RPC_ID_CLK_VF_CHANGE_INJECT; 453 454 g->ops.pmu_ver.clk.clk_vf_change_inject_data_fill(g, 455 &rpccall, setfllclk); 456 457 cmd.hdr.unit_id = PMU_UNIT_CLK; 458 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + 459 (u32)sizeof(struct pmu_hdr); 460 461 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; 462 463 payload.in.buf = (u8 *)&rpccall; 464 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); 465 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 466 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; 467 468 payload.out.buf = (u8 *)&rpccall; 469 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); 470 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; 471 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; 472 473 handler.prpccall = &rpccall; 474 handler.success = 0; 475 476 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, 477 PMU_COMMAND_QUEUE_LPQ, 478 clkrpc_pmucmdhandler, (void *)&handler, 479 &seqdesc, ~0); 480 481 if (status) { 482 nvgpu_err(g, "unable to post clk RPC cmd %x", 483 cmd.cmd.clk.cmd_type); 484 goto done; 485 } 486 487 pmu_wait_message_cond(&g->pmu, 488 gk20a_get_gr_idle_timeout(g), 489 &handler.success, 1); 490 491 if (handler.success == 0) { 492 nvgpu_err(g, "rpc call to inject clock failed"); 493 status = -EINVAL; 494 } 495done: 496 return status; 497} 498 499static 
u32 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz) 500{ 501 struct fll_device *pflldev; 502 u8 j; 503 struct clk_pmupstate *pclk = &g->clk_pmu; 504 505 BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super), 506 struct fll_device *, pflldev, j) { 507 if (pflldev->clk_domain == domain) { 508 if (pflldev->regime_desc.fixed_freq_regime_limit_mhz >= 509 clkmhz) { 510 return CTRL_CLK_FLL_REGIME_ID_FFR; 511 } else { 512 return CTRL_CLK_FLL_REGIME_ID_FR; 513 } 514 } 515 } 516 return CTRL_CLK_FLL_REGIME_ID_INVALID; 517} 518 519static int set_regime_id(struct gk20a *g, u32 domain, u32 regimeid) 520{ 521 struct fll_device *pflldev; 522 u8 j; 523 struct clk_pmupstate *pclk = &g->clk_pmu; 524 525 BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super), 526 struct fll_device *, pflldev, j) { 527 if (pflldev->clk_domain == domain) { 528 pflldev->regime_desc.regime_id = regimeid; 529 return 0; 530 } 531 } 532 return -EINVAL; 533} 534 535static int get_regime_id(struct gk20a *g, u32 domain, u32 *regimeid) 536{ 537 struct fll_device *pflldev; 538 u8 j; 539 struct clk_pmupstate *pclk = &g->clk_pmu; 540 541 BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super), 542 struct fll_device *, pflldev, j) { 543 if (pflldev->clk_domain == domain) { 544 *regimeid = pflldev->regime_desc.regime_id; 545 return 0; 546 } 547 } 548 return -EINVAL; 549} 550 551int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk) 552{ 553 int status = -EINVAL; 554 555 /*set regime ids */ 556 status = get_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK, 557 &setfllclk->current_regime_id_gpc); 558 if (status) { 559 goto done; 560 } 561 562 setfllclk->target_regime_id_gpc = find_regime_id(g, 563 CTRL_CLK_DOMAIN_GPC2CLK, setfllclk->gpc2clkmhz); 564 565 status = get_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK, 566 &setfllclk->current_regime_id_sys); 567 if (status) { 568 goto done; 569 } 570 571 setfllclk->target_regime_id_sys = find_regime_id(g, 572 CTRL_CLK_DOMAIN_SYS2CLK, setfllclk->sys2clkmhz); 573 574 status = 
get_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK, 575 &setfllclk->current_regime_id_xbar); 576 if (status) { 577 goto done; 578 } 579 580 setfllclk->target_regime_id_xbar = find_regime_id(g, 581 CTRL_CLK_DOMAIN_XBAR2CLK, setfllclk->xbar2clkmhz); 582 583 status = clk_pmu_vf_inject(g, setfllclk); 584 585 if (status) { 586 nvgpu_err(g, "vf inject to change clk failed"); 587 } 588 589 /* save regime ids */ 590 status = set_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK, 591 setfllclk->target_regime_id_xbar); 592 if (status) { 593 goto done; 594 } 595 596 status = set_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK, 597 setfllclk->target_regime_id_gpc); 598 if (status) { 599 goto done; 600 } 601 602 status = set_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK, 603 setfllclk->target_regime_id_sys); 604 if (status) { 605 goto done; 606 } 607done: 608 return status; 609} 610 611int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk) 612{ 613 int status = -EINVAL; 614 struct clk_domain *pdomain; 615 u8 i; 616 struct clk_pmupstate *pclk = &g->clk_pmu; 617 u16 clkmhz = 0; 618 struct clk_domain_3x_master *p3xmaster; 619 struct clk_domain_3x_slave *p3xslave; 620 unsigned long slaveidxmask; 621 622 if (setfllclk->gpc2clkmhz == 0) { 623 return -EINVAL; 624 } 625 626 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), 627 struct clk_domain *, pdomain, i) { 628 629 if (pdomain->api_domain == CTRL_CLK_DOMAIN_GPC2CLK) { 630 631 if (!pdomain->super.implements(g, &pdomain->super, 632 CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) { 633 status = -EINVAL; 634 goto done; 635 } 636 p3xmaster = (struct clk_domain_3x_master *)pdomain; 637 slaveidxmask = p3xmaster->slave_idxs_mask; 638 for_each_set_bit(i, &slaveidxmask, 32) { 639 p3xslave = (struct clk_domain_3x_slave *) 640 CLK_CLK_DOMAIN_GET(pclk, i); 641 if ((p3xslave->super.super.super.api_domain != 642 CTRL_CLK_DOMAIN_XBAR2CLK) && 643 (p3xslave->super.super.super.api_domain != 644 CTRL_CLK_DOMAIN_SYS2CLK)) { 645 continue; 646 } 647 clkmhz = 0; 648 status = 
p3xslave->clkdomainclkgetslaveclk(g, 649 pclk, 650 (struct clk_domain *)p3xslave, 651 &clkmhz, 652 setfllclk->gpc2clkmhz); 653 if (status) { 654 status = -EINVAL; 655 goto done; 656 } 657 if (p3xslave->super.super.super.api_domain == 658 CTRL_CLK_DOMAIN_XBAR2CLK) { 659 setfllclk->xbar2clkmhz = clkmhz; 660 } 661 if (p3xslave->super.super.super.api_domain == 662 CTRL_CLK_DOMAIN_SYS2CLK) { 663 setfllclk->sys2clkmhz = clkmhz; 664 } 665 } 666 } 667 } 668done: 669 return status; 670} 671 672u32 clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain) 673{ 674 u32 status = -EINVAL; 675 struct clk_domain *pdomain; 676 u8 i; 677 struct clk_pmupstate *pclk = &g->clk_pmu; 678 u16 clkmhz = 0; 679 u32 volt = 0; 680 681 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), 682 struct clk_domain *, pdomain, i) { 683 if (pdomain->api_domain == clkapidomain) { 684 status = pdomain->clkdomainclkvfsearch(g, pclk, 685 pdomain, &clkmhz, &volt, 686 CLK_PROG_VFE_ENTRY_LOGIC); 687 status = pdomain->clkdomainclkvfsearch(g, pclk, 688 pdomain, &clkmhz, &volt, 689 CLK_PROG_VFE_ENTRY_SRAM); 690 } 691 } 692 return status; 693} 694 695static int clk_program_fllclks(struct gk20a *g, struct change_fll_clk *fllclk) 696{ 697 int status = -EINVAL; 698 struct clk_domain *pdomain; 699 u8 i; 700 struct clk_pmupstate *pclk = &g->clk_pmu; 701 u16 clkmhz = 0; 702 struct clk_domain_3x_master *p3xmaster; 703 struct clk_domain_3x_slave *p3xslave; 704 unsigned long slaveidxmask; 705 struct set_fll_clk setfllclk; 706 707 if (fllclk->api_clk_domain != CTRL_CLK_DOMAIN_GPCCLK) { 708 return -EINVAL; 709 } 710 if (fllclk->voltuv == 0) { 711 return -EINVAL; 712 } 713 if (fllclk->clkmhz == 0) { 714 return -EINVAL; 715 } 716 717 setfllclk.voltuv = fllclk->voltuv; 718 setfllclk.gpc2clkmhz = fllclk->clkmhz; 719 720 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), 721 struct clk_domain *, pdomain, i) { 722 723 if (pdomain->api_domain == fllclk->api_clk_domain) { 724 725 if (!pdomain->super.implements(g, 
&pdomain->super, 726 CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) { 727 status = -EINVAL; 728 goto done; 729 } 730 p3xmaster = (struct clk_domain_3x_master *)pdomain; 731 slaveidxmask = p3xmaster->slave_idxs_mask; 732 for_each_set_bit(i, &slaveidxmask, 32) { 733 p3xslave = (struct clk_domain_3x_slave *) 734 CLK_CLK_DOMAIN_GET(pclk, i); 735 if ((p3xslave->super.super.super.api_domain != 736 CTRL_CLK_DOMAIN_XBARCLK) && 737 (p3xslave->super.super.super.api_domain != 738 CTRL_CLK_DOMAIN_SYSCLK)) { 739 continue; 740 } 741 clkmhz = 0; 742 status = p3xslave->clkdomainclkgetslaveclk(g, 743 pclk, 744 (struct clk_domain *)p3xslave, 745 &clkmhz, 746 fllclk->clkmhz); 747 if (status) { 748 status = -EINVAL; 749 goto done; 750 } 751 if (p3xslave->super.super.super.api_domain == 752 CTRL_CLK_DOMAIN_XBARCLK) { 753 setfllclk.xbar2clkmhz = clkmhz; 754 } 755 if (p3xslave->super.super.super.api_domain == 756 CTRL_CLK_DOMAIN_SYSCLK) { 757 setfllclk.sys2clkmhz = clkmhz; 758 } 759 } 760 } 761 } 762 /*set regime ids */ 763 status = get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, 764 &setfllclk.current_regime_id_gpc); 765 if (status) { 766 goto done; 767 } 768 769 setfllclk.target_regime_id_gpc = find_regime_id(g, 770 CTRL_CLK_DOMAIN_GPCCLK, setfllclk.gpc2clkmhz); 771 772 status = get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, 773 &setfllclk.current_regime_id_sys); 774 if (status) { 775 goto done; 776 } 777 778 setfllclk.target_regime_id_sys = find_regime_id(g, 779 CTRL_CLK_DOMAIN_SYSCLK, setfllclk.sys2clkmhz); 780 781 status = get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, 782 &setfllclk.current_regime_id_xbar); 783 if (status) { 784 goto done; 785 } 786 787 setfllclk.target_regime_id_xbar = find_regime_id(g, 788 CTRL_CLK_DOMAIN_XBARCLK, setfllclk.xbar2clkmhz); 789 790 status = clk_pmu_vf_inject(g, &setfllclk); 791 792 if (status) { 793 nvgpu_err(g, 794 "vf inject to change clk failed"); 795 } 796 797 /* save regime ids */ 798 status = set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, 799 setfllclk.target_regime_id_xbar); 
800 if (status) { 801 goto done; 802 } 803 804 status = set_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, 805 setfllclk.target_regime_id_gpc); 806 if (status) { 807 goto done; 808 } 809 810 status = set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, 811 setfllclk.target_regime_id_sys); 812 if (status) { 813 goto done; 814 } 815done: 816 return status; 817} 818 819u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g) 820{ 821 int status; 822 struct change_fll_clk bootfllclk; 823 u16 gpcclk_clkmhz = BOOT_GPCCLK_MHZ; 824 u32 gpcclk_voltuv = 0; 825 u32 voltuv = 0; 826 827 status = clk_vf_point_cache(g); 828 if (status) { 829 nvgpu_err(g,"caching failed"); 830 return status; 831 } 832 833 status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK, 834 &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); 835 if (status) { 836 return status; 837 } 838 839 voltuv = gpcclk_voltuv; 840 841 status = volt_set_voltage(g, voltuv, 0); 842 if (status) { 843 nvgpu_err(g, 844 "attempt to set boot voltage failed %d", 845 voltuv); 846 } 847 848 bootfllclk.api_clk_domain = CTRL_CLK_DOMAIN_GPCCLK; 849 bootfllclk.clkmhz = gpcclk_clkmhz; 850 bootfllclk.voltuv = voltuv; 851 status = clk_program_fllclks(g, &bootfllclk); 852 if (status) { 853 nvgpu_err(g, "attempt to set boot gpcclk failed"); 854 } 855 856 status = clk_pmu_freq_effective_avg_load(g, true); 857 858 /* 859 * Read clocks after some delay with below method 860 * & extract clock data from buffer 861 * clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK | 862 * CTRL_CLK_DOMAIN_XBARCLK | 863 * CTRL_CLK_DOMAIN_SYSCLK | 864 * CTRL_CLK_DOMAIN_NVDCLK) 865 * */ 866 867 return status; 868} 869 870int nvgpu_clk_set_fll_clk_gv10x(struct gk20a *g) 871{ 872 int status; 873 struct change_fll_clk bootfllclk; 874 u16 gpcclk_clkmhz = BOOT_GPCCLK_MHZ; 875 u32 gpcclk_voltuv = 0U; 876 u32 voltuv = 0U; 877 878 status = clk_vf_point_cache(g); 879 if (status != 0) { 880 nvgpu_err(g, "caching failed"); 881 return status; 882 } 883 884 status = clk_domain_get_f_or_v(g, 
CTRL_CLK_DOMAIN_GPCCLK, 885 &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); 886 if (status != 0) { 887 return status; 888 } 889 890 voltuv = gpcclk_voltuv; 891 892 status = volt_set_voltage(g, voltuv, 0U); 893 if (status != 0) { 894 nvgpu_err(g, "attempt to set max voltage failed %d", voltuv); 895 } 896 897 bootfllclk.api_clk_domain = CTRL_CLK_DOMAIN_GPCCLK; 898 bootfllclk.clkmhz = gpcclk_clkmhz; 899 bootfllclk.voltuv = voltuv; 900 status = clk_program_fllclks(g, &bootfllclk); 901 if (status != 0) { 902 nvgpu_err(g, "attempt to set max gpcclk failed"); 903 } 904 return status; 905} 906 907u32 clk_domain_get_f_or_v( 908 struct gk20a *g, 909 u32 clkapidomain, 910 u16 *pclkmhz, 911 u32 *pvoltuv, 912 u8 railidx 913) 914{ 915 u32 status = -EINVAL; 916 struct clk_domain *pdomain; 917 u8 i; 918 struct clk_pmupstate *pclk = &g->clk_pmu; 919 u8 rail; 920 921 if ((pclkmhz == NULL) || (pvoltuv == NULL)) { 922 return -EINVAL; 923 } 924 925 if (railidx == CTRL_VOLT_DOMAIN_LOGIC) { 926 rail = CLK_PROG_VFE_ENTRY_LOGIC; 927 } else if (railidx == CTRL_VOLT_DOMAIN_SRAM) { 928 rail = CLK_PROG_VFE_ENTRY_SRAM; 929 } else { 930 return -EINVAL; 931 } 932 933 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), 934 struct clk_domain *, pdomain, i) { 935 if (pdomain->api_domain == clkapidomain) { 936 status = pdomain->clkdomainclkvfsearch(g, pclk, 937 pdomain, pclkmhz, pvoltuv, rail); 938 return status; 939 } 940 } 941 return status; 942}
diff --git a/include/clk/clk.h b/include/clk/clk.h
deleted file mode 100644
index 3f4bdf7..0000000
--- a/include/clk/clk.h
+++ /dev/null
@@ -1,144 +0,0 @@ 1/* 2 * general clock structures & definitions 3 * 4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef NVGPU_CLK_H 25#define NVGPU_CLK_H 26 27#include "clk_vin.h" 28#include "clk_fll.h" 29#include "clk_domain.h" 30#include "clk_prog.h" 31#include "clk_vf_point.h" 32#include "clk_mclk.h" 33#include "clk_freq_controller.h" 34 35#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP 0x10 36#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK 0x1F 37#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SHIFT 0 38#define BOOT_GPCCLK_MHZ 952 39 40struct gk20a; 41 42int clk_set_boot_fll_clk(struct gk20a *g); 43 44/* clock related defines for GPUs supporting clock control from pmu*/ 45struct clk_pmupstate { 46 struct avfsvinobjs avfs_vinobjs; 47 struct avfsfllobjs avfs_fllobjs; 48 struct clk_domains clk_domainobjs; 49 struct clk_progs clk_progobjs; 50 struct clk_vf_points clk_vf_pointobjs; 51 struct clk_mclk_state clk_mclk; 52 struct clk_freq_controllers clk_freq_controllers; 53}; 54 55struct clockentry { 56 u8 vbios_clk_domain; 57 u8 clk_which; 58 u8 perf_index; 59 u32 api_clk_domain; 60}; 61 62struct change_fll_clk { 63 u32 api_clk_domain; 64 u16 clkmhz; 65 u32 voltuv; 66}; 67 68struct set_fll_clk { 69 u32 voltuv; 70 u16 gpc2clkmhz; 71 u32 current_regime_id_gpc; 72 u32 target_regime_id_gpc; 73 u16 sys2clkmhz; 74 u32 current_regime_id_sys; 75 u32 target_regime_id_sys; 76 u16 xbar2clkmhz; 77 u32 current_regime_id_xbar; 78 u32 target_regime_id_xbar; 79}; 80 81#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_MAX_NUMCLKS 9 82 83struct vbios_clock_domain { 84 u8 clock_type; 85 u8 num_domains; 86 struct clockentry clock_entry[NV_PERF_HEADER_4X_CLOCKS_DOMAINS_MAX_NUMCLKS]; 87}; 88 89struct vbios_clocks_table_1x_hal_clock_entry { 90 enum nv_pmu_clk_clkwhich domain; 91 bool b_noise_aware_capable; 92 u8 clk_vf_curve_count; 93}; 94 95#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_GPC2CLK 0 96#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_XBAR2CLK 1 97#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_DRAMCLK 2 98#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_SYS2CLK 3 99#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_HUB2CLK 4 
100#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_MSDCLK 5 101#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_PWRCLK 6 102#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_DISPCLK 7 103#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_NUMCLKS 8 104 105#define PERF_CLK_MCLK 0 106#define PERF_CLK_DISPCLK 1 107#define PERF_CLK_GPC2CLK 2 108#define PERF_CLK_HOSTCLK 3 109#define PERF_CLK_LTC2CLK 4 110#define PERF_CLK_SYS2CLK 5 111#define PERF_CLK_HUB2CLK 6 112#define PERF_CLK_LEGCLK 7 113#define PERF_CLK_MSDCLK 8 114#define PERF_CLK_XCLK 9 115#define PERF_CLK_PWRCLK 10 116#define PERF_CLK_XBAR2CLK 11 117#define PERF_CLK_PCIEGENCLK 12 118#define PERF_CLK_NUM 13 119 120#define BOOT_GPC2CLK_MHZ 2581 121 122u32 clk_pmu_vin_load(struct gk20a *g); 123u32 clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain); 124u32 clk_domain_get_f_or_v( 125 struct gk20a *g, 126 u32 clkapidomain, 127 u16 *pclkmhz, 128 u32 *pvoltuv, 129 u8 railidx 130); 131int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *fllclk); 132int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *fllclk); 133int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx); 134u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g, 135 struct nv_pmu_clk_rpc *rpccall, 136 struct set_fll_clk *setfllclk); 137u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g, 138 struct nv_pmu_clk_rpc *rpccall, 139 struct set_fll_clk *setfllclk); 140u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g); 141int nvgpu_clk_set_fll_clk_gv10x(struct gk20a *g); 142int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload); 143u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask); 144#endif /* NVGPU_CLK_H */
diff --git a/include/clk/clk_arb.c b/include/clk/clk_arb.c
deleted file mode 100644
index 8e9fb41..0000000
--- a/include/clk/clk_arb.c
+++ /dev/null
@@ -1,1087 +0,0 @@ 1/* 2 * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bitops.h> 24#include <nvgpu/lock.h> 25#include <nvgpu/kmem.h> 26#include <nvgpu/atomic.h> 27#include <nvgpu/bug.h> 28#include <nvgpu/kref.h> 29#include <nvgpu/log.h> 30#include <nvgpu/barrier.h> 31#include <nvgpu/cond.h> 32#include <nvgpu/list.h> 33#include <nvgpu/clk_arb.h> 34#include <nvgpu/timers.h> 35#include <nvgpu/gk20a.h> 36 37#include "clk/clk.h" 38#include "pstate/pstate.h" 39#include "lpwr/lpwr.h" 40#include "volt/volt.h" 41 42int nvgpu_clk_notification_queue_alloc(struct gk20a *g, 43 struct nvgpu_clk_notification_queue *queue, 44 size_t events_number) { 45 queue->notifications = nvgpu_kcalloc(g, events_number, 46 sizeof(struct nvgpu_clk_notification)); 47 if (!queue->notifications) 48 return -ENOMEM; 49 queue->size = events_number; 50 51 nvgpu_atomic_set(&queue->head, 0); 52 nvgpu_atomic_set(&queue->tail, 0); 53 54 return 0; 55} 56 57void nvgpu_clk_notification_queue_free(struct gk20a *g, 58 struct nvgpu_clk_notification_queue *queue) { 59 if (queue->size > 0) { 60 nvgpu_kfree(g, queue->notifications); 61 queue->size = 0; 62 nvgpu_atomic_set(&queue->head, 0); 63 nvgpu_atomic_set(&queue->tail, 0); 64 } 65} 66 67static void nvgpu_clk_arb_queue_notification(struct gk20a *g, 68 struct nvgpu_clk_notification_queue *queue, 69 u32 alarm_mask) { 70 71 u32 queue_index; 72 u64 timestamp; 73 74 queue_index = (nvgpu_atomic_inc_return(&queue->tail)) % queue->size; 75 /* get current timestamp */ 76 timestamp = (u64) nvgpu_hr_timestamp(); 77 78 queue->notifications[queue_index].timestamp = timestamp; 79 queue->notifications[queue_index].notification = alarm_mask; 80 81} 82 83void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm) 84{ 85 struct nvgpu_clk_arb *arb = g->clk_arb; 86 87 u64 current_mask; 88 u32 refcnt; 89 u32 alarm_mask; 90 u64 new_mask; 91 92 do { 93 current_mask = nvgpu_atomic64_read(&arb->alarm_mask); 94 /* atomic operations are strong so they do not need masks */ 95 96 refcnt = ((u32) (current_mask >> 32)) + 1; 97 
alarm_mask = (u32) (current_mask & ~0) | alarm; 98 new_mask = ((u64) refcnt << 32) | alarm_mask; 99 100 } while (unlikely(current_mask != 101 (u64)nvgpu_atomic64_cmpxchg(&arb->alarm_mask, 102 current_mask, new_mask))); 103 104 nvgpu_clk_arb_queue_notification(g, &arb->notification_queue, alarm); 105} 106 107 108int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb) 109{ 110 struct gk20a *g = arb->g; 111 struct nvgpu_clk_vf_table *table; 112 113 u32 i, j; 114 int status = -EINVAL; 115 u32 gpc2clk_voltuv = 0, mclk_voltuv = 0; 116 u32 gpc2clk_voltuv_sram = 0, mclk_voltuv_sram = 0; 117 u16 clk_cur; 118 u32 num_points; 119 120 struct clk_set_info *p5_info, *p0_info; 121 122 table = NV_ACCESS_ONCE(arb->current_vf_table); 123 /* make flag visible when all data has resolved in the tables */ 124 nvgpu_smp_rmb(); 125 126 table = (table == &arb->vf_table_pool[0]) ? &arb->vf_table_pool[1] : 127 &arb->vf_table_pool[0]; 128 129 /* Get allowed memory ranges */ 130 if (g->ops.clk_arb.get_arbiter_clk_range(g, CTRL_CLK_DOMAIN_GPC2CLK, 131 &arb->gpc2clk_min, 132 &arb->gpc2clk_max) < 0) { 133 nvgpu_err(g, "failed to fetch GPC2CLK range"); 134 goto exit_vf_table; 135 } 136 if (g->ops.clk_arb.get_arbiter_clk_range(g, CTRL_CLK_DOMAIN_MCLK, 137 &arb->mclk_min, 138 &arb->mclk_max) < 0) { 139 nvgpu_err(g, "failed to fetch MCLK range"); 140 goto exit_vf_table; 141 } 142 143 table->gpc2clk_num_points = MAX_F_POINTS; 144 table->mclk_num_points = MAX_F_POINTS; 145 146 if (g->ops.clk.clk_domain_get_f_points(arb->g, CTRL_CLK_DOMAIN_GPC2CLK, 147 &table->gpc2clk_num_points, arb->gpc2clk_f_points)) { 148 nvgpu_err(g, "failed to fetch GPC2CLK frequency points"); 149 goto exit_vf_table; 150 } 151 152 if (g->ops.clk.clk_domain_get_f_points(arb->g, CTRL_CLK_DOMAIN_MCLK, 153 &table->mclk_num_points, arb->mclk_f_points)) { 154 nvgpu_err(g, "failed to fetch MCLK frequency points"); 155 goto exit_vf_table; 156 } 157 if (!table->mclk_num_points || !table->gpc2clk_num_points) { 158 nvgpu_err(g, "empty 
queries to f points mclk %d gpc2clk %d", 159 table->mclk_num_points, table->gpc2clk_num_points); 160 status = -EINVAL; 161 goto exit_vf_table; 162 } 163 164 memset(table->mclk_points, 0, 165 table->mclk_num_points*sizeof(struct nvgpu_clk_vf_point)); 166 memset(table->gpc2clk_points, 0, 167 table->gpc2clk_num_points*sizeof(struct nvgpu_clk_vf_point)); 168 169 p5_info = pstate_get_clk_set_info(g, 170 CTRL_PERF_PSTATE_P5, clkwhich_mclk); 171 if (!p5_info) { 172 nvgpu_err(g, "failed to get MCLK P5 info"); 173 goto exit_vf_table; 174 } 175 p0_info = pstate_get_clk_set_info(g, 176 CTRL_PERF_PSTATE_P0, clkwhich_mclk); 177 if (!p0_info) { 178 nvgpu_err(g, "failed to get MCLK P0 info"); 179 goto exit_vf_table; 180 } 181 182 for (i = 0, j = 0, num_points = 0, clk_cur = 0; 183 i < table->mclk_num_points; i++) { 184 185 if ((arb->mclk_f_points[i] >= arb->mclk_min) && 186 (arb->mclk_f_points[i] <= arb->mclk_max) && 187 (arb->mclk_f_points[i] != clk_cur)) { 188 189 table->mclk_points[j].mem_mhz = arb->mclk_f_points[i]; 190 mclk_voltuv = mclk_voltuv_sram = 0; 191 192 status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_MCLK, 193 &table->mclk_points[j].mem_mhz, &mclk_voltuv, 194 CTRL_VOLT_DOMAIN_LOGIC); 195 if (status < 0) { 196 nvgpu_err(g, 197 "failed to get MCLK LOGIC voltage"); 198 goto exit_vf_table; 199 } 200 status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_MCLK, 201 &table->mclk_points[j].mem_mhz, 202 &mclk_voltuv_sram, 203 CTRL_VOLT_DOMAIN_SRAM); 204 if (status < 0) { 205 nvgpu_err(g, "failed to get MCLK SRAM voltage"); 206 goto exit_vf_table; 207 } 208 209 table->mclk_points[j].uvolt = mclk_voltuv; 210 table->mclk_points[j].uvolt_sram = mclk_voltuv_sram; 211 clk_cur = table->mclk_points[j].mem_mhz; 212 213 if ((clk_cur >= p5_info->min_mhz) && 214 (clk_cur <= p5_info->max_mhz)) 215 VF_POINT_SET_PSTATE_SUPPORTED( 216 &table->mclk_points[j], 217 CTRL_PERF_PSTATE_P5); 218 if ((clk_cur >= p0_info->min_mhz) && 219 (clk_cur <= p0_info->max_mhz)) 220 VF_POINT_SET_PSTATE_SUPPORTED( 
221 &table->mclk_points[j], 222 CTRL_PERF_PSTATE_P0); 223 224 j++; 225 num_points++; 226 227 } 228 } 229 table->mclk_num_points = num_points; 230 231 p5_info = pstate_get_clk_set_info(g, 232 CTRL_PERF_PSTATE_P5, clkwhich_gpc2clk); 233 if (!p5_info) { 234 status = -EINVAL; 235 nvgpu_err(g, "failed to get GPC2CLK P5 info"); 236 goto exit_vf_table; 237 } 238 239 p0_info = pstate_get_clk_set_info(g, 240 CTRL_PERF_PSTATE_P0, clkwhich_gpc2clk); 241 if (!p0_info) { 242 status = -EINVAL; 243 nvgpu_err(g, "failed to get GPC2CLK P0 info"); 244 goto exit_vf_table; 245 } 246 247 /* GPC2CLK needs to be checked in two passes. The first determines the 248 * relationships between GPC2CLK, SYS2CLK and XBAR2CLK, while the 249 * second verifies that the clocks minimum is satisfied and sets 250 * the voltages 251 */ 252 for (i = 0, j = 0, num_points = 0, clk_cur = 0; 253 i < table->gpc2clk_num_points; i++) { 254 struct set_fll_clk setfllclk; 255 256 if ((arb->gpc2clk_f_points[i] >= arb->gpc2clk_min) && 257 (arb->gpc2clk_f_points[i] <= arb->gpc2clk_max) && 258 (arb->gpc2clk_f_points[i] != clk_cur)) { 259 260 table->gpc2clk_points[j].gpc_mhz = 261 arb->gpc2clk_f_points[i]; 262 setfllclk.gpc2clkmhz = arb->gpc2clk_f_points[i]; 263 status = clk_get_fll_clks(g, &setfllclk); 264 if (status < 0) { 265 nvgpu_err(g, 266 "failed to get GPC2CLK slave clocks"); 267 goto exit_vf_table; 268 } 269 270 table->gpc2clk_points[j].sys_mhz = 271 setfllclk.sys2clkmhz; 272 table->gpc2clk_points[j].xbar_mhz = 273 setfllclk.xbar2clkmhz; 274 275 clk_cur = table->gpc2clk_points[j].gpc_mhz; 276 277 if ((clk_cur >= p5_info->min_mhz) && 278 (clk_cur <= p5_info->max_mhz)) 279 VF_POINT_SET_PSTATE_SUPPORTED( 280 &table->gpc2clk_points[j], 281 CTRL_PERF_PSTATE_P5); 282 if ((clk_cur >= p0_info->min_mhz) && 283 (clk_cur <= p0_info->max_mhz)) 284 VF_POINT_SET_PSTATE_SUPPORTED( 285 &table->gpc2clk_points[j], 286 CTRL_PERF_PSTATE_P0); 287 288 j++; 289 num_points++; 290 } 291 } 292 table->gpc2clk_num_points = num_points; 293 
294 /* Second pass */ 295 for (i = 0, j = 0; i < table->gpc2clk_num_points; i++) { 296 297 u16 alt_gpc2clk = table->gpc2clk_points[i].gpc_mhz; 298 299 gpc2clk_voltuv = gpc2clk_voltuv_sram = 0; 300 301 /* Check sysclk */ 302 p5_info = pstate_get_clk_set_info(g, 303 VF_POINT_GET_PSTATE(&table->gpc2clk_points[i]), 304 clkwhich_sys2clk); 305 if (!p5_info) { 306 status = -EINVAL; 307 nvgpu_err(g, "failed to get SYS2CLK P5 info"); 308 goto exit_vf_table; 309 } 310 311 /* sys2clk below clk min, need to find correct clock */ 312 if (table->gpc2clk_points[i].sys_mhz < p5_info->min_mhz) { 313 for (j = i + 1; j < table->gpc2clk_num_points; j++) { 314 315 if (table->gpc2clk_points[j].sys_mhz >= 316 p5_info->min_mhz) { 317 318 319 table->gpc2clk_points[i].sys_mhz = 320 p5_info->min_mhz; 321 322 alt_gpc2clk = alt_gpc2clk < 323 table->gpc2clk_points[j]. 324 gpc_mhz ? 325 table->gpc2clk_points[j]. 326 gpc_mhz : 327 alt_gpc2clk; 328 break; 329 } 330 } 331 /* no VF exists that satisfies condition */ 332 if (j == table->gpc2clk_num_points) { 333 nvgpu_err(g, "NO SYS2CLK VF point possible"); 334 status = -EINVAL; 335 goto exit_vf_table; 336 } 337 } 338 339 /* Check xbarclk */ 340 p5_info = pstate_get_clk_set_info(g, 341 VF_POINT_GET_PSTATE(&table->gpc2clk_points[i]), 342 clkwhich_xbar2clk); 343 if (!p5_info) { 344 status = -EINVAL; 345 nvgpu_err(g, "failed to get SYS2CLK P5 info"); 346 goto exit_vf_table; 347 } 348 349 /* xbar2clk below clk min, need to find correct clock */ 350 if (table->gpc2clk_points[i].xbar_mhz < p5_info->min_mhz) { 351 for (j = i; j < table->gpc2clk_num_points; j++) { 352 if (table->gpc2clk_points[j].xbar_mhz >= 353 p5_info->min_mhz) { 354 355 table->gpc2clk_points[i].xbar_mhz = 356 p5_info->min_mhz; 357 358 alt_gpc2clk = alt_gpc2clk < 359 table->gpc2clk_points[j]. 360 gpc_mhz ? 361 table->gpc2clk_points[j]. 
362 gpc_mhz : 363 alt_gpc2clk; 364 break; 365 } 366 } 367 /* no VF exists that satisfies condition */ 368 if (j == table->gpc2clk_num_points) { 369 status = -EINVAL; 370 nvgpu_err(g, "NO XBAR2CLK VF point possible"); 371 372 goto exit_vf_table; 373 } 374 } 375 376 /* Calculate voltages */ 377 status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPC2CLK, 378 &alt_gpc2clk, &gpc2clk_voltuv, 379 CTRL_VOLT_DOMAIN_LOGIC); 380 if (status < 0) { 381 nvgpu_err(g, "failed to get GPC2CLK LOGIC voltage"); 382 goto exit_vf_table; 383 } 384 385 status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPC2CLK, 386 &alt_gpc2clk, 387 &gpc2clk_voltuv_sram, 388 CTRL_VOLT_DOMAIN_SRAM); 389 if (status < 0) { 390 nvgpu_err(g, "failed to get GPC2CLK SRAM voltage"); 391 goto exit_vf_table; 392 } 393 394 table->gpc2clk_points[i].uvolt = gpc2clk_voltuv; 395 table->gpc2clk_points[i].uvolt_sram = gpc2clk_voltuv_sram; 396 } 397 398 /* make table visible when all data has resolved in the tables */ 399 nvgpu_smp_wmb(); 400 arb->current_vf_table = table; 401 402exit_vf_table: 403 404 if (status < 0) 405 nvgpu_clk_arb_set_global_alarm(g, 406 EVENT(ALARM_VF_TABLE_UPDATE_FAILED)); 407 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item); 408 409 return status; 410} 411 412 413static void nvgpu_clk_arb_run_vf_table_cb(struct nvgpu_clk_arb *arb) 414{ 415 struct gk20a *g = arb->g; 416 u32 err; 417 418 /* get latest vf curve from pmu */ 419 err = clk_vf_point_cache(g); 420 if (err) { 421 nvgpu_err(g, "failed to cache VF table"); 422 nvgpu_clk_arb_set_global_alarm(g, 423 EVENT(ALARM_VF_TABLE_UPDATE_FAILED)); 424 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item); 425 426 return; 427 } 428 nvgpu_clk_arb_update_vf_table(arb); 429} 430 431u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev, 432 struct nvgpu_clk_arb_target *target, 433 u32 alarm) { 434 435 struct nvgpu_clk_session *session = dev->session; 436 struct nvgpu_clk_arb *arb = session->g->clk_arb; 437 struct nvgpu_clk_notification 
*notification; 438 439 u32 queue_alarm_mask = 0; 440 u32 enabled_mask = 0; 441 u32 new_alarms_reported = 0; 442 u32 poll_mask = 0; 443 u32 tail, head; 444 u32 queue_index; 445 size_t size; 446 int index; 447 448 enabled_mask = nvgpu_atomic_read(&dev->enabled_mask); 449 size = arb->notification_queue.size; 450 451 /* queue global arbiter notifications in buffer */ 452 do { 453 tail = nvgpu_atomic_read(&arb->notification_queue.tail); 454 /* copy items to the queue */ 455 queue_index = nvgpu_atomic_read(&dev->queue.tail); 456 head = dev->arb_queue_head; 457 head = (tail - head) < arb->notification_queue.size ? 458 head : tail - arb->notification_queue.size; 459 460 for (index = head; _WRAPGTEQ(tail, index); index++) { 461 u32 alarm_detected; 462 463 notification = &arb->notification_queue. 464 notifications[(index+1) % size]; 465 alarm_detected = 466 NV_ACCESS_ONCE(notification->notification); 467 468 if (!(enabled_mask & alarm_detected)) 469 continue; 470 471 queue_index++; 472 dev->queue.notifications[ 473 queue_index % dev->queue.size].timestamp = 474 NV_ACCESS_ONCE(notification->timestamp); 475 476 dev->queue.notifications[ 477 queue_index % dev->queue.size].notification = 478 alarm_detected; 479 480 queue_alarm_mask |= alarm_detected; 481 } 482 } while (unlikely(nvgpu_atomic_read(&arb->notification_queue.tail) != 483 (int)tail)); 484 485 nvgpu_atomic_set(&dev->queue.tail, queue_index); 486 /* update the last notification we processed from global queue */ 487 488 dev->arb_queue_head = tail; 489 490 /* Check if current session targets are met */ 491 if (enabled_mask & EVENT(ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE)) { 492 if ((target->gpc2clk < session->target->gpc2clk) 493 || (target->mclk < session->target->mclk)) { 494 495 poll_mask |= (NVGPU_POLLIN | NVGPU_POLLPRI); 496 nvgpu_clk_arb_queue_notification(arb->g, &dev->queue, 497 EVENT(ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE)); 498 } 499 } 500 501 /* Check if there is a new VF update */ 502 if (queue_alarm_mask & 
EVENT(VF_UPDATE)) 503 poll_mask |= (NVGPU_POLLIN | NVGPU_POLLRDNORM); 504 505 /* Notify sticky alarms that were not reported on previous run*/ 506 new_alarms_reported = (queue_alarm_mask | 507 (alarm & ~dev->alarms_reported & queue_alarm_mask)); 508 509 if (new_alarms_reported & ~LOCAL_ALARM_MASK) { 510 /* check that we are not re-reporting */ 511 if (new_alarms_reported & EVENT(ALARM_GPU_LOST)) 512 poll_mask |= NVGPU_POLLHUP; 513 514 poll_mask |= (NVGPU_POLLIN | NVGPU_POLLPRI); 515 /* On next run do not report global alarms that were already 516 * reported, but report SHUTDOWN always 517 */ 518 dev->alarms_reported = new_alarms_reported & ~LOCAL_ALARM_MASK & 519 ~EVENT(ALARM_GPU_LOST); 520 } 521 522 if (poll_mask) { 523 nvgpu_atomic_set(&dev->poll_mask, poll_mask); 524 nvgpu_clk_arb_event_post_event(dev); 525 } 526 527 return new_alarms_reported; 528} 529 530void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm) 531{ 532 struct nvgpu_clk_arb *arb = g->clk_arb; 533 534 u64 current_mask; 535 u32 refcnt; 536 u32 alarm_mask; 537 u64 new_mask; 538 539 do { 540 current_mask = nvgpu_atomic64_read(&arb->alarm_mask); 541 /* atomic operations are strong so they do not need masks */ 542 543 refcnt = ((u32) (current_mask >> 32)) + 1; 544 alarm_mask = (u32) (current_mask & ~alarm); 545 new_mask = ((u64) refcnt << 32) | alarm_mask; 546 547 } while (unlikely(current_mask != 548 (u64)nvgpu_atomic64_cmpxchg(&arb->alarm_mask, 549 current_mask, new_mask))); 550} 551 552/* 553 * Process one scheduled work item. 
554 */ 555static void nvgpu_clk_arb_worker_process_item( 556 struct nvgpu_clk_arb_work_item *work_item) 557{ 558 struct gk20a *g = work_item->arb->g; 559 560 clk_arb_dbg(g, " "); 561 562 if (work_item->item_type == CLK_ARB_WORK_UPDATE_VF_TABLE) 563 nvgpu_clk_arb_run_vf_table_cb(work_item->arb); 564 else if (work_item->item_type == CLK_ARB_WORK_UPDATE_ARB) 565 g->ops.clk_arb.clk_arb_run_arbiter_cb(work_item->arb); 566} 567 568/** 569 * Tell the worker that one more work needs to be done. 570 * 571 * Increase the work counter to synchronize the worker with the new work. Wake 572 * up the worker. If the worker was already running, it will handle this work 573 * before going to sleep. 574 */ 575static int nvgpu_clk_arb_worker_wakeup(struct gk20a *g) 576{ 577 int put; 578 579 clk_arb_dbg(g, " "); 580 581 put = nvgpu_atomic_inc_return(&g->clk_arb_worker.put); 582 nvgpu_cond_signal_interruptible(&g->clk_arb_worker.wq); 583 584 return put; 585} 586 587/** 588 * Test if there is some work pending. 589 * 590 * This is a pair for nvgpu_clk_arb_worker_wakeup to be called from the 591 * worker. The worker has an internal work counter which is incremented once 592 * per finished work item. This is compared with the number of queued jobs. 593 */ 594static bool nvgpu_clk_arb_worker_pending(struct gk20a *g, int get) 595{ 596 bool pending = nvgpu_atomic_read(&g->clk_arb_worker.put) != get; 597 598 /* We don't need barriers because they are implicit in locking */ 599 return pending; 600} 601 602/** 603 * Process the queued works for the worker thread serially. 604 * 605 * Flush all the work items in the queue one by one. This may block timeout 606 * handling for a short while, as these are serialized. 
607 */ 608static void nvgpu_clk_arb_worker_process(struct gk20a *g, int *get) 609{ 610 611 while (nvgpu_clk_arb_worker_pending(g, *get)) { 612 struct nvgpu_clk_arb_work_item *work_item = NULL; 613 614 nvgpu_spinlock_acquire(&g->clk_arb_worker.items_lock); 615 if (!nvgpu_list_empty(&g->clk_arb_worker.items)) { 616 work_item = nvgpu_list_first_entry(&g->clk_arb_worker.items, 617 nvgpu_clk_arb_work_item, worker_item); 618 nvgpu_list_del(&work_item->worker_item); 619 } 620 nvgpu_spinlock_release(&g->clk_arb_worker.items_lock); 621 622 if (!work_item) { 623 /* 624 * Woke up for some other reason, but there are no 625 * other reasons than a work item added in the items list 626 * currently, so warn and ack the message. 627 */ 628 nvgpu_warn(g, "Spurious worker event!"); 629 ++*get; 630 break; 631 } 632 633 nvgpu_clk_arb_worker_process_item(work_item); 634 ++*get; 635 } 636} 637 638/* 639 * Process all work items found in the clk arbiter work queue. 640 */ 641static int nvgpu_clk_arb_poll_worker(void *arg) 642{ 643 struct gk20a *g = (struct gk20a *)arg; 644 struct gk20a_worker *worker = &g->clk_arb_worker; 645 int get = 0; 646 647 clk_arb_dbg(g, " "); 648 649 while (!nvgpu_thread_should_stop(&worker->poll_task)) { 650 int ret; 651 652 ret = NVGPU_COND_WAIT_INTERRUPTIBLE( 653 &worker->wq, 654 nvgpu_clk_arb_worker_pending(g, get), 0); 655 656 if (nvgpu_thread_should_stop(&worker->poll_task)) { 657 break; 658 } 659 660 if (ret == 0) 661 nvgpu_clk_arb_worker_process(g, &get); 662 } 663 return 0; 664} 665 666static int __nvgpu_clk_arb_worker_start(struct gk20a *g) 667{ 668 char thread_name[64]; 669 int err = 0; 670 671 if (nvgpu_thread_is_running(&g->clk_arb_worker.poll_task)) 672 return err; 673 674 nvgpu_mutex_acquire(&g->clk_arb_worker.start_lock); 675 676 /* 677 * Mutexes have implicit barriers, so there is no risk of a thread 678 * having a stale copy of the poll_task variable as the call to 679 * thread_is_running is volatile 680 */ 681 682 if 
(nvgpu_thread_is_running(&g->clk_arb_worker.poll_task)) { 683 nvgpu_mutex_release(&g->clk_arb_worker.start_lock); 684 return err; 685 } 686 687 snprintf(thread_name, sizeof(thread_name), 688 "nvgpu_clk_arb_poll_%s", g->name); 689 690 err = nvgpu_thread_create(&g->clk_arb_worker.poll_task, g, 691 nvgpu_clk_arb_poll_worker, thread_name); 692 693 nvgpu_mutex_release(&g->clk_arb_worker.start_lock); 694 return err; 695} 696 697/** 698 * Append a work item to the worker's list. 699 * 700 * This adds work item to the end of the list and wakes the worker 701 * up immediately. If the work item already existed in the list, it's not added, 702 * because in that case it has been scheduled already but has not yet been 703 * processed. 704 */ 705void nvgpu_clk_arb_worker_enqueue(struct gk20a *g, 706 struct nvgpu_clk_arb_work_item *work_item) 707{ 708 clk_arb_dbg(g, " "); 709 710 /* 711 * Warn if worker thread cannot run 712 */ 713 if (WARN_ON(__nvgpu_clk_arb_worker_start(g))) { 714 nvgpu_warn(g, "clk arb worker cannot run!"); 715 return; 716 } 717 718 nvgpu_spinlock_acquire(&g->clk_arb_worker.items_lock); 719 if (!nvgpu_list_empty(&work_item->worker_item)) { 720 /* 721 * Already queued, so will get processed eventually. 722 * The worker is probably awake already. 723 */ 724 nvgpu_spinlock_release(&g->clk_arb_worker.items_lock); 725 return; 726 } 727 nvgpu_list_add_tail(&work_item->worker_item, &g->clk_arb_worker.items); 728 nvgpu_spinlock_release(&g->clk_arb_worker.items_lock); 729 730 nvgpu_clk_arb_worker_wakeup(g); 731} 732 733/** 734 * Initialize the clk arb worker's metadata and start the background thread. 
735 */ 736int nvgpu_clk_arb_worker_init(struct gk20a *g) 737{ 738 int err; 739 740 nvgpu_atomic_set(&g->clk_arb_worker.put, 0); 741 nvgpu_cond_init(&g->clk_arb_worker.wq); 742 nvgpu_init_list_node(&g->clk_arb_worker.items); 743 nvgpu_spinlock_init(&g->clk_arb_worker.items_lock); 744 err = nvgpu_mutex_init(&g->clk_arb_worker.start_lock); 745 if (err) 746 goto error_check; 747 748 err = __nvgpu_clk_arb_worker_start(g); 749error_check: 750 if (err) { 751 nvgpu_err(g, "failed to start clk arb poller thread"); 752 return err; 753 } 754 return 0; 755} 756 757int nvgpu_clk_arb_init_arbiter(struct gk20a *g) 758{ 759 int err = 0; 760 761 if (!g->ops.clk.support_clk_freq_controller || 762 !g->ops.clk_arb.get_arbiter_clk_domains) { 763 return 0; 764 } 765 766 nvgpu_mutex_acquire(&g->clk_arb_enable_lock); 767 768 err = g->ops.clk_arb.arbiter_clk_init(g); 769 770 nvgpu_mutex_release(&g->clk_arb_enable_lock); 771 772 return err; 773} 774 775bool nvgpu_clk_arb_has_active_req(struct gk20a *g) 776{ 777 return (nvgpu_atomic_read(&g->clk_arb_global_nr) > 0); 778} 779 780void nvgpu_clk_arb_send_thermal_alarm(struct gk20a *g) 781{ 782 nvgpu_clk_arb_schedule_alarm(g, 783 (0x1UL << NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD)); 784} 785 786void nvgpu_clk_arb_schedule_alarm(struct gk20a *g, u32 alarm) 787{ 788 struct nvgpu_clk_arb *arb = g->clk_arb; 789 790 nvgpu_clk_arb_set_global_alarm(g, alarm); 791 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item); 792} 793 794static void nvgpu_clk_arb_worker_deinit(struct gk20a *g) 795{ 796 nvgpu_atomic_inc(&g->clk_arb_worker.put); 797 798 nvgpu_mutex_acquire(&g->clk_arb_worker.start_lock); 799 nvgpu_thread_stop(&g->clk_arb_worker.poll_task); 800 nvgpu_mutex_release(&g->clk_arb_worker.start_lock); 801} 802 803void nvgpu_clk_arb_cleanup_arbiter(struct gk20a *g) 804{ 805 struct nvgpu_clk_arb *arb = g->clk_arb; 806 807 nvgpu_mutex_acquire(&g->clk_arb_enable_lock); 808 809 if (arb) { 810 nvgpu_clk_arb_worker_deinit(g); 811 
g->ops.clk_arb.clk_arb_cleanup(g->clk_arb); 812 } 813 814 nvgpu_mutex_release(&g->clk_arb_enable_lock); 815} 816 817int nvgpu_clk_arb_init_session(struct gk20a *g, 818 struct nvgpu_clk_session **_session) 819{ 820 struct nvgpu_clk_arb *arb = g->clk_arb; 821 struct nvgpu_clk_session *session = *(_session); 822 823 clk_arb_dbg(g, " "); 824 825 if (!g->ops.clk.support_clk_freq_controller || 826 !g->ops.clk_arb.get_arbiter_clk_domains) { 827 return 0; 828 } 829 830 session = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_session)); 831 if (!session) 832 return -ENOMEM; 833 session->g = g; 834 835 nvgpu_ref_init(&session->refcount); 836 837 session->zombie = false; 838 session->target_pool[0].pstate = CTRL_PERF_PSTATE_P8; 839 /* make sure that the initialization of the pool is visible 840 * before the update 841 */ 842 nvgpu_smp_wmb(); 843 session->target = &session->target_pool[0]; 844 845 nvgpu_init_list_node(&session->targets); 846 nvgpu_spinlock_init(&session->session_lock); 847 848 nvgpu_spinlock_acquire(&arb->sessions_lock); 849 nvgpu_list_add_tail(&session->link, &arb->sessions); 850 nvgpu_spinlock_release(&arb->sessions_lock); 851 852 *_session = session; 853 854 return 0; 855} 856 857void nvgpu_clk_arb_free_fd(struct nvgpu_ref *refcount) 858{ 859 struct nvgpu_clk_dev *dev = container_of(refcount, 860 struct nvgpu_clk_dev, refcount); 861 struct nvgpu_clk_session *session = dev->session; 862 struct gk20a *g = session->g; 863 864 nvgpu_clk_notification_queue_free(g, &dev->queue); 865 866 nvgpu_atomic_dec(&g->clk_arb_global_nr); 867 nvgpu_kfree(g, dev); 868} 869 870void nvgpu_clk_arb_free_session(struct nvgpu_ref *refcount) 871{ 872 struct nvgpu_clk_session *session = container_of(refcount, 873 struct nvgpu_clk_session, refcount); 874 struct nvgpu_clk_arb *arb = session->g->clk_arb; 875 struct gk20a *g = session->g; 876 struct nvgpu_clk_dev *dev, *tmp; 877 878 clk_arb_dbg(g, " "); 879 880 if (arb) { 881 nvgpu_spinlock_acquire(&arb->sessions_lock); 882 
nvgpu_list_del(&session->link); 883 nvgpu_spinlock_release(&arb->sessions_lock); 884 } 885 886 nvgpu_spinlock_acquire(&session->session_lock); 887 nvgpu_list_for_each_entry_safe(dev, tmp, &session->targets, 888 nvgpu_clk_dev, node) { 889 nvgpu_list_del(&dev->node); 890 nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd); 891 } 892 nvgpu_spinlock_release(&session->session_lock); 893 894 nvgpu_kfree(g, session); 895} 896 897void nvgpu_clk_arb_release_session(struct gk20a *g, 898 struct nvgpu_clk_session *session) 899{ 900 struct nvgpu_clk_arb *arb = g->clk_arb; 901 902 clk_arb_dbg(g, " "); 903 904 session->zombie = true; 905 nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session); 906 if (arb) 907 nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item); 908} 909 910void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g) 911{ 912 struct nvgpu_clk_arb *arb = g->clk_arb; 913 914 nvgpu_clk_arb_worker_enqueue(g, &arb->update_vf_table_work_item); 915} 916 917/* This function is inherently unsafe to call while arbiter is running 918 * arbiter must be blocked before calling this function 919 */ 920int nvgpu_clk_arb_get_current_pstate(struct gk20a *g) 921{ 922 return NV_ACCESS_ONCE(g->clk_arb->actual->pstate); 923} 924 925void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock) 926{ 927 struct nvgpu_clk_arb *arb = g->clk_arb; 928 929 if (lock) 930 nvgpu_mutex_acquire(&arb->pstate_lock); 931 else 932 nvgpu_mutex_release(&arb->pstate_lock); 933} 934 935bool nvgpu_clk_arb_is_valid_domain(struct gk20a *g, u32 api_domain) 936{ 937 u32 clk_domains = g->ops.clk_arb.get_arbiter_clk_domains(g); 938 939 switch (api_domain) { 940 case NVGPU_CLK_DOMAIN_MCLK: 941 return (clk_domains & CTRL_CLK_DOMAIN_MCLK) != 0; 942 943 case NVGPU_CLK_DOMAIN_GPCCLK: 944 return (clk_domains & CTRL_CLK_DOMAIN_GPC2CLK) != 0; 945 946 default: 947 return false; 948 } 949} 950 951int nvgpu_clk_arb_get_arbiter_clk_range(struct gk20a *g, u32 api_domain, 952 u16 *min_mhz, u16 *max_mhz) 
953{ 954 int ret; 955 956 switch (api_domain) { 957 case NVGPU_CLK_DOMAIN_MCLK: 958 ret = g->ops.clk_arb.get_arbiter_clk_range(g, 959 CTRL_CLK_DOMAIN_MCLK, min_mhz, max_mhz); 960 return ret; 961 962 case NVGPU_CLK_DOMAIN_GPCCLK: 963 ret = g->ops.clk_arb.get_arbiter_clk_range(g, 964 CTRL_CLK_DOMAIN_GPC2CLK, min_mhz, max_mhz); 965 if (!ret) { 966 *min_mhz /= 2; 967 *max_mhz /= 2; 968 } 969 return ret; 970 971 default: 972 return -EINVAL; 973 } 974} 975 976int nvgpu_clk_arb_get_arbiter_clk_f_points(struct gk20a *g, 977 u32 api_domain, u32 *max_points, u16 *fpoints) 978{ 979 int err; 980 u32 i; 981 982 switch (api_domain) { 983 case NVGPU_CLK_DOMAIN_GPCCLK: 984 err = g->ops.clk_arb.get_arbiter_f_points(g, 985 CTRL_CLK_DOMAIN_GPC2CLK, max_points, fpoints); 986 if (err || !fpoints) 987 return err; 988 for (i = 0; i < *max_points; i++) 989 fpoints[i] /= 2; 990 return 0; 991 case NVGPU_CLK_DOMAIN_MCLK: 992 return g->ops.clk_arb.get_arbiter_f_points(g, 993 CTRL_CLK_DOMAIN_MCLK, max_points, fpoints); 994 default: 995 return -EINVAL; 996 } 997} 998 999int nvgpu_clk_arb_get_session_target_mhz(struct nvgpu_clk_session *session, 1000 u32 api_domain, u16 *freq_mhz) 1001{ 1002 int err = 0; 1003 struct nvgpu_clk_arb_target *target = session->target; 1004 1005 if (!nvgpu_clk_arb_is_valid_domain(session->g, api_domain)) { 1006 return -EINVAL; 1007 } 1008 1009 switch (api_domain) { 1010 case NVGPU_CLK_DOMAIN_MCLK: 1011 *freq_mhz = target->mclk; 1012 break; 1013 1014 case NVGPU_CLK_DOMAIN_GPCCLK: 1015 *freq_mhz = target->gpc2clk / 2ULL; 1016 break; 1017 1018 default: 1019 *freq_mhz = 0; 1020 err = -EINVAL; 1021 } 1022 return err; 1023} 1024 1025int nvgpu_clk_arb_get_arbiter_actual_mhz(struct gk20a *g, 1026 u32 api_domain, u16 *freq_mhz) 1027{ 1028 struct nvgpu_clk_arb *arb = g->clk_arb; 1029 int err = 0; 1030 struct nvgpu_clk_arb_target *actual = arb->actual; 1031 1032 if (!nvgpu_clk_arb_is_valid_domain(g, api_domain)) { 1033 return -EINVAL; 1034 } 1035 1036 switch (api_domain) { 1037 
case NVGPU_CLK_DOMAIN_MCLK: 1038 *freq_mhz = actual->mclk; 1039 break; 1040 1041 case NVGPU_CLK_DOMAIN_GPCCLK: 1042 *freq_mhz = actual->gpc2clk / 2ULL; 1043 break; 1044 1045 default: 1046 *freq_mhz = 0; 1047 err = -EINVAL; 1048 } 1049 return err; 1050} 1051 1052unsigned long nvgpu_clk_measure_freq(struct gk20a *g, u32 api_domain) 1053{ 1054 unsigned long freq = 0UL; 1055 1056 switch (api_domain) { 1057 case CTRL_CLK_DOMAIN_GPC2CLK: 1058 freq = g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_GPCCLK) * 2UL; 1059 break; 1060 default: 1061 break; 1062 } 1063 return freq; 1064} 1065 1066int nvgpu_clk_arb_get_arbiter_effective_mhz(struct gk20a *g, 1067 u32 api_domain, u16 *freq_mhz) 1068{ 1069 if (!nvgpu_clk_arb_is_valid_domain(g, api_domain)) { 1070 return -EINVAL; 1071 } 1072 1073 switch (api_domain) { 1074 case NVGPU_CLK_DOMAIN_MCLK: 1075 *freq_mhz = g->ops.clk.measure_freq(g, CTRL_CLK_DOMAIN_MCLK) / 1076 1000000ULL; 1077 return 0; 1078 1079 case NVGPU_CLK_DOMAIN_GPCCLK: 1080 *freq_mhz = g->ops.clk.measure_freq(g, 1081 CTRL_CLK_DOMAIN_GPC2CLK) / 2000000ULL; 1082 return 0; 1083 1084 default: 1085 return -EINVAL; 1086 } 1087}
diff --git a/include/clk/clk_domain.c b/include/clk/clk_domain.c
deleted file mode 100644
index 3b64f51..0000000
--- a/include/clk/clk_domain.c
+++ /dev/null
@@ -1,1666 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bios.h> 24#include <nvgpu/gk20a.h> 25 26#include "clk.h" 27#include "clk_fll.h" 28#include "clk_domain.h" 29#include "boardobj/boardobjgrp.h" 30#include "boardobj/boardobjgrp_e32.h" 31#include "ctrl/ctrlclk.h" 32#include "ctrl/ctrlvolt.h" 33 34static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs); 35 36static int devinit_get_clocks_table(struct gk20a *g, 37 struct clk_domains *pdomainobjs); 38 39static int clk_domain_pmudatainit_super(struct gk20a *g, struct boardobj 40 *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); 41 42static struct vbios_clocks_table_1x_hal_clock_entry 43 vbiosclktbl1xhalentry_gp[] = { 44 { clkwhich_gpc2clk, true, 1, }, 45 { clkwhich_xbar2clk, true, 1, }, 46 { clkwhich_mclk, false, 1, }, 47 { clkwhich_sys2clk, true, 1, }, 48 { clkwhich_hub2clk, false, 1, }, 49 { clkwhich_nvdclk, false, 1, }, 50 { clkwhich_pwrclk, false, 1, }, 51 { clkwhich_dispclk, false, 1, }, 52 { clkwhich_pciegenclk, false, 1, } 53}; 54/* 55 * Updated from RM devinit_clock.c 56 * GV100 is 0x03 and 57 * GP10x is 0x02 in clocks_hal. 
58 */ 59static struct vbios_clocks_table_1x_hal_clock_entry 60 vbiosclktbl1xhalentry_gv[] = { 61 { clkwhich_gpcclk, true, 2, }, 62 { clkwhich_xbarclk, true, 1, }, 63 { clkwhich_mclk, false, 1, }, 64 { clkwhich_sysclk, true, 1, }, 65 { clkwhich_hubclk, false, 1, }, 66 { clkwhich_nvdclk, true, 1, }, 67 { clkwhich_pwrclk, false, 1, }, 68 { clkwhich_dispclk, false, 1, }, 69 { clkwhich_pciegenclk, false, 1, }, 70 { clkwhich_hostclk, true, 1, } 71}; 72 73static u32 clktranslatehalmumsettoapinumset(u32 clkhaldomains) 74{ 75 u32 clkapidomains = 0; 76 77 if (clkhaldomains & BIT(clkwhich_gpcclk)) { 78 clkapidomains |= CTRL_CLK_DOMAIN_GPCCLK; 79 } 80 if (clkhaldomains & BIT(clkwhich_xbarclk)) { 81 clkapidomains |= CTRL_CLK_DOMAIN_XBARCLK; 82 } 83 if (clkhaldomains & BIT(clkwhich_sysclk)) { 84 clkapidomains |= CTRL_CLK_DOMAIN_SYSCLK; 85 } 86 if (clkhaldomains & BIT(clkwhich_hubclk)) { 87 clkapidomains |= CTRL_CLK_DOMAIN_HUBCLK; 88 } 89 if (clkhaldomains & BIT(clkwhich_hostclk)) { 90 clkapidomains |= CTRL_CLK_DOMAIN_HOSTCLK; 91 } 92 if (clkhaldomains & BIT(clkwhich_gpc2clk)) { 93 clkapidomains |= CTRL_CLK_DOMAIN_GPC2CLK; 94 } 95 if (clkhaldomains & BIT(clkwhich_xbar2clk)) { 96 clkapidomains |= CTRL_CLK_DOMAIN_XBAR2CLK; 97 } 98 if (clkhaldomains & BIT(clkwhich_sys2clk)) { 99 clkapidomains |= CTRL_CLK_DOMAIN_SYS2CLK; 100 } 101 if (clkhaldomains & BIT(clkwhich_hub2clk)) { 102 clkapidomains |= CTRL_CLK_DOMAIN_HUB2CLK; 103 } 104 if (clkhaldomains & BIT(clkwhich_pwrclk)) { 105 clkapidomains |= CTRL_CLK_DOMAIN_PWRCLK; 106 } 107 if (clkhaldomains & BIT(clkwhich_pciegenclk)) { 108 clkapidomains |= CTRL_CLK_DOMAIN_PCIEGENCLK; 109 } 110 if (clkhaldomains & BIT(clkwhich_mclk)) { 111 clkapidomains |= CTRL_CLK_DOMAIN_MCLK; 112 } 113 if (clkhaldomains & BIT(clkwhich_nvdclk)) { 114 clkapidomains |= CTRL_CLK_DOMAIN_NVDCLK; 115 } 116 if (clkhaldomains & BIT(clkwhich_dispclk)) { 117 clkapidomains |= CTRL_CLK_DOMAIN_DISPCLK; 118 } 119 120 return clkapidomains; 121} 122 123static int 
_clk_domains_pmudatainit_3x(struct gk20a *g, 124 struct boardobjgrp *pboardobjgrp, 125 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 126{ 127 struct nv_pmu_clk_clk_domain_boardobjgrp_set_header *pset = 128 (struct nv_pmu_clk_clk_domain_boardobjgrp_set_header *) 129 pboardobjgrppmu; 130 struct clk_domains *pdomains = (struct clk_domains *)pboardobjgrp; 131 int status = 0; 132 133 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 134 if (status) { 135 nvgpu_err(g, 136 "error updating pmu boardobjgrp for clk domain 0x%x", 137 status); 138 goto done; 139 } 140 141 pset->vbios_domains = pdomains->vbios_domains; 142 pset->cntr_sampling_periodms = pdomains->cntr_sampling_periodms; 143 pset->version = CLK_DOMAIN_BOARDOBJGRP_VERSION; 144 pset->b_override_o_v_o_c = false; 145 pset->b_debug_mode = false; 146 pset->b_enforce_vf_monotonicity = pdomains->b_enforce_vf_monotonicity; 147 pset->b_enforce_vf_smoothening = pdomains->b_enforce_vf_smoothening; 148 if (g->ops.clk.split_rail_support) { 149 pset->volt_rails_max = 2; 150 } else { 151 pset->volt_rails_max = 1; 152 } 153 status = boardobjgrpmask_export( 154 &pdomains->master_domains_mask.super, 155 pdomains->master_domains_mask.super.bitcount, 156 &pset->master_domains_mask.super); 157 158 memcpy(&pset->deltas, &pdomains->deltas, 159 (sizeof(struct ctrl_clk_clk_delta))); 160 161done: 162 return status; 163} 164 165static int _clk_domains_pmudata_instget(struct gk20a *g, 166 struct nv_pmu_boardobjgrp *pmuboardobjgrp, 167 struct nv_pmu_boardobj **ppboardobjpmudata, 168 u8 idx) 169{ 170 struct nv_pmu_clk_clk_domain_boardobj_grp_set *pgrp_set = 171 (struct nv_pmu_clk_clk_domain_boardobj_grp_set *) 172 pmuboardobjgrp; 173 174 nvgpu_log_info(g, " "); 175 176 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 177 if (((u32)BIT(idx) & 178 pgrp_set->hdr.data.super.obj_mask.super.data[0]) == 0) { 179 return -EINVAL; 180 } 181 182 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 183 
&pgrp_set->objects[idx].data.board_obj; 184 nvgpu_log_info(g, " Done"); 185 return 0; 186} 187 188int clk_domain_sw_setup(struct gk20a *g) 189{ 190 int status; 191 struct boardobjgrp *pboardobjgrp = NULL; 192 struct clk_domains *pclkdomainobjs; 193 struct clk_domain *pdomain; 194 struct clk_domain_3x_master *pdomain_master; 195 struct clk_domain_3x_slave *pdomain_slave; 196 u8 i; 197 198 nvgpu_log_info(g, " "); 199 200 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.clk_domainobjs.super); 201 if (status) { 202 nvgpu_err(g, 203 "error creating boardobjgrp for clk domain, status - 0x%x", 204 status); 205 goto done; 206 } 207 208 pboardobjgrp = &g->clk_pmu.clk_domainobjs.super.super; 209 pclkdomainobjs = &(g->clk_pmu.clk_domainobjs); 210 211 BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_DOMAIN); 212 213 status = BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, 214 clk, CLK, clk_domain, CLK_DOMAIN); 215 if (status) { 216 nvgpu_err(g, 217 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 218 status); 219 goto done; 220 } 221 222 pboardobjgrp->pmudatainit = _clk_domains_pmudatainit_3x; 223 pboardobjgrp->pmudatainstget = _clk_domains_pmudata_instget; 224 225 /* Initialize mask to zero.*/ 226 boardobjgrpmask_e32_init(&pclkdomainobjs->prog_domains_mask, NULL); 227 boardobjgrpmask_e32_init(&pclkdomainobjs->master_domains_mask, NULL); 228 pclkdomainobjs->b_enforce_vf_monotonicity = true; 229 pclkdomainobjs->b_enforce_vf_smoothening = true; 230 231 memset(&pclkdomainobjs->ordered_noise_aware_list, 0, 232 sizeof(pclkdomainobjs->ordered_noise_aware_list)); 233 234 memset(&pclkdomainobjs->ordered_noise_unaware_list, 0, 235 sizeof(pclkdomainobjs->ordered_noise_unaware_list)); 236 237 memset(&pclkdomainobjs->deltas, 0, 238 sizeof(struct ctrl_clk_clk_delta)); 239 240 status = devinit_get_clocks_table(g, pclkdomainobjs); 241 if (status) { 242 goto done; 243 } 244 245 BOARDOBJGRP_FOR_EACH(&(pclkdomainobjs->super.super), 246 struct clk_domain *, pdomain, i) { 
247 pdomain_master = NULL; 248 if (pdomain->super.implements(g, &pdomain->super, 249 CTRL_CLK_CLK_DOMAIN_TYPE_3X_PROG)) { 250 status = boardobjgrpmask_bitset( 251 &pclkdomainobjs->prog_domains_mask.super, i); 252 if (status) { 253 goto done; 254 } 255 } 256 257 if (pdomain->super.implements(g, &pdomain->super, 258 CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) { 259 status = boardobjgrpmask_bitset( 260 &pclkdomainobjs->master_domains_mask.super, i); 261 if (status) { 262 goto done; 263 } 264 } 265 266 if (pdomain->super.implements(g, &pdomain->super, 267 CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE)) { 268 pdomain_slave = 269 (struct clk_domain_3x_slave *)pdomain; 270 pdomain_master = 271 (struct clk_domain_3x_master *) 272 (CLK_CLK_DOMAIN_GET((&g->clk_pmu), 273 pdomain_slave->master_idx)); 274 pdomain_master->slave_idxs_mask |= BIT(i); 275 } 276 277 } 278 279done: 280 nvgpu_log_info(g, " done status %x", status); 281 return status; 282} 283 284int clk_domain_pmu_setup(struct gk20a *g) 285{ 286 int status; 287 struct boardobjgrp *pboardobjgrp = NULL; 288 289 nvgpu_log_info(g, " "); 290 291 pboardobjgrp = &g->clk_pmu.clk_domainobjs.super.super; 292 293 if (!pboardobjgrp->bconstructed) { 294 return -EINVAL; 295 } 296 297 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 298 299 nvgpu_log_info(g, "Done"); 300 return status; 301} 302 303static int devinit_get_clocks_table_35(struct gk20a *g, 304 struct clk_domains *pclkdomainobjs, u8 *clocks_table_ptr) 305{ 306 int status = 0; 307 struct vbios_clocks_table_35_header clocks_table_header = { 0 }; 308 struct vbios_clocks_table_35_entry clocks_table_entry = { 0 }; 309 struct vbios_clocks_table_1x_hal_clock_entry *vbiosclktbl1xhalentry; 310 u8 *clocks_tbl_entry_ptr = NULL; 311 u32 index = 0; 312 struct clk_domain *pclkdomain_dev; 313 union { 314 struct boardobj boardobj; 315 struct clk_domain clk_domain; 316 struct clk_domain_3x v3x; 317 struct clk_domain_3x_fixed v3x_fixed; 318 struct clk_domain_35_prog v35_prog; 319 struct 
clk_domain_35_master v35_master; 320 struct clk_domain_35_slave v35_slave; 321 } clk_domain_data; 322 323 nvgpu_log_info(g, " "); 324 325 memcpy(&clocks_table_header, clocks_table_ptr, 326 VBIOS_CLOCKS_TABLE_35_HEADER_SIZE_09); 327 if (clocks_table_header.header_size < 328 (u8) VBIOS_CLOCKS_TABLE_35_HEADER_SIZE_09) { 329 status = -EINVAL; 330 goto done; 331 } 332 333 if (clocks_table_header.entry_size < 334 (u8) VBIOS_CLOCKS_TABLE_35_ENTRY_SIZE_11) { 335 status = -EINVAL; 336 goto done; 337 } 338 339 switch (clocks_table_header.clocks_hal) { 340 case CLK_TABLE_HAL_ENTRY_GP: 341 { 342 vbiosclktbl1xhalentry = vbiosclktbl1xhalentry_gp; 343 break; 344 } 345 case CLK_TABLE_HAL_ENTRY_GV: 346 { 347 vbiosclktbl1xhalentry = vbiosclktbl1xhalentry_gv; 348 break; 349 } 350 default: 351 { 352 status = -EINVAL; 353 goto done; 354 } 355 } 356 357 pclkdomainobjs->cntr_sampling_periodms = 358 (u16)clocks_table_header.cntr_sampling_periodms; 359 360 /* Read table entries*/ 361 clocks_tbl_entry_ptr = clocks_table_ptr + 362 clocks_table_header.header_size; 363 for (index = 0; index < clocks_table_header.entry_count; index++) { 364 memcpy((void*) &clocks_table_entry, (void*) clocks_tbl_entry_ptr, 365 clocks_table_header.entry_size); 366 clk_domain_data.clk_domain.domain = 367 (u8) vbiosclktbl1xhalentry[index].domain; 368 clk_domain_data.clk_domain.api_domain = 369 clktranslatehalmumsettoapinumset( 370 (u32) BIT(clk_domain_data.clk_domain.domain)); 371 clk_domain_data.v3x.b_noise_aware_capable = 372 vbiosclktbl1xhalentry[index].b_noise_aware_capable; 373 374 switch (BIOS_GET_FIELD(clocks_table_entry.flags0, 375 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE)) { 376 case NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_FIXED: 377 { 378 clk_domain_data.boardobj.type = 379 CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED; 380 clk_domain_data.v3x_fixed.freq_mhz = (u16)BIOS_GET_FIELD( 381 clocks_table_entry.param1, 382 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_FIXED_FREQUENCY_MHZ); 383 break; 384 } 385 386 case 
NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_MASTER: 387 { 388 clk_domain_data.boardobj.type = 389 CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER; 390 clk_domain_data.v35_prog.super.clk_prog_idx_first = 391 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 392 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_FIRST)); 393 clk_domain_data.v35_prog.super.clk_prog_idx_last = 394 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 395 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_LAST)); 396 clk_domain_data.v35_prog.super.noise_unaware_ordering_index = 397 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 398 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_UNAWARE_ORDERING_IDX)); 399 if (clk_domain_data.v3x.b_noise_aware_capable) { 400 clk_domain_data.v35_prog.super.b_force_noise_unaware_ordering = 401 (bool)(BIOS_GET_FIELD(clocks_table_entry.param2, 402 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING)); 403 404 } else { 405 clk_domain_data.v35_prog.super.noise_aware_ordering_index = 406 CTRL_CLK_CLK_DOMAIN_3X_PROG_ORDERING_INDEX_INVALID; 407 clk_domain_data.v35_prog.super.b_force_noise_unaware_ordering = false; 408 } 409 clk_domain_data.v35_prog.pre_volt_ordering_index = 410 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 411 NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_PRE_VOLT_ORDERING_IDX)); 412 413 clk_domain_data.v35_prog.post_volt_ordering_index = 414 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 415 NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_POST_VOLT_ORDERING_IDX)); 416 417 clk_domain_data.v35_prog.super.factory_delta.data.delta_khz = 0; 418 clk_domain_data.v35_prog.super.factory_delta.type = 0; 419 420 clk_domain_data.v35_prog.super.freq_delta_min_mhz = 421 (u16)(BIOS_GET_FIELD(clocks_table_entry.param1, 422 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MIN_MHZ)); 423 424 clk_domain_data.v35_prog.super.freq_delta_max_mhz = 425 (u16)(BIOS_GET_FIELD(clocks_table_entry.param1, 426 
NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MAX_MHZ)); 427 clk_domain_data.v35_prog.clk_vf_curve_count = 428 vbiosclktbl1xhalentry[index].clk_vf_curve_count; 429 break; 430 } 431 432 case NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_SLAVE: 433 { 434 clk_domain_data.boardobj.type = 435 CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE; 436 clk_domain_data.v35_prog.super.clk_prog_idx_first = 437 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 438 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_FIRST)); 439 clk_domain_data.v35_prog.super.clk_prog_idx_last = 440 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 441 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_LAST)); 442 clk_domain_data.v35_prog.super.noise_unaware_ordering_index = 443 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 444 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_UNAWARE_ORDERING_IDX)); 445 446 if (clk_domain_data.v3x.b_noise_aware_capable) { 447 clk_domain_data.v35_prog.super.b_force_noise_unaware_ordering = 448 (bool)(BIOS_GET_FIELD(clocks_table_entry.param2, 449 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING)); 450 451 } else { 452 clk_domain_data.v35_prog.super.noise_aware_ordering_index = 453 CTRL_CLK_CLK_DOMAIN_3X_PROG_ORDERING_INDEX_INVALID; 454 clk_domain_data.v35_prog.super.b_force_noise_unaware_ordering = false; 455 } 456 clk_domain_data.v35_prog.pre_volt_ordering_index = 457 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 458 NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_PRE_VOLT_ORDERING_IDX)); 459 460 clk_domain_data.v35_prog.post_volt_ordering_index = 461 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 462 NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_POST_VOLT_ORDERING_IDX)); 463 464 clk_domain_data.v35_prog.super.factory_delta.data.delta_khz = 0; 465 clk_domain_data.v35_prog.super.factory_delta.type = 0; 466 clk_domain_data.v35_prog.super.freq_delta_min_mhz = 0; 467 clk_domain_data.v35_prog.super.freq_delta_max_mhz = 0; 468 
clk_domain_data.v35_slave.slave.master_idx = 469 (u8)(BIOS_GET_FIELD(clocks_table_entry.param1, 470 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_SLAVE_MASTER_DOMAIN)); 471 break; 472 } 473 474 default: 475 { 476 nvgpu_err(g, 477 "error reading clock domain entry %d", index); 478 status = -EINVAL; 479 goto done; 480 } 481 482 } 483 pclkdomain_dev = construct_clk_domain(g, 484 (void *)&clk_domain_data); 485 if (pclkdomain_dev == NULL) { 486 nvgpu_err(g, 487 "unable to construct clock domain boardobj for %d", 488 index); 489 status = -EINVAL; 490 goto done; 491 } 492 status = boardobjgrp_objinsert( 493 &pclkdomainobjs->super.super, 494 (struct boardobj *)(void*) pclkdomain_dev, index); 495 if (status != 0UL) { 496 nvgpu_err(g, 497 "unable to insert clock domain boardobj for %d", index); 498 status = (u32) -EINVAL; 499 goto done; 500 } 501 clocks_tbl_entry_ptr += clocks_table_header.entry_size; 502 } 503 504done: 505 nvgpu_log_info(g, " done status %x", status); 506 return status; 507} 508 509static int devinit_get_clocks_table_1x(struct gk20a *g, 510 struct clk_domains *pclkdomainobjs, u8 *clocks_table_ptr) 511{ 512 int status = 0; 513 struct vbios_clocks_table_1x_header clocks_table_header = { 0 }; 514 struct vbios_clocks_table_1x_entry clocks_table_entry = { 0 }; 515 struct vbios_clocks_table_1x_hal_clock_entry *vbiosclktbl1xhalentry; 516 u8 *clocks_tbl_entry_ptr = NULL; 517 u32 index = 0; 518 struct clk_domain *pclkdomain_dev; 519 union { 520 struct boardobj boardobj; 521 struct clk_domain clk_domain; 522 struct clk_domain_3x v3x; 523 struct clk_domain_3x_fixed v3x_fixed; 524 struct clk_domain_3x_prog v3x_prog; 525 struct clk_domain_3x_master v3x_master; 526 struct clk_domain_3x_slave v3x_slave; 527 } clk_domain_data; 528 529 nvgpu_log_info(g, " "); 530 531 memcpy(&clocks_table_header, clocks_table_ptr, 532 VBIOS_CLOCKS_TABLE_1X_HEADER_SIZE_07); 533 if (clocks_table_header.header_size < 534 (u8) VBIOS_CLOCKS_TABLE_1X_HEADER_SIZE_07) { 535 status = -EINVAL; 536 goto done; 
537 } 538 539 if (clocks_table_header.entry_size < 540 (u8) VBIOS_CLOCKS_TABLE_1X_ENTRY_SIZE_09) { 541 status = -EINVAL; 542 goto done; 543 } 544 545 switch (clocks_table_header.clocks_hal) { 546 case CLK_TABLE_HAL_ENTRY_GP: 547 { 548 vbiosclktbl1xhalentry = vbiosclktbl1xhalentry_gp; 549 break; 550 } 551 case CLK_TABLE_HAL_ENTRY_GV: 552 { 553 vbiosclktbl1xhalentry = vbiosclktbl1xhalentry_gv; 554 break; 555 } 556 default: 557 { 558 status = -EINVAL; 559 goto done; 560 } 561 } 562 563 pclkdomainobjs->cntr_sampling_periodms = 564 (u16)clocks_table_header.cntr_sampling_periodms; 565 566 /* Read table entries*/ 567 clocks_tbl_entry_ptr = clocks_table_ptr + 568 VBIOS_CLOCKS_TABLE_1X_HEADER_SIZE_07; 569 for (index = 0; index < clocks_table_header.entry_count; index++) { 570 memcpy((void*) &clocks_table_entry, (void*) clocks_tbl_entry_ptr, 571 clocks_table_header.entry_size); 572 clk_domain_data.clk_domain.domain = 573 (u8) vbiosclktbl1xhalentry[index].domain; 574 clk_domain_data.clk_domain.api_domain = 575 clktranslatehalmumsettoapinumset( 576 BIT(clk_domain_data.clk_domain.domain)); 577 clk_domain_data.v3x.b_noise_aware_capable = 578 vbiosclktbl1xhalentry[index].b_noise_aware_capable; 579 580 switch (BIOS_GET_FIELD(clocks_table_entry.flags0, 581 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE)) { 582 case NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_FIXED: 583 { 584 clk_domain_data.boardobj.type = 585 CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED; 586 clk_domain_data.v3x_fixed.freq_mhz = (u16)BIOS_GET_FIELD( 587 clocks_table_entry.param1, 588 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_FIXED_FREQUENCY_MHZ); 589 break; 590 } 591 592 case NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_MASTER: 593 { 594 clk_domain_data.boardobj.type = 595 CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER; 596 clk_domain_data.v3x_prog.clk_prog_idx_first = 597 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 598 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_FIRST)); 599 clk_domain_data.v3x_prog.clk_prog_idx_last = 600 
(u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 601 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_LAST)); 602 clk_domain_data.v3x_prog.noise_unaware_ordering_index = 603 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 604 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_UNAWARE_ORDERING_IDX)); 605 if (clk_domain_data.v3x.b_noise_aware_capable) { 606 clk_domain_data.v3x_prog.noise_aware_ordering_index = 607 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 608 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_AWARE_ORDERING_IDX)); 609 clk_domain_data.v3x_prog.b_force_noise_unaware_ordering = 610 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 611 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING)); 612 } else { 613 clk_domain_data.v3x_prog.noise_aware_ordering_index = 614 CTRL_CLK_CLK_DOMAIN_3X_PROG_ORDERING_INDEX_INVALID; 615 clk_domain_data.v3x_prog.b_force_noise_unaware_ordering = false; 616 } 617 618 clk_domain_data.v3x_prog.factory_delta.data.delta_khz = 0; 619 clk_domain_data.v3x_prog.factory_delta.type = 0; 620 621 clk_domain_data.v3x_prog.freq_delta_min_mhz = 622 (u16)(BIOS_GET_FIELD(clocks_table_entry.param1, 623 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MIN_MHZ)); 624 625 clk_domain_data.v3x_prog.freq_delta_max_mhz = 626 (u16)(BIOS_GET_FIELD(clocks_table_entry.param1, 627 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MAX_MHZ)); 628 break; 629 } 630 631 case NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_SLAVE: 632 { 633 clk_domain_data.boardobj.type = 634 CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE; 635 clk_domain_data.v3x_prog.clk_prog_idx_first = 636 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 637 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_FIRST)); 638 clk_domain_data.v3x_prog.clk_prog_idx_last = 639 (u8)(BIOS_GET_FIELD(clocks_table_entry.param0, 640 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_LAST)); 641 clk_domain_data.v3x_prog.noise_unaware_ordering_index = 642 
(u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 643 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_UNAWARE_ORDERING_IDX)); 644 645 if (clk_domain_data.v3x.b_noise_aware_capable) { 646 clk_domain_data.v3x_prog.noise_aware_ordering_index = 647 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 648 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_AWARE_ORDERING_IDX)); 649 clk_domain_data.v3x_prog.b_force_noise_unaware_ordering = 650 (u8)(BIOS_GET_FIELD(clocks_table_entry.param2, 651 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING)); 652 } else { 653 clk_domain_data.v3x_prog.noise_aware_ordering_index = 654 CTRL_CLK_CLK_DOMAIN_3X_PROG_ORDERING_INDEX_INVALID; 655 clk_domain_data.v3x_prog.b_force_noise_unaware_ordering = false; 656 } 657 clk_domain_data.v3x_prog.factory_delta.data.delta_khz = 0; 658 clk_domain_data.v3x_prog.factory_delta.type = 0; 659 clk_domain_data.v3x_prog.freq_delta_min_mhz = 0; 660 clk_domain_data.v3x_prog.freq_delta_max_mhz = 0; 661 clk_domain_data.v3x_slave.master_idx = 662 (u8)(BIOS_GET_FIELD(clocks_table_entry.param1, 663 NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_SLAVE_MASTER_DOMAIN)); 664 break; 665 } 666 667 default: 668 { 669 nvgpu_err(g, 670 "error reading clock domain entry %d", index); 671 status = (u32) -EINVAL; 672 goto done; 673 } 674 675 } 676 pclkdomain_dev = construct_clk_domain(g, 677 (void *)&clk_domain_data); 678 if (pclkdomain_dev == NULL) { 679 nvgpu_err(g, 680 "unable to construct clock domain boardobj for %d", 681 index); 682 status = (u32) -EINVAL; 683 goto done; 684 } 685 status = boardobjgrp_objinsert(&pclkdomainobjs->super.super, 686 (struct boardobj *)(void *)pclkdomain_dev, index); 687 if (status != 0UL) { 688 nvgpu_err(g, 689 "unable to insert clock domain boardobj for %d", index); 690 status = (u32) -EINVAL; 691 goto done; 692 } 693 clocks_tbl_entry_ptr += clocks_table_header.entry_size; 694 } 695 696done: 697 nvgpu_log_info(g, " done status %x", status); 698 return status; 699} 700 701static int 
devinit_get_clocks_table(struct gk20a *g, 702 struct clk_domains *pclkdomainobjs) 703{ 704 int status = 0; 705 u8 *clocks_table_ptr = NULL; 706 struct vbios_clocks_table_1x_header clocks_table_header = { 0 }; 707 nvgpu_log_info(g, " "); 708 709 clocks_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 710 g->bios.clock_token, CLOCKS_TABLE); 711 if (clocks_table_ptr == NULL) { 712 status = -EINVAL; 713 goto done; 714 } 715 memcpy(&clocks_table_header, clocks_table_ptr, 716 VBIOS_CLOCKS_TABLE_1X_HEADER_SIZE_07); 717 if (clocks_table_header.version == 0x35U) { 718 devinit_get_clocks_table_35(g, pclkdomainobjs, clocks_table_ptr); 719 } 720 else { 721 devinit_get_clocks_table_1x(g, pclkdomainobjs, clocks_table_ptr); 722 } 723 done: 724 return status; 725 726} 727 728static int clkdomainclkproglink_not_supported(struct gk20a *g, 729 struct clk_pmupstate *pclk, 730 struct clk_domain *pdomain) 731{ 732 nvgpu_log_info(g, " "); 733 return -EINVAL; 734} 735 736static int clkdomainvfsearch_stub( 737 struct gk20a *g, 738 struct clk_pmupstate *pclk, 739 struct clk_domain *pdomain, 740 u16 *clkmhz, 741 u32 *voltuv, 742 u8 rail) 743 744{ 745 nvgpu_log_info(g, " "); 746 return -EINVAL; 747} 748 749static u32 clkdomaingetfpoints_stub( 750 struct gk20a *g, 751 struct clk_pmupstate *pclk, 752 struct clk_domain *pdomain, 753 u32 *pfpointscount, 754 u16 *pfreqpointsinmhz, 755 u8 rail) 756{ 757 nvgpu_log_info(g, " "); 758 return -EINVAL; 759} 760 761 762static int clk_domain_construct_super(struct gk20a *g, 763 struct boardobj **ppboardobj, 764 u16 size, void *pargs) 765{ 766 struct clk_domain *pdomain; 767 struct clk_domain *ptmpdomain = (struct clk_domain *)pargs; 768 int status = 0; 769 770 status = boardobj_construct_super(g, ppboardobj, 771 size, pargs); 772 773 if (status) { 774 return -EINVAL; 775 } 776 777 pdomain = (struct clk_domain *)*ppboardobj; 778 779 pdomain->super.pmudatainit = 780 clk_domain_pmudatainit_super; 781 782 pdomain->clkdomainclkproglink = 783 
clkdomainclkproglink_not_supported; 784 785 pdomain->clkdomainclkvfsearch = 786 clkdomainvfsearch_stub; 787 788 pdomain->clkdomainclkgetfpoints = 789 clkdomaingetfpoints_stub; 790 791 pdomain->api_domain = ptmpdomain->api_domain; 792 pdomain->domain = ptmpdomain->domain; 793 pdomain->perf_domain_grp_idx = 794 ptmpdomain->perf_domain_grp_idx; 795 796 return status; 797} 798 799static int _clk_domain_pmudatainit_3x(struct gk20a *g, 800 struct boardobj *board_obj_ptr, 801 struct nv_pmu_boardobj *ppmudata) 802{ 803 int status = 0; 804 struct clk_domain_3x *pclk_domain_3x; 805 struct nv_pmu_clk_clk_domain_3x_boardobj_set *pset; 806 807 nvgpu_log_info(g, " "); 808 809 status = clk_domain_pmudatainit_super(g, board_obj_ptr, ppmudata); 810 if (status != 0) { 811 return status; 812 } 813 814 pclk_domain_3x = (struct clk_domain_3x *)board_obj_ptr; 815 816 pset = (struct nv_pmu_clk_clk_domain_3x_boardobj_set *)ppmudata; 817 818 pset->b_noise_aware_capable = pclk_domain_3x->b_noise_aware_capable; 819 820 return status; 821} 822 823static int clk_domain_construct_3x(struct gk20a *g, 824 struct boardobj **ppboardobj, 825 u16 size, void *pargs) 826{ 827 struct boardobj *ptmpobj = (struct boardobj *)pargs; 828 struct clk_domain_3x *pdomain; 829 struct clk_domain_3x *ptmpdomain = 830 (struct clk_domain_3x *)pargs; 831 int status = 0; 832 833 ptmpobj->type_mask = BIT(CTRL_CLK_CLK_DOMAIN_TYPE_3X); 834 status = clk_domain_construct_super(g, ppboardobj, 835 size, pargs); 836 if (status) { 837 return -EINVAL; 838 } 839 840 pdomain = (struct clk_domain_3x *)*ppboardobj; 841 842 pdomain->super.super.pmudatainit = 843 _clk_domain_pmudatainit_3x; 844 845 pdomain->b_noise_aware_capable = ptmpdomain->b_noise_aware_capable; 846 847 return status; 848} 849 850static int clkdomainclkproglink_3x_prog(struct gk20a *g, 851 struct clk_pmupstate *pclk, 852 struct clk_domain *pdomain) 853{ 854 int status = 0; 855 struct clk_domain_3x_prog *p3xprog = 856 (struct clk_domain_3x_prog *)pdomain; 857 struct 
clk_prog *pprog = NULL; 858 u8 i; 859 860 nvgpu_log_info(g, " "); 861 862 for (i = p3xprog->clk_prog_idx_first; 863 i <= p3xprog->clk_prog_idx_last; 864 i++) { 865 pprog = CLK_CLK_PROG_GET(pclk, i); 866 if (pprog == NULL) { 867 status = -EINVAL; 868 } 869 } 870 return status; 871} 872 873static int clkdomaingetslaveclk(struct gk20a *g, 874 struct clk_pmupstate *pclk, 875 struct clk_domain *pdomain, 876 u16 *pclkmhz, 877 u16 masterclkmhz) 878{ 879 int status = 0; 880 struct clk_prog *pprog = NULL; 881 struct clk_prog_1x_master *pprog1xmaster = NULL; 882 u8 slaveidx; 883 struct clk_domain_3x_master *p3xmaster; 884 885 nvgpu_log_info(g, " "); 886 887 if (pclkmhz == NULL) { 888 return -EINVAL; 889 } 890 891 if (masterclkmhz == 0) { 892 return -EINVAL; 893 } 894 895 slaveidx = BOARDOBJ_GET_IDX(pdomain); 896 p3xmaster = (struct clk_domain_3x_master *) 897 CLK_CLK_DOMAIN_GET(pclk, 898 ((struct clk_domain_3x_slave *) 899 pdomain)->master_idx); 900 pprog = CLK_CLK_PROG_GET(pclk, p3xmaster->super.clk_prog_idx_first); 901 pprog1xmaster = (struct clk_prog_1x_master *)pprog; 902 903 status = pprog1xmaster->getslaveclk(g, pclk, pprog1xmaster, 904 slaveidx, pclkmhz, masterclkmhz); 905 return status; 906} 907 908static int clkdomainvfsearch(struct gk20a *g, 909 struct clk_pmupstate *pclk, 910 struct clk_domain *pdomain, 911 u16 *pclkmhz, 912 u32 *pvoltuv, 913 u8 rail) 914{ 915 int status = 0; 916 struct clk_domain_3x_master *p3xmaster = 917 (struct clk_domain_3x_master *)pdomain; 918 struct clk_prog *pprog = NULL; 919 struct clk_prog_1x_master *pprog1xmaster = NULL; 920 u8 i; 921 u8 *pslaveidx = NULL; 922 u8 slaveidx; 923 u16 clkmhz; 924 u32 voltuv; 925 u16 bestclkmhz; 926 u32 bestvoltuv; 927 928 nvgpu_log_info(g, " "); 929 930 if ((pclkmhz == NULL) || (pvoltuv == NULL)) { 931 return -EINVAL; 932 } 933 934 if ((*pclkmhz != 0) && (*pvoltuv != 0)) { 935 return -EINVAL; 936 } 937 938 bestclkmhz = *pclkmhz; 939 bestvoltuv = *pvoltuv; 940 941 if (pdomain->super.implements(g, 
&pdomain->super, 942 CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE)) { 943 slaveidx = BOARDOBJ_GET_IDX(pdomain); 944 pslaveidx = &slaveidx; 945 p3xmaster = (struct clk_domain_3x_master *) 946 CLK_CLK_DOMAIN_GET(pclk, 947 ((struct clk_domain_3x_slave *) 948 pdomain)->master_idx); 949 } 950 /* Iterate over the set of CLK_PROGs pointed at by this domain.*/ 951 for (i = p3xmaster->super.clk_prog_idx_first; 952 i <= p3xmaster->super.clk_prog_idx_last; 953 i++) { 954 clkmhz = *pclkmhz; 955 voltuv = *pvoltuv; 956 pprog = CLK_CLK_PROG_GET(pclk, i); 957 958 /* MASTER CLK_DOMAINs must point to MASTER CLK_PROGs.*/ 959 if (!pprog->super.implements(g, &pprog->super, 960 CTRL_CLK_CLK_PROG_TYPE_1X_MASTER)) { 961 status = -EINVAL; 962 goto done; 963 } 964 965 pprog1xmaster = (struct clk_prog_1x_master *)pprog; 966 status = pprog1xmaster->vflookup(g, pclk, pprog1xmaster, 967 pslaveidx, &clkmhz, &voltuv, rail); 968 /* if look up has found the V or F value matching to other 969 exit */ 970 if (status == 0) { 971 if (*pclkmhz == 0) { 972 bestclkmhz = clkmhz; 973 } else { 974 bestvoltuv = voltuv; 975 break; 976 } 977 } 978 } 979 /* clk and volt sent as zero to print vf table */ 980 if ((*pclkmhz == 0) && (*pvoltuv == 0)) { 981 status = 0; 982 goto done; 983 } 984 /* atleast one search found a matching value? 
*/ 985 if ((bestvoltuv != 0) && (bestclkmhz != 0)) { 986 *pclkmhz = bestclkmhz; 987 *pvoltuv = bestvoltuv; 988 status = 0; 989 goto done; 990 } 991done: 992 nvgpu_log_info(g, "done status %x", status); 993 return status; 994} 995 996static u32 clkdomaingetfpoints 997( 998 struct gk20a *g, 999 struct clk_pmupstate *pclk, 1000 struct clk_domain *pdomain, 1001 u32 *pfpointscount, 1002 u16 *pfreqpointsinmhz, 1003 u8 rail 1004) 1005{ 1006 u32 status = 0; 1007 struct clk_domain_3x_master *p3xmaster = 1008 (struct clk_domain_3x_master *)pdomain; 1009 struct clk_prog *pprog = NULL; 1010 struct clk_prog_1x_master *pprog1xmaster = NULL; 1011 u32 fpointscount = 0; 1012 u32 remainingcount; 1013 u32 totalcount; 1014 u16 *freqpointsdata; 1015 u8 i; 1016 1017 nvgpu_log_info(g, " "); 1018 1019 if (pfpointscount == NULL) { 1020 return -EINVAL; 1021 } 1022 1023 if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0)) { 1024 return -EINVAL; 1025 } 1026 1027 if (pdomain->super.implements(g, &pdomain->super, 1028 CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE)) { 1029 return -EINVAL; 1030 } 1031 1032 freqpointsdata = pfreqpointsinmhz; 1033 totalcount = 0; 1034 fpointscount = *pfpointscount; 1035 remainingcount = fpointscount; 1036 /* Iterate over the set of CLK_PROGs pointed at by this domain.*/ 1037 for (i = p3xmaster->super.clk_prog_idx_first; 1038 i <= p3xmaster->super.clk_prog_idx_last; 1039 i++) { 1040 pprog = CLK_CLK_PROG_GET(pclk, i); 1041 pprog1xmaster = (struct clk_prog_1x_master *)pprog; 1042 status = pprog1xmaster->getfpoints(g, pclk, pprog1xmaster, 1043 &fpointscount, &freqpointsdata, rail); 1044 if (status) { 1045 *pfpointscount = 0; 1046 goto done; 1047 } 1048 totalcount += fpointscount; 1049 if (*pfpointscount) { 1050 remainingcount -= fpointscount; 1051 fpointscount = remainingcount; 1052 } else { 1053 fpointscount = 0; 1054 } 1055 1056 } 1057 1058 *pfpointscount = totalcount; 1059done: 1060 nvgpu_log_info(g, "done status %x", status); 1061 return status; 1062} 1063 1064static int 
clk_domain_pmudatainit_35_prog(struct gk20a *g, 1065 struct boardobj *board_obj_ptr, 1066 struct nv_pmu_boardobj *ppmudata) 1067{ 1068 int status = 0; 1069 struct clk_domain_35_prog *pclk_domain_35_prog; 1070 struct clk_domain_3x_prog *pclk_domain_3x_prog; 1071 struct nv_pmu_clk_clk_domain_35_prog_boardobj_set *pset; 1072 struct clk_domains *pdomains = &(g->clk_pmu.clk_domainobjs); 1073 1074 nvgpu_log_info(g, " "); 1075 1076 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); 1077 if (status != 0UL) { 1078 return status; 1079 } 1080 1081 pclk_domain_35_prog = (struct clk_domain_35_prog *)(void*)board_obj_ptr; 1082 pclk_domain_3x_prog = &pclk_domain_35_prog->super; 1083 1084 pset = (struct nv_pmu_clk_clk_domain_35_prog_boardobj_set *) 1085 (void*) ppmudata; 1086 1087 pset->super.clk_prog_idx_first = pclk_domain_3x_prog->clk_prog_idx_first; 1088 pset->super.clk_prog_idx_last = pclk_domain_3x_prog->clk_prog_idx_last; 1089 pset->super.b_force_noise_unaware_ordering = 1090 pclk_domain_3x_prog->b_force_noise_unaware_ordering; 1091 pset->super.factory_delta = pclk_domain_3x_prog->factory_delta; 1092 pset->super.freq_delta_min_mhz = pclk_domain_3x_prog->freq_delta_min_mhz; 1093 pset->super.freq_delta_max_mhz = pclk_domain_3x_prog->freq_delta_max_mhz; 1094 memcpy(&pset->super.deltas, &pdomains->deltas, 1095 (sizeof(struct ctrl_clk_clk_delta))); 1096 pset->pre_volt_ordering_index = pclk_domain_35_prog->pre_volt_ordering_index; 1097 pset->post_volt_ordering_index = pclk_domain_35_prog->post_volt_ordering_index; 1098 pset->clk_pos = pclk_domain_35_prog->clk_pos; 1099 pset->clk_vf_curve_count = pclk_domain_35_prog->clk_vf_curve_count; 1100 1101 return status; 1102} 1103 1104static int _clk_domain_pmudatainit_3x_prog(struct gk20a *g, 1105 struct boardobj *board_obj_ptr, 1106 struct nv_pmu_boardobj *ppmudata) 1107{ 1108 int status = 0; 1109 struct clk_domain_3x_prog *pclk_domain_3x_prog; 1110 struct nv_pmu_clk_clk_domain_30_prog_boardobj_set *pset; 1111 struct 
clk_domains *pdomains = &(g->clk_pmu.clk_domainobjs); 1112 1113 nvgpu_log_info(g, " "); 1114 1115 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); 1116 if (status != 0) { 1117 return status; 1118 } 1119 1120 pclk_domain_3x_prog = (struct clk_domain_3x_prog *)board_obj_ptr; 1121 1122 pset = (struct nv_pmu_clk_clk_domain_30_prog_boardobj_set *) 1123 ppmudata; 1124 1125 pset->super.clk_prog_idx_first = pclk_domain_3x_prog->clk_prog_idx_first; 1126 pset->super.clk_prog_idx_last = pclk_domain_3x_prog->clk_prog_idx_last; 1127 pset->noise_unaware_ordering_index = 1128 pclk_domain_3x_prog->noise_unaware_ordering_index; 1129 pset->noise_aware_ordering_index = 1130 pclk_domain_3x_prog->noise_aware_ordering_index; 1131 pset->super.b_force_noise_unaware_ordering = 1132 pclk_domain_3x_prog->b_force_noise_unaware_ordering; 1133 pset->super.factory_delta = pclk_domain_3x_prog->factory_delta; 1134 pset->super.freq_delta_min_mhz = pclk_domain_3x_prog->freq_delta_min_mhz; 1135 pset->super.freq_delta_max_mhz = pclk_domain_3x_prog->freq_delta_max_mhz; 1136 memcpy(&pset->super.deltas, &pdomains->deltas, 1137 (sizeof(struct ctrl_clk_clk_delta))); 1138 1139 return status; 1140} 1141 1142static int clk_domain_construct_35_prog(struct gk20a *g, 1143 struct boardobj **ppboardobj, 1144 u16 size, void *pargs) 1145{ 1146 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1147 struct clk_domain_35_prog *pdomain; 1148 struct clk_domain_35_prog *ptmpdomain = 1149 (struct clk_domain_35_prog *)pargs; 1150 int status = 0; 1151 1152 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_3X_PROG); 1153 status = clk_domain_construct_3x(g, ppboardobj, size, pargs); 1154 if (status != 0UL) 1155 { 1156 return (u32) -EINVAL; 1157 } 1158 1159 pdomain = (struct clk_domain_35_prog *)(void*) *ppboardobj; 1160 1161 pdomain->super.super.super.super.pmudatainit = 1162 clk_domain_pmudatainit_35_prog; 1163 1164 pdomain->super.super.super.clkdomainclkproglink = 1165 clkdomainclkproglink_3x_prog; 1166 
1167 pdomain->super.super.super.clkdomainclkvfsearch = 1168 clkdomainvfsearch; 1169 1170 pdomain->super.super.super.clkdomainclkgetfpoints = 1171 clkdomaingetfpoints; 1172 1173 pdomain->super.clk_prog_idx_first = ptmpdomain->super.clk_prog_idx_first; 1174 pdomain->super.clk_prog_idx_last = ptmpdomain->super.clk_prog_idx_last; 1175 pdomain->super.noise_unaware_ordering_index = 1176 ptmpdomain->super.noise_unaware_ordering_index; 1177 pdomain->super.noise_aware_ordering_index = 1178 ptmpdomain->super.noise_aware_ordering_index; 1179 pdomain->super.b_force_noise_unaware_ordering = 1180 ptmpdomain->super.b_force_noise_unaware_ordering; 1181 pdomain->super.factory_delta = ptmpdomain->super.factory_delta; 1182 pdomain->super.freq_delta_min_mhz = ptmpdomain->super.freq_delta_min_mhz; 1183 pdomain->super.freq_delta_max_mhz = ptmpdomain->super.freq_delta_max_mhz; 1184 pdomain->pre_volt_ordering_index = ptmpdomain->pre_volt_ordering_index; 1185 pdomain->post_volt_ordering_index = ptmpdomain->post_volt_ordering_index; 1186 pdomain->clk_pos = ptmpdomain->clk_pos; 1187 pdomain->clk_vf_curve_count = ptmpdomain->clk_vf_curve_count; 1188 1189 return status; 1190} 1191 1192static int clk_domain_construct_3x_prog(struct gk20a *g, 1193 struct boardobj **ppboardobj, 1194 u16 size, void *pargs) 1195{ 1196 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1197 struct clk_domain_3x_prog *pdomain; 1198 struct clk_domain_3x_prog *ptmpdomain = 1199 (struct clk_domain_3x_prog *)pargs; 1200 int status = 0; 1201 1202 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_3X_PROG); 1203 status = clk_domain_construct_3x(g, ppboardobj, size, pargs); 1204 if (status) { 1205 return -EINVAL; 1206 } 1207 1208 pdomain = (struct clk_domain_3x_prog *)*ppboardobj; 1209 1210 pdomain->super.super.super.pmudatainit = 1211 _clk_domain_pmudatainit_3x_prog; 1212 1213 pdomain->super.super.clkdomainclkproglink = 1214 clkdomainclkproglink_3x_prog; 1215 1216 pdomain->super.super.clkdomainclkvfsearch = 1217 
clkdomainvfsearch; 1218 1219 pdomain->super.super.clkdomainclkgetfpoints = 1220 clkdomaingetfpoints; 1221 1222 pdomain->clk_prog_idx_first = ptmpdomain->clk_prog_idx_first; 1223 pdomain->clk_prog_idx_last = ptmpdomain->clk_prog_idx_last; 1224 pdomain->noise_unaware_ordering_index = 1225 ptmpdomain->noise_unaware_ordering_index; 1226 pdomain->noise_aware_ordering_index = 1227 ptmpdomain->noise_aware_ordering_index; 1228 pdomain->b_force_noise_unaware_ordering = 1229 ptmpdomain->b_force_noise_unaware_ordering; 1230 pdomain->factory_delta = ptmpdomain->factory_delta; 1231 pdomain->freq_delta_min_mhz = ptmpdomain->freq_delta_min_mhz; 1232 pdomain->freq_delta_max_mhz = ptmpdomain->freq_delta_max_mhz; 1233 1234 return status; 1235} 1236 1237static int _clk_domain_pmudatainit_35_slave(struct gk20a *g, 1238 struct boardobj *board_obj_ptr, 1239 struct nv_pmu_boardobj *ppmudata) 1240{ 1241 int status = 0; 1242 struct clk_domain_35_slave *pclk_domain_35_slave; 1243 struct nv_pmu_clk_clk_domain_35_slave_boardobj_set *pset; 1244 1245 nvgpu_log_info(g, " "); 1246 1247 status = clk_domain_pmudatainit_35_prog(g, board_obj_ptr, ppmudata); 1248 if (status != 0UL) { 1249 return status; 1250 } 1251 1252 pclk_domain_35_slave = (struct clk_domain_35_slave *)(void*)board_obj_ptr; 1253 1254 pset = (struct nv_pmu_clk_clk_domain_35_slave_boardobj_set *) 1255 (void*) ppmudata; 1256 1257 pset->slave.master_idx = pclk_domain_35_slave->slave.master_idx; 1258 1259 return status; 1260} 1261 1262static int clk_domain_pmudatainit_3x_slave(struct gk20a *g, 1263 struct boardobj *board_obj_ptr, 1264 struct nv_pmu_boardobj *ppmudata) 1265{ 1266 int status = 0; 1267 struct clk_domain_3x_slave *pclk_domain_3x_slave; 1268 struct nv_pmu_clk_clk_domain_3x_slave_boardobj_set *pset; 1269 1270 nvgpu_log_info(g, " "); 1271 1272 status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata); 1273 if (status != 0) { 1274 return status; 1275 } 1276 1277 pclk_domain_3x_slave = (struct clk_domain_3x_slave 
*)board_obj_ptr; 1278 1279 pset = (struct nv_pmu_clk_clk_domain_3x_slave_boardobj_set *) 1280 ppmudata; 1281 1282 pset->master_idx = pclk_domain_3x_slave->master_idx; 1283 1284 return status; 1285} 1286 1287static int clk_domain_construct_35_slave(struct gk20a *g, 1288 struct boardobj **ppboardobj, 1289 u16 size, void *pargs) 1290{ 1291 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1292 struct clk_domain_35_slave *pdomain; 1293 struct clk_domain_35_slave *ptmpdomain = 1294 (struct clk_domain_35_slave *)pargs; 1295 int status = 0; 1296 1297 if (BOARDOBJ_GET_TYPE(pargs) != (u8) CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE) { 1298 return (u32) -EINVAL; 1299 } 1300 1301 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE); 1302 status = clk_domain_construct_35_prog(g, ppboardobj, size, pargs); 1303 if (status != 0UL) { 1304 return (u32) -EINVAL; 1305 } 1306 1307 pdomain = (struct clk_domain_35_slave *)(void*)*ppboardobj; 1308 1309 pdomain->super.super.super.super.super.pmudatainit = 1310 _clk_domain_pmudatainit_35_slave; 1311 1312 pdomain->slave.master_idx = ptmpdomain->slave.master_idx; 1313 1314 pdomain->slave.clkdomainclkgetslaveclk = 1315 clkdomaingetslaveclk; 1316 1317 return status; 1318} 1319 1320static int clk_domain_construct_3x_slave(struct gk20a *g, 1321 struct boardobj **ppboardobj, 1322 u16 size, void *pargs) 1323{ 1324 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1325 struct clk_domain_3x_slave *pdomain; 1326 struct clk_domain_3x_slave *ptmpdomain = 1327 (struct clk_domain_3x_slave *)pargs; 1328 int status = 0; 1329 1330 if (BOARDOBJ_GET_TYPE(pargs) != (u8) CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE) { 1331 return -EINVAL; 1332 } 1333 1334 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE); 1335 status = clk_domain_construct_3x_prog(g, ppboardobj, size, pargs); 1336 if (status != 0UL) { 1337 return -EINVAL; 1338 } 1339 1340 pdomain = (struct clk_domain_3x_slave *)*ppboardobj; 1341 1342 pdomain->super.super.super.super.pmudatainit = 1343 
clk_domain_pmudatainit_3x_slave; 1344 1345 pdomain->master_idx = ptmpdomain->master_idx; 1346 1347 pdomain->clkdomainclkgetslaveclk = 1348 clkdomaingetslaveclk; 1349 1350 return status; 1351} 1352 1353static int clkdomainclkproglink_3x_master(struct gk20a *g, 1354 struct clk_pmupstate *pclk, 1355 struct clk_domain *pdomain) 1356{ 1357 int status = 0; 1358 struct clk_domain_3x_master *p3xmaster = 1359 (struct clk_domain_3x_master *)pdomain; 1360 struct clk_prog *pprog = NULL; 1361 struct clk_prog_1x_master *pprog1xmaster = NULL; 1362 u16 freq_max_last_mhz = 0; 1363 u8 i; 1364 1365 nvgpu_log_info(g, " "); 1366 1367 status = clkdomainclkproglink_3x_prog(g, pclk, pdomain); 1368 if (status) { 1369 goto done; 1370 } 1371 1372 /* Iterate over the set of CLK_PROGs pointed at by this domain.*/ 1373 for (i = p3xmaster->super.clk_prog_idx_first; 1374 i <= p3xmaster->super.clk_prog_idx_last; 1375 i++) { 1376 pprog = CLK_CLK_PROG_GET(pclk, i); 1377 1378 /* MASTER CLK_DOMAINs must point to MASTER CLK_PROGs.*/ 1379 if (!pprog->super.implements(g, &pprog->super, 1380 CTRL_CLK_CLK_PROG_TYPE_1X_MASTER)) { 1381 status = -EINVAL; 1382 goto done; 1383 } 1384 1385 pprog1xmaster = (struct clk_prog_1x_master *)pprog; 1386 status = pprog1xmaster->vfflatten(g, pclk, pprog1xmaster, 1387 BOARDOBJ_GET_IDX(p3xmaster), &freq_max_last_mhz); 1388 if (status) { 1389 goto done; 1390 } 1391 } 1392done: 1393 nvgpu_log_info(g, "done status %x", status); 1394 return status; 1395} 1396 1397static int clk_domain_pmudatainit_35_master(struct gk20a *g, 1398 struct boardobj *board_obj_ptr, 1399 struct nv_pmu_boardobj *ppmudata) 1400{ 1401 int status = 0; 1402 struct clk_domain_35_master *pclk_domain_35_master; 1403 struct nv_pmu_clk_clk_domain_35_master_boardobj_set *pset; 1404 1405 nvgpu_log_info(g, " "); 1406 1407 status = clk_domain_pmudatainit_35_prog(g, board_obj_ptr, ppmudata); 1408 if (status != 0UL) { 1409 return status; 1410 } 1411 1412 pclk_domain_35_master = (struct clk_domain_35_master *) 1413 
(void*) board_obj_ptr; 1414 1415 pset = (struct nv_pmu_clk_clk_domain_35_master_boardobj_set *) 1416 (void*) ppmudata; 1417 1418 pset->master.slave_idxs_mask = pclk_domain_35_master->master.slave_idxs_mask; 1419 1420 return status; 1421} 1422 1423static int _clk_domain_pmudatainit_3x_master(struct gk20a *g, 1424 struct boardobj *board_obj_ptr, 1425 struct nv_pmu_boardobj *ppmudata) 1426{ 1427 int status = 0; 1428 struct clk_domain_3x_master *pclk_domain_3x_master; 1429 struct nv_pmu_clk_clk_domain_3x_master_boardobj_set *pset; 1430 1431 nvgpu_log_info(g, " "); 1432 1433 status = _clk_domain_pmudatainit_3x_prog(g, board_obj_ptr, ppmudata); 1434 if (status != 0) { 1435 return status; 1436 } 1437 1438 pclk_domain_3x_master = (struct clk_domain_3x_master *)board_obj_ptr; 1439 1440 pset = (struct nv_pmu_clk_clk_domain_3x_master_boardobj_set *) 1441 ppmudata; 1442 1443 pset->slave_idxs_mask = pclk_domain_3x_master->slave_idxs_mask; 1444 1445 return status; 1446} 1447 1448static int clk_domain_construct_35_master(struct gk20a *g, 1449 struct boardobj **ppboardobj, 1450 u16 size, void *pargs) 1451{ 1452 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1453 struct clk_domain_35_master *pdomain; 1454 int status = 0; 1455 1456 if (BOARDOBJ_GET_TYPE(pargs) != (u8) CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER) { 1457 return -EINVAL; 1458 } 1459 1460 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER); 1461 status = clk_domain_construct_35_prog(g, ppboardobj, size, pargs); 1462 if (status != 0UL) { 1463 return (u32) -EINVAL; 1464 } 1465 1466 pdomain = (struct clk_domain_35_master *)(void*) *ppboardobj; 1467 1468 pdomain->super.super.super.super.super.pmudatainit = 1469 clk_domain_pmudatainit_35_master; 1470 pdomain->super.super.super.super.clkdomainclkproglink = 1471 clkdomainclkproglink_3x_master; 1472 1473 pdomain->master.slave_idxs_mask = 0; 1474 1475 return status; 1476} 1477 1478static int clk_domain_construct_3x_master(struct gk20a *g, 1479 struct boardobj 
**ppboardobj, 1480 u16 size, void *pargs) 1481{ 1482 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1483 struct clk_domain_3x_master *pdomain; 1484 int status = 0; 1485 1486 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER) { 1487 return -EINVAL; 1488 } 1489 1490 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER); 1491 status = clk_domain_construct_3x_prog(g, ppboardobj, size, pargs); 1492 if (status) { 1493 return -EINVAL; 1494 } 1495 1496 pdomain = (struct clk_domain_3x_master *)*ppboardobj; 1497 1498 pdomain->super.super.super.super.pmudatainit = 1499 _clk_domain_pmudatainit_3x_master; 1500 pdomain->super.super.super.clkdomainclkproglink = 1501 clkdomainclkproglink_3x_master; 1502 1503 pdomain->slave_idxs_mask = 0; 1504 1505 return status; 1506} 1507 1508static int clkdomainclkproglink_fixed(struct gk20a *g, 1509 struct clk_pmupstate *pclk, 1510 struct clk_domain *pdomain) 1511{ 1512 nvgpu_log_info(g, " "); 1513 return 0; 1514} 1515 1516static int _clk_domain_pmudatainit_3x_fixed(struct gk20a *g, 1517 struct boardobj *board_obj_ptr, 1518 struct nv_pmu_boardobj *ppmudata) 1519{ 1520 int status = 0; 1521 struct clk_domain_3x_fixed *pclk_domain_3x_fixed; 1522 struct nv_pmu_clk_clk_domain_3x_fixed_boardobj_set *pset; 1523 1524 nvgpu_log_info(g, " "); 1525 1526 status = _clk_domain_pmudatainit_3x(g, board_obj_ptr, ppmudata); 1527 if (status != 0) { 1528 return status; 1529 } 1530 1531 pclk_domain_3x_fixed = (struct clk_domain_3x_fixed *)board_obj_ptr; 1532 1533 pset = (struct nv_pmu_clk_clk_domain_3x_fixed_boardobj_set *) 1534 ppmudata; 1535 1536 pset->freq_mhz = pclk_domain_3x_fixed->freq_mhz; 1537 1538 return status; 1539} 1540 1541static int clk_domain_construct_3x_fixed(struct gk20a *g, 1542 struct boardobj **ppboardobj, 1543 u16 size, void *pargs) 1544{ 1545 struct boardobj *ptmpobj = (struct boardobj *)pargs; 1546 struct clk_domain_3x_fixed *pdomain; 1547 struct clk_domain_3x_fixed *ptmpdomain = 1548 (struct 
clk_domain_3x_fixed *)pargs; 1549 int status = 0; 1550 1551 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED) { 1552 return -EINVAL; 1553 } 1554 1555 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED); 1556 status = clk_domain_construct_3x(g, ppboardobj, size, pargs); 1557 if (status) { 1558 return -EINVAL; 1559 } 1560 1561 pdomain = (struct clk_domain_3x_fixed *)*ppboardobj; 1562 1563 pdomain->super.super.super.pmudatainit = 1564 _clk_domain_pmudatainit_3x_fixed; 1565 1566 pdomain->super.super.clkdomainclkproglink = 1567 clkdomainclkproglink_fixed; 1568 1569 pdomain->freq_mhz = ptmpdomain->freq_mhz; 1570 1571 return status; 1572} 1573 1574static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs) 1575{ 1576 struct boardobj *board_obj_ptr = NULL; 1577 u32 status; 1578 1579 nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs)); 1580 switch (BOARDOBJ_GET_TYPE(pargs)) { 1581 case CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED: 1582 status = clk_domain_construct_3x_fixed(g, &board_obj_ptr, 1583 sizeof(struct clk_domain_3x_fixed), pargs); 1584 break; 1585 1586 case CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER: 1587 status = clk_domain_construct_35_master(g, &board_obj_ptr, 1588 sizeof(struct clk_domain_35_master), pargs); 1589 break; 1590 1591 1592 case CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER: 1593 status = clk_domain_construct_3x_master(g, &board_obj_ptr, 1594 sizeof(struct clk_domain_3x_master), pargs); 1595 break; 1596 1597 case CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE: 1598 status = clk_domain_construct_35_slave(g, &board_obj_ptr, 1599 sizeof(struct clk_domain_35_slave), pargs); 1600 break; 1601 1602 case CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE: 1603 status = clk_domain_construct_3x_slave(g, &board_obj_ptr, 1604 sizeof(struct clk_domain_3x_slave), pargs); 1605 break; 1606 1607 default: 1608 return NULL; 1609 } 1610 1611 if (status) { 1612 return NULL; 1613 } 1614 1615 nvgpu_log_info(g, " Done"); 1616 1617 return (struct clk_domain *)board_obj_ptr; 1618} 1619 
1620static int clk_domain_pmudatainit_super(struct gk20a *g, 1621 struct boardobj *board_obj_ptr, 1622 struct nv_pmu_boardobj *ppmudata) 1623{ 1624 int status = 0; 1625 struct clk_domain *pclk_domain; 1626 struct nv_pmu_clk_clk_domain_boardobj_set *pset; 1627 1628 nvgpu_log_info(g, " "); 1629 1630 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 1631 if (status != 0) { 1632 return status; 1633 } 1634 1635 pclk_domain = (struct clk_domain *)board_obj_ptr; 1636 1637 pset = (struct nv_pmu_clk_clk_domain_boardobj_set *)ppmudata; 1638 1639 pset->domain = pclk_domain->domain; 1640 pset->api_domain = pclk_domain->api_domain; 1641 pset->perf_domain_grp_idx = pclk_domain->perf_domain_grp_idx; 1642 1643 return status; 1644} 1645 1646int clk_domain_clk_prog_link(struct gk20a *g, struct clk_pmupstate *pclk) 1647{ 1648 int status = 0; 1649 struct clk_domain *pdomain; 1650 u8 i; 1651 1652 /* Iterate over all CLK_DOMAINs and flatten their VF curves.*/ 1653 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), 1654 struct clk_domain *, pdomain, i) { 1655 status = pdomain->clkdomainclkproglink(g, pclk, pdomain); 1656 if (status) { 1657 nvgpu_err(g, 1658 "error flattening VF for CLK DOMAIN - 0x%x", 1659 pdomain->domain); 1660 goto done; 1661 } 1662 } 1663 1664done: 1665 return status; 1666}
diff --git a/include/clk/clk_domain.h b/include/clk/clk_domain.h
deleted file mode 100644
index e5a7153..0000000
--- a/include/clk/clk_domain.h
+++ /dev/null
@@ -1,157 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22 23#ifndef NVGPU_CLK_DOMAIN_H 24#define NVGPU_CLK_DOMAIN_H 25 26#include "ctrl/ctrlclk.h" 27#include "ctrl/ctrlboardobj.h" 28#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 29#include "boardobj/boardobjgrp_e32.h" 30#include "boardobj/boardobjgrpmask.h" 31 32#define CLK_DOMAIN_BOARDOBJGRP_VERSION 0x30 33#define CLK_TABLE_HAL_ENTRY_GP 0x02 34#define CLK_TABLE_HAL_ENTRY_GV 0x03 35 36struct clk_domains; 37struct clk_domain; 38enum nv_pmu_clk_clkwhich; 39 40/*data and function definition to talk to driver*/ 41int clk_domain_sw_setup(struct gk20a *g); 42int clk_domain_pmu_setup(struct gk20a *g); 43 44typedef int clkproglink(struct gk20a *g, struct clk_pmupstate *pclk, 45 struct clk_domain *pdomain); 46 47typedef int clkvfsearch(struct gk20a *g, struct clk_pmupstate *pclk, 48 struct clk_domain *pdomain, u16 *clkmhz, 49 u32 *voltuv, u8 rail); 50 51typedef int clkgetslaveclk(struct gk20a *g, struct clk_pmupstate *pclk, 52 struct clk_domain *pdomain, u16 *clkmhz, 53 u16 masterclkmhz); 54 55typedef u32 clkgetfpoints(struct gk20a *g, struct clk_pmupstate *pclk, 56 struct clk_domain *pdomain, u32 *pfpointscount, 57 u16 *pfreqpointsinmhz, u8 rail); 58 59struct clk_domains { 60 struct boardobjgrp_e32 super; 61 u8 n_num_entries; 62 u8 version; 63 bool b_enforce_vf_monotonicity; 64 bool b_enforce_vf_smoothening; 65 bool b_override_o_v_o_c; 66 bool b_debug_mode; 67 u32 vbios_domains; 68 u16 cntr_sampling_periodms; 69 struct boardobjgrpmask_e32 prog_domains_mask; 70 struct boardobjgrpmask_e32 master_domains_mask; 71 struct ctrl_clk_clk_delta deltas; 72 73 struct clk_domain *ordered_noise_aware_list[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; 74 75 struct clk_domain *ordered_noise_unaware_list[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; 76}; 77 78struct clk_domain { 79 struct boardobj super; 80 u32 api_domain; 81 u32 part_mask; 82 enum nv_pmu_clk_clkwhich domain; 83 u8 perf_domain_index; 84 u8 perf_domain_grp_idx; 85 u8 ratio_domain; 86 u8 usage; 87 clkproglink *clkdomainclkproglink; 88 clkvfsearch 
*clkdomainclkvfsearch; 89 clkgetfpoints *clkdomainclkgetfpoints; 90}; 91 92struct clk_domain_3x { 93 struct clk_domain super; 94 bool b_noise_aware_capable; 95}; 96 97struct clk_domain_3x_fixed { 98 struct clk_domain_3x super; 99 u16 freq_mhz; 100}; 101 102struct clk_domain_3x_prog { 103 struct clk_domain_3x super; 104 u8 clk_prog_idx_first; 105 u8 clk_prog_idx_last; 106 bool b_force_noise_unaware_ordering; 107 struct ctrl_clk_freq_delta factory_delta; 108 short freq_delta_min_mhz; 109 short freq_delta_max_mhz; 110 struct ctrl_clk_clk_delta deltas; 111 u8 noise_unaware_ordering_index; 112 u8 noise_aware_ordering_index; 113}; 114 115struct clk_domain_35_prog { 116 struct clk_domain_3x_prog super; 117 u8 pre_volt_ordering_index; 118 u8 post_volt_ordering_index; 119 u8 clk_pos; 120 u8 clk_vf_curve_count; 121}; 122 123struct clk_domain_3x_master { 124 struct clk_domain_3x_prog super; 125 u32 slave_idxs_mask; 126}; 127 128struct clk_domain_35_master { 129 struct clk_domain_35_prog super; 130 struct clk_domain_3x_master master; 131 struct boardobjgrpmask_e32 master_slave_domains_grp_mask; 132}; 133 134struct clk_domain_3x_slave { 135 struct clk_domain_3x_prog super; 136 u8 master_idx; 137 clkgetslaveclk *clkdomainclkgetslaveclk; 138}; 139 140struct clk_domain_30_slave { 141 u8 rsvd; 142 u8 master_idx; 143 clkgetslaveclk *clkdomainclkgetslaveclk; 144}; 145 146struct clk_domain_35_slave { 147 struct clk_domain_35_prog super; 148 struct clk_domain_30_slave slave; 149}; 150 151int clk_domain_clk_prog_link(struct gk20a *g, struct clk_pmupstate *pclk); 152 153#define CLK_CLK_DOMAIN_GET(pclk, idx) \ 154 ((struct clk_domain *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ 155 &pclk->clk_domainobjs.super.super, (u8)(idx))) 156 157#endif /* NVGPU_CLK_DOMAIN_H */
diff --git a/include/clk/clk_fll.c b/include/clk/clk_fll.c
deleted file mode 100644
index e67dd35..0000000
--- a/include/clk/clk_fll.c
+++ /dev/null
@@ -1,495 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bios.h> 24#include <nvgpu/gk20a.h> 25 26#include "clk.h" 27#include "clk_fll.h" 28#include "clk_domain.h" 29#include "boardobj/boardobjgrp.h" 30#include "boardobj/boardobjgrp_e32.h" 31#include "ctrl/ctrlclk.h" 32#include "ctrl/ctrlvolt.h" 33 34static int devinit_get_fll_device_table(struct gk20a *g, 35 struct avfsfllobjs *pfllobjs); 36static struct fll_device *construct_fll_device(struct gk20a *g, 37 void *pargs); 38static int fll_device_init_pmudata_super(struct gk20a *g, 39 struct boardobj *board_obj_ptr, 40 struct nv_pmu_boardobj *ppmudata); 41 42static int _clk_fll_devgrp_pmudatainit_super(struct gk20a *g, 43 struct boardobjgrp *pboardobjgrp, 44 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 45{ 46 struct nv_pmu_clk_clk_fll_device_boardobjgrp_set_header *pset = 47 (struct nv_pmu_clk_clk_fll_device_boardobjgrp_set_header *) 48 pboardobjgrppmu; 49 struct avfsfllobjs *pfll_objs = (struct avfsfllobjs *) 50 pboardobjgrp; 51 int status = 0; 52 53 nvgpu_log_info(g, " "); 54 55 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 56 if (status) { 57 nvgpu_err(g, "failed to init fll pmuobjgrp"); 58 return status; 59 } 60 pset->lut_num_entries = pfll_objs->lut_num_entries; 61 pset->lut_step_size_uv = pfll_objs->lut_step_size_uv; 62 pset->lut_min_voltage_uv = pfll_objs->lut_min_voltage_uv; 63 pset->max_min_freq_mhz = pfll_objs->max_min_freq_mhz; 64 65 status = boardobjgrpmask_export( 66 &pfll_objs->lut_prog_master_mask.super, 67 pfll_objs->lut_prog_master_mask.super.bitcount, 68 &pset->lut_prog_master_mask.super); 69 70 nvgpu_log_info(g, " Done"); 71 return status; 72} 73 74static int _clk_fll_devgrp_pmudata_instget(struct gk20a *g, 75 struct nv_pmu_boardobjgrp *pmuboardobjgrp, 76 struct nv_pmu_boardobj **ppboardobjpmudata, 77 u8 idx) 78{ 79 struct nv_pmu_clk_clk_fll_device_boardobj_grp_set *pgrp_set = 80 (struct nv_pmu_clk_clk_fll_device_boardobj_grp_set *) 81 pmuboardobjgrp; 82 83 nvgpu_log_info(g, " "); 84 85 /*check 
whether pmuboardobjgrp has a valid boardobj in index*/ 86 if (((u32)BIT(idx) & 87 pgrp_set->hdr.data.super.obj_mask.super.data[0]) == 0) { 88 return -EINVAL; 89 } 90 91 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 92 &pgrp_set->objects[idx].data.board_obj; 93 nvgpu_log_info(g, " Done"); 94 return 0; 95} 96 97static int _clk_fll_devgrp_pmustatus_instget(struct gk20a *g, 98 void *pboardobjgrppmu, 99 struct nv_pmu_boardobj_query **ppboardobjpmustatus, 100 u8 idx) 101{ 102 struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status *pgrp_get_status = 103 (struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status *) 104 pboardobjgrppmu; 105 106 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 107 if (((u32)BIT(idx) & 108 pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0) { 109 return -EINVAL; 110 } 111 112 *ppboardobjpmustatus = (struct nv_pmu_boardobj_query *) 113 &pgrp_get_status->objects[idx].data.board_obj; 114 return 0; 115} 116 117int clk_fll_sw_setup(struct gk20a *g) 118{ 119 int status; 120 struct boardobjgrp *pboardobjgrp = NULL; 121 struct avfsfllobjs *pfllobjs; 122 struct fll_device *pfll; 123 struct fll_device *pfll_master; 124 struct fll_device *pfll_local; 125 u8 i; 126 u8 j; 127 128 nvgpu_log_info(g, " "); 129 130 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_fllobjs.super); 131 if (status) { 132 nvgpu_err(g, 133 "error creating boardobjgrp for fll, status - 0x%x", status); 134 goto done; 135 } 136 pfllobjs = &(g->clk_pmu.avfs_fllobjs); 137 pboardobjgrp = &(g->clk_pmu.avfs_fllobjs.super.super); 138 139 BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, FLL_DEVICE); 140 141 status = BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, 142 clk, CLK, clk_fll_device, CLK_FLL_DEVICE); 143 if (status) { 144 nvgpu_err(g, 145 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 146 status); 147 goto done; 148 } 149 150 pboardobjgrp->pmudatainit = _clk_fll_devgrp_pmudatainit_super; 151 pboardobjgrp->pmudatainstget = 
_clk_fll_devgrp_pmudata_instget; 152 pboardobjgrp->pmustatusinstget = _clk_fll_devgrp_pmustatus_instget; 153 pfllobjs = (struct avfsfllobjs *)pboardobjgrp; 154 pfllobjs->lut_num_entries = g->ops.clk.lut_num_entries; 155 pfllobjs->lut_step_size_uv = CTRL_CLK_VIN_STEP_SIZE_UV; 156 pfllobjs->lut_min_voltage_uv = CTRL_CLK_LUT_MIN_VOLTAGE_UV; 157 158 /* Initialize lut prog master mask to zero.*/ 159 boardobjgrpmask_e32_init(&pfllobjs->lut_prog_master_mask, NULL); 160 161 status = devinit_get_fll_device_table(g, pfllobjs); 162 if (status) { 163 goto done; 164 } 165 166 status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, 167 &g->clk_pmu.avfs_fllobjs.super.super, 168 clk, CLK, clk_fll_device, CLK_FLL_DEVICE); 169 if (status) { 170 nvgpu_err(g, 171 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 172 status); 173 goto done; 174 } 175 176 BOARDOBJGRP_FOR_EACH(&(pfllobjs->super.super), 177 struct fll_device *, pfll, i) { 178 pfll_master = NULL; 179 j = 0; 180 BOARDOBJGRP_ITERATOR(&(pfllobjs->super.super), 181 struct fll_device *, pfll_local, j, 182 &pfllobjs->lut_prog_master_mask.super) { 183 if (pfll_local->clk_domain == pfll->clk_domain) { 184 pfll_master = pfll_local; 185 break; 186 } 187 } 188 189 if (pfll_master == NULL) { 190 status = boardobjgrpmask_bitset( 191 &pfllobjs->lut_prog_master_mask.super, 192 BOARDOBJ_GET_IDX(pfll)); 193 if (status) { 194 nvgpu_err(g, "err setting lutprogmask"); 195 goto done; 196 } 197 pfll_master = pfll; 198 } 199 status = pfll_master->lut_broadcast_slave_register( 200 g, pfllobjs, pfll_master, pfll); 201 202 if (status) { 203 nvgpu_err(g, "err setting lutslavemask"); 204 goto done; 205 } 206 } 207done: 208 nvgpu_log_info(g, " done status %x", status); 209 return status; 210} 211 212int clk_fll_pmu_setup(struct gk20a *g) 213{ 214 int status; 215 struct boardobjgrp *pboardobjgrp = NULL; 216 217 nvgpu_log_info(g, " "); 218 219 pboardobjgrp = &g->clk_pmu.avfs_fllobjs.super.super; 220 221 if (!pboardobjgrp->bconstructed) { 
222 return -EINVAL; 223 } 224 225 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 226 227 nvgpu_log_info(g, "Done"); 228 return status; 229} 230 231static int devinit_get_fll_device_table(struct gk20a *g, 232 struct avfsfllobjs *pfllobjs) 233{ 234 int status = 0; 235 u8 *fll_table_ptr = NULL; 236 struct fll_descriptor_header fll_desc_table_header_sz = { 0 }; 237 struct fll_descriptor_header_10 fll_desc_table_header = { 0 }; 238 struct fll_descriptor_entry_10 fll_desc_table_entry = { 0 }; 239 u8 *fll_tbl_entry_ptr = NULL; 240 u32 index = 0; 241 struct fll_device fll_dev_data; 242 struct fll_device *pfll_dev; 243 struct vin_device *pvin_dev; 244 u32 desctablesize; 245 u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP; 246 struct avfsvinobjs *pvinobjs = &g->clk_pmu.avfs_vinobjs; 247 248 nvgpu_log_info(g, " "); 249 250 fll_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 251 g->bios.clock_token, FLL_TABLE); 252 if (fll_table_ptr == NULL) { 253 status = -1; 254 goto done; 255 } 256 257 memcpy(&fll_desc_table_header_sz, fll_table_ptr, 258 sizeof(struct fll_descriptor_header)); 259 if (fll_desc_table_header_sz.size >= FLL_DESCRIPTOR_HEADER_10_SIZE_6) { 260 desctablesize = FLL_DESCRIPTOR_HEADER_10_SIZE_6; 261 } else { 262 desctablesize = FLL_DESCRIPTOR_HEADER_10_SIZE_4; 263 } 264 265 memcpy(&fll_desc_table_header, fll_table_ptr, desctablesize); 266 267 if (desctablesize == FLL_DESCRIPTOR_HEADER_10_SIZE_6) { 268 pfllobjs->max_min_freq_mhz = 269 fll_desc_table_header.max_min_freq_mhz; 270 } else { 271 pfllobjs->max_min_freq_mhz = 0; 272 } 273 274 /* Read table entries*/ 275 fll_tbl_entry_ptr = fll_table_ptr + desctablesize; 276 for (index = 0; index < fll_desc_table_header.entry_count; index++) { 277 u32 fll_id; 278 279 memcpy(&fll_desc_table_entry, fll_tbl_entry_ptr, 280 sizeof(struct fll_descriptor_entry_10)); 281 282 if (fll_desc_table_entry.fll_device_type == CTRL_CLK_FLL_TYPE_DISABLED) { 283 continue; 284 } 285 286 fll_id = 
fll_desc_table_entry.fll_device_id; 287 288 if ( (u8)fll_desc_table_entry.vin_idx_logic != CTRL_CLK_VIN_ID_UNDEFINED) { 289 pvin_dev = CLK_GET_VIN_DEVICE(pvinobjs, 290 (u8)fll_desc_table_entry.vin_idx_logic); 291 if (pvin_dev == NULL) { 292 return -EINVAL; 293 } else { 294 pvin_dev->flls_shared_mask |= BIT(fll_id); 295 } 296 } else { 297 /* Return if Logic ADC device index is invalid*/ 298 nvgpu_err(g, "Invalid Logic ADC specified for Nafll ID"); 299 return -EINVAL; 300 } 301 302 fll_dev_data.lut_device.vselect_mode = 303 (u8)BIOS_GET_FIELD(fll_desc_table_entry.lut_params, 304 NV_FLL_DESC_LUT_PARAMS_VSELECT); 305 306 if ( (u8)fll_desc_table_entry.vin_idx_sram != CTRL_CLK_VIN_ID_UNDEFINED) { 307 pvin_dev = CLK_GET_VIN_DEVICE(pvinobjs, 308 (u8)fll_desc_table_entry.vin_idx_sram); 309 if (pvin_dev == NULL) { 310 return -EINVAL; 311 } else { 312 pvin_dev->flls_shared_mask |= BIT(fll_id); 313 } 314 } else { 315 /* Make sure VSELECT mode is set correctly to _LOGIC*/ 316 if (fll_dev_data.lut_device.vselect_mode != CTRL_CLK_FLL_LUT_VSELECT_LOGIC) { 317 return -EINVAL; 318 } 319 } 320 321 fll_dev_data.super.type = 322 (u8)fll_desc_table_entry.fll_device_type; 323 fll_dev_data.id = (u8)fll_desc_table_entry.fll_device_id; 324 fll_dev_data.mdiv = (u8)BIOS_GET_FIELD( 325 fll_desc_table_entry.fll_params, 326 NV_FLL_DESC_FLL_PARAMS_MDIV); 327 fll_dev_data.input_freq_mhz = 328 (u16)fll_desc_table_entry.ref_freq_mhz; 329 fll_dev_data.min_freq_vfe_idx = 330 (u8)fll_desc_table_entry.min_freq_vfe_idx; 331 fll_dev_data.freq_ctrl_idx = CTRL_BOARDOBJ_IDX_INVALID; 332 333 vbios_domain = (u32)(fll_desc_table_entry.clk_domain & 334 NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK); 335 fll_dev_data.clk_domain = 336 g->ops.pmu_ver.clk.get_vbios_clk_domain(vbios_domain); 337 338 fll_dev_data.rail_idx_for_lut = 0; 339 fll_dev_data.vin_idx_logic = 340 (u8)fll_desc_table_entry.vin_idx_logic; 341 fll_dev_data.vin_idx_sram = 342 (u8)fll_desc_table_entry.vin_idx_sram; 343 fll_dev_data.b_skip_pldiv_below_dvco_min 
= 344 (bool)BIOS_GET_FIELD(fll_desc_table_entry.fll_params, 345 NV_FLL_DESC_FLL_PARAMS_SKIP_PLDIV_BELOW_DVCO_MIN); 346 fll_dev_data.lut_device.hysteresis_threshold = 347 (u8)BIOS_GET_FIELD(fll_desc_table_entry.lut_params, 348 NV_FLL_DESC_LUT_PARAMS_HYSTERISIS_THRESHOLD); 349 fll_dev_data.regime_desc.regime_id = 350 CTRL_CLK_FLL_REGIME_ID_FFR; 351 fll_dev_data.regime_desc.fixed_freq_regime_limit_mhz = 352 (u16)fll_desc_table_entry.ffr_cutoff_freq_mhz; 353 fll_dev_data.regime_desc.target_regime_id_override=0; 354 355 /*construct fll device*/ 356 pfll_dev = construct_fll_device(g, (void *)&fll_dev_data); 357 358 status = boardobjgrp_objinsert(&pfllobjs->super.super, 359 (struct boardobj *)pfll_dev, index); 360 fll_tbl_entry_ptr += fll_desc_table_header.entry_size; 361 } 362 363done: 364 nvgpu_log_info(g, " done status %x", status); 365 return status; 366} 367 368u32 nvgpu_clk_get_vbios_clk_domain_gv10x( u32 vbios_domain) 369{ 370 if (vbios_domain == 0) { 371 return CTRL_CLK_DOMAIN_GPCCLK; 372 } else if (vbios_domain == 1) { 373 return CTRL_CLK_DOMAIN_XBARCLK; 374 } else if (vbios_domain == 3) { 375 return CTRL_CLK_DOMAIN_SYSCLK; 376 } else if (vbios_domain == 5) { 377 return CTRL_CLK_DOMAIN_NVDCLK; 378 } 379 return 0; 380} 381 382u32 nvgpu_clk_get_vbios_clk_domain_gp10x( u32 vbios_domain) 383{ 384 if (vbios_domain == 0) { 385 return CTRL_CLK_DOMAIN_GPC2CLK; 386 } else if (vbios_domain == 1) { 387 return CTRL_CLK_DOMAIN_XBAR2CLK; 388 } else if (vbios_domain == 3) { 389 return CTRL_CLK_DOMAIN_SYS2CLK; 390 } 391 return 0; 392} 393 394static u32 lutbroadcastslaveregister(struct gk20a *g, 395 struct avfsfllobjs *pfllobjs, 396 struct fll_device *pfll, 397 struct fll_device *pfll_slave) 398{ 399 if (pfll->clk_domain != pfll_slave->clk_domain) { 400 return -EINVAL; 401 } 402 403 return boardobjgrpmask_bitset(&pfll-> 404 lut_prog_broadcast_slave_mask.super, 405 BOARDOBJ_GET_IDX(pfll_slave)); 406} 407 408static struct fll_device *construct_fll_device(struct gk20a *g, 409 void 
*pargs) 410{ 411 struct boardobj *board_obj_ptr = NULL; 412 struct fll_device *pfll_dev; 413 struct fll_device *board_obj_fll_ptr = NULL; 414 int status; 415 416 nvgpu_log_info(g, " "); 417 status = boardobj_construct_super(g, &board_obj_ptr, 418 sizeof(struct fll_device), pargs); 419 if (status) { 420 return NULL; 421 } 422 423 pfll_dev = (struct fll_device *)pargs; 424 board_obj_fll_ptr = (struct fll_device *)board_obj_ptr; 425 board_obj_ptr->pmudatainit = fll_device_init_pmudata_super; 426 board_obj_fll_ptr->lut_broadcast_slave_register = 427 lutbroadcastslaveregister; 428 board_obj_fll_ptr->id = pfll_dev->id; 429 board_obj_fll_ptr->mdiv = pfll_dev->mdiv; 430 board_obj_fll_ptr->rail_idx_for_lut = pfll_dev->rail_idx_for_lut; 431 board_obj_fll_ptr->input_freq_mhz = pfll_dev->input_freq_mhz; 432 board_obj_fll_ptr->clk_domain = pfll_dev->clk_domain; 433 board_obj_fll_ptr->vin_idx_logic = pfll_dev->vin_idx_logic; 434 board_obj_fll_ptr->vin_idx_sram = pfll_dev->vin_idx_sram; 435 board_obj_fll_ptr->min_freq_vfe_idx = 436 pfll_dev->min_freq_vfe_idx; 437 board_obj_fll_ptr->freq_ctrl_idx = pfll_dev->freq_ctrl_idx; 438 board_obj_fll_ptr->b_skip_pldiv_below_dvco_min = 439 pfll_dev->b_skip_pldiv_below_dvco_min; 440 memcpy(&board_obj_fll_ptr->lut_device, &pfll_dev->lut_device, 441 sizeof(struct nv_pmu_clk_lut_device_desc)); 442 memcpy(&board_obj_fll_ptr->regime_desc, &pfll_dev->regime_desc, 443 sizeof(struct nv_pmu_clk_regime_desc)); 444 boardobjgrpmask_e32_init( 445 &board_obj_fll_ptr->lut_prog_broadcast_slave_mask, NULL); 446 447 nvgpu_log_info(g, " Done"); 448 449 return (struct fll_device *)board_obj_ptr; 450} 451 452static int fll_device_init_pmudata_super(struct gk20a *g, 453 struct boardobj *board_obj_ptr, 454 struct nv_pmu_boardobj *ppmudata) 455{ 456 int status = 0; 457 struct fll_device *pfll_dev; 458 struct nv_pmu_clk_clk_fll_device_boardobj_set *perf_pmu_data; 459 460 nvgpu_log_info(g, " "); 461 462 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 
463 if (status != 0) { 464 return status; 465 } 466 467 pfll_dev = (struct fll_device *)board_obj_ptr; 468 perf_pmu_data = (struct nv_pmu_clk_clk_fll_device_boardobj_set *) 469 ppmudata; 470 471 perf_pmu_data->id = pfll_dev->id; 472 perf_pmu_data->mdiv = pfll_dev->mdiv; 473 perf_pmu_data->rail_idx_for_lut = pfll_dev->rail_idx_for_lut; 474 perf_pmu_data->input_freq_mhz = pfll_dev->input_freq_mhz; 475 perf_pmu_data->vin_idx_logic = pfll_dev->vin_idx_logic; 476 perf_pmu_data->vin_idx_sram = pfll_dev->vin_idx_sram; 477 perf_pmu_data->clk_domain = pfll_dev->clk_domain; 478 perf_pmu_data->min_freq_vfe_idx = 479 pfll_dev->min_freq_vfe_idx; 480 perf_pmu_data->freq_ctrl_idx = pfll_dev->freq_ctrl_idx; 481 perf_pmu_data->b_skip_pldiv_below_dvco_min = pfll_dev->b_skip_pldiv_below_dvco_min; 482 memcpy(&perf_pmu_data->lut_device, &pfll_dev->lut_device, 483 sizeof(struct nv_pmu_clk_lut_device_desc)); 484 memcpy(&perf_pmu_data->regime_desc, &pfll_dev->regime_desc, 485 sizeof(struct nv_pmu_clk_regime_desc)); 486 487 status = boardobjgrpmask_export( 488 &pfll_dev->lut_prog_broadcast_slave_mask.super, 489 pfll_dev->lut_prog_broadcast_slave_mask.super.bitcount, 490 &perf_pmu_data->lut_prog_broadcast_slave_mask.super); 491 492 nvgpu_log_info(g, " Done"); 493 494 return status; 495}
diff --git a/include/clk/clk_fll.h b/include/clk/clk_fll.h
deleted file mode 100644
index 6cbdfe2..0000000
--- a/include/clk/clk_fll.h
+++ /dev/null
@@ -1,81 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22 23#ifndef NVGPU_CLK_FLL_H 24#define NVGPU_CLK_FLL_H 25 26#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 27#include "boardobj/boardobjgrp_e32.h" 28#include "boardobj/boardobjgrpmask.h" 29 30/*data and function definition to talk to driver*/ 31int clk_fll_sw_setup(struct gk20a *g); 32int clk_fll_pmu_setup(struct gk20a *g); 33 34struct avfsfllobjs { 35 struct boardobjgrp_e32 super; 36 struct boardobjgrpmask_e32 lut_prog_master_mask; 37 u32 lut_step_size_uv; 38 u32 lut_min_voltage_uv; 39 u8 lut_num_entries; 40 u16 max_min_freq_mhz; 41}; 42 43struct fll_device; 44 45typedef u32 fll_lut_broadcast_slave_register(struct gk20a *g, 46 struct avfsfllobjs *pfllobjs, 47 struct fll_device *pfll, 48 struct fll_device *pfll_slave); 49 50struct fll_device { 51 struct boardobj super; 52 u8 id; 53 u8 mdiv; 54 u16 input_freq_mhz; 55 u32 clk_domain; 56 u8 vin_idx_logic; 57 u8 vin_idx_sram; 58 u8 rail_idx_for_lut; 59 struct nv_pmu_clk_lut_device_desc lut_device; 60 struct nv_pmu_clk_regime_desc regime_desc; 61 u8 min_freq_vfe_idx; 62 u8 freq_ctrl_idx; 63 u8 target_regime_id_override; 64 bool b_skip_pldiv_below_dvco_min; 65 bool b_dvco_1x; 66 struct boardobjgrpmask_e32 lut_prog_broadcast_slave_mask; 67 fll_lut_broadcast_slave_register *lut_broadcast_slave_register; 68}; 69 70u32 nvgpu_clk_get_vbios_clk_domain_gv10x( u32 vbios_domain); 71u32 nvgpu_clk_get_vbios_clk_domain_gp10x( u32 vbios_domain); 72 73#define CLK_FLL_LUT_VF_NUM_ENTRIES(pclk) \ 74 (pclk->avfs_fllobjs.lut_num_entries) 75 76#define CLK_FLL_LUT_MIN_VOLTAGE_UV(pclk) \ 77 (pclk->avfs_fllobjs.lut_min_voltage_uv) 78#define CLK_FLL_LUT_STEP_SIZE_UV(pclk) \ 79 (pclk->avfs_fllobjs.lut_step_size_uv) 80 81#endif /* NVGPU_CLK_FLL_H */
diff --git a/include/clk/clk_freq_controller.c b/include/clk/clk_freq_controller.c
deleted file mode 100644
index f4d09b0..0000000
--- a/include/clk/clk_freq_controller.c
+++ /dev/null
@@ -1,462 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bios.h> 24#include <nvgpu/gk20a.h> 25 26#include "clk.h" 27#include "clk_fll.h" 28#include "clk_domain.h" 29#include "clk_freq_controller.h" 30#include "boardobj/boardobjgrp.h" 31#include "boardobj/boardobjgrp_e32.h" 32#include "ctrl/ctrlclk.h" 33#include "ctrl/ctrlvolt.h" 34 35static int clk_freq_controller_pmudatainit_super(struct gk20a *g, 36 struct boardobj *board_obj_ptr, 37 struct nv_pmu_boardobj *ppmudata) 38{ 39 struct nv_pmu_clk_clk_freq_controller_boardobj_set *pfreq_cntlr_set; 40 struct clk_freq_controller *pfreq_cntlr; 41 int status = 0; 42 43 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 44 if (status) { 45 return status; 46 } 47 48 pfreq_cntlr_set = 49 (struct nv_pmu_clk_clk_freq_controller_boardobj_set *)ppmudata; 50 pfreq_cntlr = (struct clk_freq_controller *)board_obj_ptr; 51 52 pfreq_cntlr_set->controller_id = pfreq_cntlr->controller_id; 53 pfreq_cntlr_set->clk_domain = pfreq_cntlr->clk_domain; 54 pfreq_cntlr_set->parts_freq_mode = pfreq_cntlr->parts_freq_mode; 55 pfreq_cntlr_set->bdisable = pfreq_cntlr->bdisable; 56 pfreq_cntlr_set->freq_cap_noise_unaware_vmin_above = 57 pfreq_cntlr->freq_cap_noise_unaware_vmin_above; 58 pfreq_cntlr_set->freq_cap_noise_unaware_vmin_below = 59 pfreq_cntlr->freq_cap_noise_unaware_vmin_below; 60 pfreq_cntlr_set->freq_hyst_pos_mhz = pfreq_cntlr->freq_hyst_pos_mhz; 61 pfreq_cntlr_set->freq_hyst_neg_mhz = pfreq_cntlr->freq_hyst_neg_mhz; 62 63 return status; 64} 65 66static int clk_freq_controller_pmudatainit_pi(struct gk20a *g, 67 struct boardobj *board_obj_ptr, 68 struct nv_pmu_boardobj *ppmudata) 69{ 70 struct nv_pmu_clk_clk_freq_controller_pi_boardobj_set 71 *pfreq_cntlr_pi_set; 72 struct clk_freq_controller_pi *pfreq_cntlr_pi; 73 int status = 0; 74 75 status = clk_freq_controller_pmudatainit_super(g, 76 board_obj_ptr, ppmudata); 77 if (status) { 78 return -1; 79 } 80 81 pfreq_cntlr_pi_set = 82 (struct nv_pmu_clk_clk_freq_controller_pi_boardobj_set *) 83 ppmudata; 84 
pfreq_cntlr_pi = (struct clk_freq_controller_pi *)board_obj_ptr; 85 86 pfreq_cntlr_pi_set->prop_gain = pfreq_cntlr_pi->prop_gain; 87 pfreq_cntlr_pi_set->integ_gain = pfreq_cntlr_pi->integ_gain; 88 pfreq_cntlr_pi_set->integ_decay = pfreq_cntlr_pi->integ_decay; 89 pfreq_cntlr_pi_set->volt_delta_min = pfreq_cntlr_pi->volt_delta_min; 90 pfreq_cntlr_pi_set->volt_delta_max = pfreq_cntlr_pi->volt_delta_max; 91 pfreq_cntlr_pi_set->slowdown_pct_min = pfreq_cntlr_pi->slowdown_pct_min; 92 pfreq_cntlr_pi_set->bpoison = pfreq_cntlr_pi->bpoison; 93 94 return status; 95} 96 97static int clk_freq_controller_construct_super(struct gk20a *g, 98 struct boardobj **ppboardobj, 99 u16 size, void *pargs) 100{ 101 struct clk_freq_controller *pfreq_cntlr = NULL; 102 struct clk_freq_controller *pfreq_cntlr_tmp = NULL; 103 int status = 0; 104 105 status = boardobj_construct_super(g, ppboardobj, size, pargs); 106 if (status) { 107 return -EINVAL; 108 } 109 110 pfreq_cntlr_tmp = (struct clk_freq_controller *)pargs; 111 pfreq_cntlr = (struct clk_freq_controller *)*ppboardobj; 112 113 pfreq_cntlr->super.pmudatainit = clk_freq_controller_pmudatainit_super; 114 115 pfreq_cntlr->controller_id = pfreq_cntlr_tmp->controller_id; 116 pfreq_cntlr->clk_domain = pfreq_cntlr_tmp->clk_domain; 117 pfreq_cntlr->parts_freq_mode = pfreq_cntlr_tmp->parts_freq_mode; 118 pfreq_cntlr->freq_cap_noise_unaware_vmin_above = 119 pfreq_cntlr_tmp->freq_cap_noise_unaware_vmin_above; 120 pfreq_cntlr->freq_cap_noise_unaware_vmin_below = 121 pfreq_cntlr_tmp->freq_cap_noise_unaware_vmin_below; 122 pfreq_cntlr->freq_hyst_pos_mhz = pfreq_cntlr_tmp->freq_hyst_pos_mhz; 123 pfreq_cntlr->freq_hyst_neg_mhz = pfreq_cntlr_tmp->freq_hyst_neg_mhz; 124 125 return status; 126} 127 128static int clk_freq_controller_construct_pi(struct gk20a *g, 129 struct boardobj **ppboardobj, 130 u16 size, void *pargs) 131{ 132 struct clk_freq_controller_pi *pfreq_cntlr_pi = NULL; 133 struct clk_freq_controller_pi *pfreq_cntlr_pi_tmp = NULL; 134 int 
status = 0; 135 136 status = clk_freq_controller_construct_super(g, ppboardobj, 137 size, pargs); 138 if (status) { 139 return -EINVAL; 140 } 141 142 pfreq_cntlr_pi = (struct clk_freq_controller_pi *)*ppboardobj; 143 pfreq_cntlr_pi_tmp = (struct clk_freq_controller_pi *)pargs; 144 145 pfreq_cntlr_pi->super.super.pmudatainit = 146 clk_freq_controller_pmudatainit_pi; 147 148 pfreq_cntlr_pi->prop_gain = pfreq_cntlr_pi_tmp->prop_gain; 149 pfreq_cntlr_pi->integ_gain = pfreq_cntlr_pi_tmp->integ_gain; 150 pfreq_cntlr_pi->integ_decay = pfreq_cntlr_pi_tmp->integ_decay; 151 pfreq_cntlr_pi->volt_delta_min = pfreq_cntlr_pi_tmp->volt_delta_min; 152 pfreq_cntlr_pi->volt_delta_max = pfreq_cntlr_pi_tmp->volt_delta_max; 153 pfreq_cntlr_pi->slowdown_pct_min = pfreq_cntlr_pi_tmp->slowdown_pct_min; 154 pfreq_cntlr_pi->bpoison = pfreq_cntlr_pi_tmp->bpoison; 155 156 return status; 157} 158 159static struct clk_freq_controller *clk_clk_freq_controller_construct( 160 struct gk20a *g, 161 void *pargs) 162{ 163 struct boardobj *board_obj_ptr = NULL; 164 int status = 0; 165 166 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_FREQ_CONTROLLER_TYPE_PI) { 167 return NULL; 168 } 169 170 status = clk_freq_controller_construct_pi(g, &board_obj_ptr, 171 sizeof(struct clk_freq_controller_pi), pargs); 172 if (status) { 173 return NULL; 174 } 175 176 return (struct clk_freq_controller *)board_obj_ptr; 177} 178 179 180static int clk_get_freq_controller_table(struct gk20a *g, 181 struct clk_freq_controllers *pclk_freq_controllers) 182{ 183 int status = 0; 184 u8 *pfreq_controller_table_ptr = NULL; 185 struct vbios_fct_1x_header header = { 0 }; 186 struct vbios_fct_1x_entry entry = { 0 }; 187 u8 entry_idx; 188 u8 *entry_offset; 189 struct clk_freq_controller *pclk_freq_cntr = NULL; 190 struct clk_freq_controller *ptmp_freq_cntr = NULL; 191 struct clk_freq_controller_pi *ptmp_freq_cntr_pi = NULL; 192 struct clk_domain *pclk_domain; 193 194 struct freq_controller_data_type { 195 union { 196 struct boardobj 
board_obj; 197 struct clk_freq_controller freq_controller; 198 struct clk_freq_controller_pi freq_controller_pi; 199 }; 200 } freq_controller_data; 201 202 pfreq_controller_table_ptr = 203 (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 204 g->bios.clock_token, 205 FREQUENCY_CONTROLLER_TABLE); 206 if (pfreq_controller_table_ptr == NULL) { 207 status = -EINVAL; 208 goto done; 209 } 210 211 memcpy(&header, pfreq_controller_table_ptr, 212 sizeof(struct vbios_fct_1x_header)); 213 214 pclk_freq_controllers->sampling_period_ms = header.sampling_period_ms; 215 pclk_freq_controllers->volt_policy_idx = 0; 216 217 /* Read in the entries. */ 218 for (entry_idx = 0; entry_idx < header.entry_count; entry_idx++) { 219 entry_offset = (pfreq_controller_table_ptr + 220 header.header_size + (entry_idx * header.entry_size)); 221 222 memset(&freq_controller_data, 0x0, 223 sizeof(struct freq_controller_data_type)); 224 ptmp_freq_cntr = &freq_controller_data.freq_controller; 225 ptmp_freq_cntr_pi = &freq_controller_data.freq_controller_pi; 226 227 memcpy(&entry, entry_offset, 228 sizeof(struct vbios_fct_1x_entry)); 229 230 if (!BIOS_GET_FIELD(entry.flags0, 231 NV_VBIOS_FCT_1X_ENTRY_FLAGS0_TYPE)) { 232 continue; 233 } 234 235 freq_controller_data.board_obj.type = (u8)BIOS_GET_FIELD( 236 entry.flags0, NV_VBIOS_FCT_1X_ENTRY_FLAGS0_TYPE); 237 238 ptmp_freq_cntr->controller_id = 239 (u8)BIOS_GET_FIELD(entry.param0, 240 NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID); 241 242 pclk_domain = CLK_CLK_DOMAIN_GET((&g->clk_pmu), 243 (u32)entry.clk_domain_idx); 244 freq_controller_data.freq_controller.clk_domain = 245 pclk_domain->api_domain; 246 247 ptmp_freq_cntr->parts_freq_mode = 248 (u8)BIOS_GET_FIELD(entry.param0, 249 NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE); 250 251 /* Populate PI specific data */ 252 ptmp_freq_cntr_pi->slowdown_pct_min = 253 (u8)BIOS_GET_FIELD(entry.param1, 254 NV_VBIOS_FCT_1X_ENTRY_PARAM1_SLOWDOWN_PCT_MIN); 255 256 ptmp_freq_cntr_pi->bpoison = 257 BIOS_GET_FIELD(entry.param1, 258 
NV_VBIOS_FCT_1X_ENTRY_PARAM1_POISON); 259 260 ptmp_freq_cntr_pi->prop_gain = 261 (s32)BIOS_GET_FIELD(entry.param2, 262 NV_VBIOS_FCT_1X_ENTRY_PARAM2_PROP_GAIN); 263 264 ptmp_freq_cntr_pi->integ_gain = 265 (s32)BIOS_GET_FIELD(entry.param3, 266 NV_VBIOS_FCT_1X_ENTRY_PARAM3_INTEG_GAIN); 267 268 ptmp_freq_cntr_pi->integ_decay = 269 (s32)BIOS_GET_FIELD(entry.param4, 270 NV_VBIOS_FCT_1X_ENTRY_PARAM4_INTEG_DECAY); 271 272 ptmp_freq_cntr_pi->volt_delta_min = 273 (s32)BIOS_GET_FIELD(entry.param5, 274 NV_VBIOS_FCT_1X_ENTRY_PARAM5_VOLT_DELTA_MIN); 275 276 ptmp_freq_cntr_pi->volt_delta_max = 277 (s32)BIOS_GET_FIELD(entry.param6, 278 NV_VBIOS_FCT_1X_ENTRY_PARAM6_VOLT_DELTA_MAX); 279 280 ptmp_freq_cntr->freq_cap_noise_unaware_vmin_above = 281 (s16)BIOS_GET_FIELD(entry.param7, 282 NV_VBIOS_FCT_1X_ENTRY_PARAM7_FREQ_CAP_VF); 283 284 ptmp_freq_cntr->freq_cap_noise_unaware_vmin_below = 285 (s16)BIOS_GET_FIELD(entry.param7, 286 NV_VBIOS_FCT_1X_ENTRY_PARAM7_FREQ_CAP_VMIN); 287 288 ptmp_freq_cntr->freq_hyst_pos_mhz = 289 (s16)BIOS_GET_FIELD(entry.param8, 290 NV_VBIOS_FCT_1X_ENTRY_PARAM8_FREQ_HYST_POS); 291 ptmp_freq_cntr->freq_hyst_neg_mhz = 292 (s16)BIOS_GET_FIELD(entry.param8, 293 NV_VBIOS_FCT_1X_ENTRY_PARAM8_FREQ_HYST_NEG); 294 295 if (ptmp_freq_cntr_pi->volt_delta_max < 296 ptmp_freq_cntr_pi->volt_delta_min) { 297 goto done; 298 } 299 300 pclk_freq_cntr = clk_clk_freq_controller_construct(g, 301 (void *)&freq_controller_data); 302 303 if (pclk_freq_cntr == NULL) { 304 nvgpu_err(g, 305 "unable to construct clock freq cntlr boardobj for %d", 306 entry_idx); 307 status = -EINVAL; 308 goto done; 309 } 310 311 status = boardobjgrp_objinsert( 312 &pclk_freq_controllers->super.super, 313 (struct boardobj *)pclk_freq_cntr, entry_idx); 314 if (status) { 315 nvgpu_err(g, 316 "unable to insert clock freq cntlr boardobj for"); 317 status = -EINVAL; 318 goto done; 319 } 320 321 } 322 323done: 324 return status; 325} 326 327int clk_freq_controller_pmu_setup(struct gk20a *g) 328{ 329 int status; 
330 struct boardobjgrp *pboardobjgrp = NULL; 331 332 nvgpu_log_info(g, " "); 333 334 pboardobjgrp = &g->clk_pmu.clk_freq_controllers.super.super; 335 336 if (!pboardobjgrp->bconstructed) { 337 return -EINVAL; 338 } 339 340 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 341 342 nvgpu_log_info(g, "Done"); 343 return status; 344} 345 346static int _clk_freq_controller_devgrp_pmudata_instget(struct gk20a *g, 347 struct nv_pmu_boardobjgrp *pmuboardobjgrp, 348 struct nv_pmu_boardobj **ppboardobjpmudata, 349 u8 idx) 350{ 351 struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set *pgrp_set = 352 (struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set *) 353 pmuboardobjgrp; 354 355 nvgpu_log_info(g, " "); 356 357 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 358 if (((u32)BIT(idx) & 359 pgrp_set->hdr.data.super.obj_mask.super.data[0]) == 0) { 360 return -EINVAL; 361 } 362 363 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 364 &pgrp_set->objects[idx].data.board_obj; 365 nvgpu_log_info(g, " Done"); 366 return 0; 367} 368 369static int _clk_freq_controllers_pmudatainit(struct gk20a *g, 370 struct boardobjgrp *pboardobjgrp, 371 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 372{ 373 struct nv_pmu_clk_clk_freq_controller_boardobjgrp_set_header *pset = 374 (struct nv_pmu_clk_clk_freq_controller_boardobjgrp_set_header *) 375 pboardobjgrppmu; 376 struct clk_freq_controllers *pcntrs = 377 (struct clk_freq_controllers *)pboardobjgrp; 378 int status = 0; 379 380 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 381 if (status) { 382 nvgpu_err(g, 383 "error updating pmu boardobjgrp for clk freq ctrs 0x%x", 384 status); 385 goto done; 386 } 387 pset->sampling_period_ms = pcntrs->sampling_period_ms; 388 pset->volt_policy_idx = pcntrs->volt_policy_idx; 389 390done: 391 return status; 392} 393 394int clk_freq_controller_sw_setup(struct gk20a *g) 395{ 396 int status = 0; 397 struct boardobjgrp *pboardobjgrp = NULL; 398 struct 
clk_freq_controllers *pclk_freq_controllers; 399 struct avfsfllobjs *pfllobjs = &(g->clk_pmu.avfs_fllobjs); 400 struct fll_device *pfll; 401 struct clk_freq_controller *pclkfreqctrl; 402 u8 i; 403 u8 j; 404 405 nvgpu_log_info(g, " "); 406 407 pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers; 408 status = boardobjgrpconstruct_e32(g, &pclk_freq_controllers->super); 409 if (status) { 410 nvgpu_err(g, 411 "error creating boardobjgrp for clk FCT, status - 0x%x", 412 status); 413 goto done; 414 } 415 416 pboardobjgrp = &g->clk_pmu.clk_freq_controllers.super.super; 417 418 pboardobjgrp->pmudatainit = _clk_freq_controllers_pmudatainit; 419 pboardobjgrp->pmudatainstget = 420 _clk_freq_controller_devgrp_pmudata_instget; 421 pboardobjgrp->pmustatusinstget = NULL; 422 423 /* Initialize mask to zero.*/ 424 boardobjgrpmask_e32_init(&pclk_freq_controllers->freq_ctrl_load_mask, 425 NULL); 426 427 BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_FREQ_CONTROLLER); 428 429 status = BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, 430 clk, CLK, clk_freq_controller, CLK_FREQ_CONTROLLER); 431 if (status) { 432 nvgpu_err(g, 433 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 434 status); 435 goto done; 436 } 437 438 status = clk_get_freq_controller_table(g, pclk_freq_controllers); 439 if (status) { 440 nvgpu_err(g, "error reading freq controller table - 0x%x", 441 status); 442 goto done; 443 } 444 445 BOARDOBJGRP_FOR_EACH(&(pclk_freq_controllers->super.super), 446 struct clk_freq_controller *, pclkfreqctrl, i) { 447 pfll = NULL; 448 j = 0; 449 BOARDOBJGRP_FOR_EACH(&(pfllobjs->super.super), 450 struct fll_device *, pfll, j) { 451 if (pclkfreqctrl->controller_id == pfll->id) { 452 pfll->freq_ctrl_idx = i; 453 break; 454 } 455 } 456 boardobjgrpmask_bitset(&pclk_freq_controllers-> 457 freq_ctrl_load_mask.super, i); 458 } 459done: 460 nvgpu_log_info(g, " done status %x", status); 461 return status; 462}
diff --git a/include/clk/clk_freq_controller.h b/include/clk/clk_freq_controller.h
deleted file mode 100644
index 7ae475c..0000000
--- a/include/clk/clk_freq_controller.h
+++ /dev/null
@@ -1,84 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22 23#ifndef NVGPU_CLK_FREQ_CONTROLLER_H 24#define NVGPU_CLK_FREQ_CONTROLLER_H 25 26#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL 0xFF 27#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS 0x00 28#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC 0x01 29#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR 0x02 30#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0 0x03 31#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC1 0x04 32#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC2 0x05 33#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC3 0x06 34#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC4 0x07 35#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC5 0x08 36#define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPCS 0x09 37 38#define CTRL_CLK_CLK_FREQ_CONTROLLER_MASK_UNICAST_GPC \ 39 (BIT(CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0) | \ 40 BIT(CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC1) | \ 41 BIT(CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC2) | \ 42 BIT(CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC3) | \ 43 BIT(CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC4) | \ 44 BIT(CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC5)) 45 46#define CTRL_CLK_CLK_FREQ_CONTROLLER_TYPE_DISABLED 0x00 47#define CTRL_CLK_CLK_FREQ_CONTROLLER_TYPE_PI 0x01 48 49 50struct clk_freq_controller { 51 struct boardobj super; 52 u8 controller_id; 53 u8 parts_freq_mode; 54 bool bdisable; 55 u32 clk_domain; 56 s16 freq_cap_noise_unaware_vmin_above; 57 s16 freq_cap_noise_unaware_vmin_below; 58 s16 freq_hyst_pos_mhz; 59 s16 freq_hyst_neg_mhz; 60}; 61 62struct clk_freq_controller_pi { 63 struct clk_freq_controller super; 64 s32 prop_gain; 65 s32 integ_gain; 66 s32 integ_decay; 67 s32 volt_delta_min; 68 s32 volt_delta_max; 69 u8 slowdown_pct_min; 70 bool bpoison; 71}; 72 73struct clk_freq_controllers { 74 struct boardobjgrp_e32 super; 75 u32 sampling_period_ms; 76 struct boardobjgrpmask_e32 freq_ctrl_load_mask; 77 u8 volt_policy_idx; 78 void *pprereq_load; 79}; 80 81int clk_freq_controller_sw_setup(struct gk20a *g); 82int clk_freq_controller_pmu_setup(struct gk20a *g); 83 84#endif /* NVGPU_CLK_FREQ_CONTROLLER_H */
diff --git a/include/clk/clk_mclk.h b/include/clk/clk_mclk.h
deleted file mode 100644
index 47c81d1..0000000
--- a/include/clk/clk_mclk.h
+++ /dev/null
@@ -1,60 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21*/ 22 23#ifndef NVGPU_CLK_MCLK_H 24#define NVGPU_CLK_MCLK_H 25 26#include <nvgpu/lock.h> 27 28#define GP106_MCLK_LOW_SPEED 0U 29#define GP106_MCLK_MID_SPEED 1U 30#define GP106_MCLK_HIGH_SPEED 2U 31#define GP106_MCLK_NUM_SPEED 3U 32 33enum gk20a_mclk_speed { 34 gk20a_mclk_low_speed, 35 gk20a_mclk_mid_speed, 36 gk20a_mclk_high_speed, 37}; 38 39struct clk_mclk_state { 40 u32 speed; 41 struct nvgpu_mutex mclk_lock; 42 struct nvgpu_mutex data_lock; 43 44 u16 p5_min; 45 u16 p0_min; 46 47 void *vreg_buf; 48 bool init; 49 50#ifdef CONFIG_DEBUG_FS 51 s64 switch_max; 52 s64 switch_min; 53 u64 switch_num; 54 s64 switch_avg; 55 s64 switch_std; 56 bool debugfs_set; 57#endif 58}; 59 60#endif /* NVGPU_CLK_MCLK_H */
diff --git a/include/clk/clk_prog.c b/include/clk/clk_prog.c
deleted file mode 100644
index 9d44d6d..0000000
--- a/include/clk/clk_prog.c
+++ /dev/null
@@ -1,1152 +0,0 @@ 1/* 2 * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bios.h> 24#include <nvgpu/kmem.h> 25#include <nvgpu/gk20a.h> 26 27#include "clk.h" 28#include "clk_prog.h" 29#include "clk_vf_point.h" 30#include "boardobj/boardobjgrp.h" 31#include "boardobj/boardobjgrp_e32.h" 32#include "gp106/bios_gp106.h" 33#include "ctrl/ctrlclk.h" 34#include "ctrl/ctrlvolt.h" 35 36static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs); 37static int devinit_get_clk_prog_table(struct gk20a *g, 38 struct clk_progs *pprogobjs); 39static vf_flatten vfflatten_prog_1x_master; 40static vf_lookup vflookup_prog_1x_master; 41static get_fpoints getfpoints_prog_1x_master; 42static get_slaveclk getslaveclk_prog_1x_master; 43 44static int _clk_progs_pmudatainit(struct gk20a *g, 45 struct boardobjgrp *pboardobjgrp, 46 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 47{ 48 struct nv_pmu_clk_clk_prog_boardobjgrp_set_header *pset = 49 (struct nv_pmu_clk_clk_prog_boardobjgrp_set_header *) 50 pboardobjgrppmu; 51 struct clk_progs *pprogs = (struct clk_progs *)pboardobjgrp; 52 u32 status = 0; 53 54 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 55 if (status) { 56 nvgpu_err(g, "error updating pmu boardobjgrp for clk prog 0x%x", 57 status); 58 goto done; 59 } 60 pset->slave_entry_count = pprogs->slave_entry_count; 61 pset->vf_entry_count = pprogs->vf_entry_count; 62 63done: 64 return status; 65} 66 67static int _clk_progs_pmudata_instget(struct gk20a *g, 68 struct nv_pmu_boardobjgrp *pmuboardobjgrp, 69 struct nv_pmu_boardobj **ppboardobjpmudata, 70 u8 idx) 71{ 72 struct nv_pmu_clk_clk_prog_boardobj_grp_set *pgrp_set = 73 (struct nv_pmu_clk_clk_prog_boardobj_grp_set *)pmuboardobjgrp; 74 75 nvgpu_log_info(g, " "); 76 77 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 78 if (((u32)BIT(idx) & 79 pgrp_set->hdr.data.super.obj_mask.super.data[0]) == 0) { 80 return -EINVAL; 81 } 82 83 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 84 &pgrp_set->objects[idx].data.board_obj; 85 
nvgpu_log_info(g, " Done"); 86 return 0; 87} 88 89int clk_prog_sw_setup(struct gk20a *g) 90{ 91 int status; 92 struct boardobjgrp *pboardobjgrp = NULL; 93 struct clk_progs *pclkprogobjs; 94 95 nvgpu_log_info(g, " "); 96 97 status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_progobjs.super); 98 if (status) { 99 nvgpu_err(g, 100 "error creating boardobjgrp for clk prog, status - 0x%x", 101 status); 102 goto done; 103 } 104 105 pboardobjgrp = &g->clk_pmu.clk_progobjs.super.super; 106 pclkprogobjs = &(g->clk_pmu.clk_progobjs); 107 108 BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_PROG); 109 110 status = BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, 111 clk, CLK, clk_prog, CLK_PROG); 112 if (status) { 113 nvgpu_err(g, 114 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 115 status); 116 goto done; 117 } 118 119 pboardobjgrp->pmudatainit = _clk_progs_pmudatainit; 120 pboardobjgrp->pmudatainstget = _clk_progs_pmudata_instget; 121 122 status = devinit_get_clk_prog_table(g, pclkprogobjs); 123 if (status) { 124 goto done; 125 } 126 127 status = clk_domain_clk_prog_link(g, &g->clk_pmu); 128 if (status) { 129 nvgpu_err(g, "error constructing VF point board objects"); 130 goto done; 131 } 132 133 134done: 135 nvgpu_log_info(g, " done status %x", status); 136 return status; 137} 138 139int clk_prog_pmu_setup(struct gk20a *g) 140{ 141 int status; 142 struct boardobjgrp *pboardobjgrp = NULL; 143 144 nvgpu_log_info(g, " "); 145 146 pboardobjgrp = &g->clk_pmu.clk_progobjs.super.super; 147 148 if (!pboardobjgrp->bconstructed) { 149 return -EINVAL; 150 } 151 152 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 153 154 nvgpu_log_info(g, "Done"); 155 return status; 156} 157 158static int devinit_get_clk_prog_table(struct gk20a *g, 159 struct clk_progs *pclkprogobjs) 160{ 161 int status = 0; 162 u8 *clkprogs_tbl_ptr = NULL; 163 struct vbios_clock_programming_table_1x_header header = { 0 }; 164 struct vbios_clock_programming_table_1x_entry prog = { 0 }; 
165 struct vbios_clock_programming_table_1x_slave_entry slaveprog = { 0 }; 166 struct vbios_clock_programming_table_1x_vf_entry vfprog = { 0 }; 167 u8 *entry = NULL; 168 u8 *slaveentry = NULL; 169 u8 *vfentry = NULL; 170 u32 i, j = 0; 171 struct clk_prog *pprog; 172 u8 prog_type; 173 u32 szfmt = VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_SIZE_0D; 174 u32 hszfmt = VBIOS_CLOCK_PROGRAMMING_TABLE_1X_HEADER_SIZE_08; 175 u32 slaveszfmt = VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_SIZE_03; 176 u32 vfszfmt = VBIOS_CLOCK_PROGRAMMING_TABLE_1X_VF_ENTRY_SIZE_02; 177 struct ctrl_clk_clk_prog_1x_master_vf_entry 178 vfentries[CTRL_CLK_CLK_PROG_1X_MASTER_VF_ENTRY_MAX_ENTRIES]; 179 struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry 180 ratioslaveentries[CTRL_CLK_PROG_1X_MASTER_MAX_SLAVE_ENTRIES]; 181 struct ctrl_clk_clk_prog_1x_master_table_slave_entry 182 tableslaveentries[CTRL_CLK_PROG_1X_MASTER_MAX_SLAVE_ENTRIES]; 183 union { 184 struct boardobj board_obj; 185 struct clk_prog clkprog; 186 struct clk_prog_1x v1x; 187 struct clk_prog_1x_master v1x_master; 188 struct clk_prog_1x_master_ratio v1x_master_ratio; 189 struct clk_prog_1x_master_table v1x_master_table; 190 } prog_data; 191 192 nvgpu_log_info(g, " "); 193 194 clkprogs_tbl_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 195 g->bios.clock_token, CLOCK_PROGRAMMING_TABLE); 196 if (clkprogs_tbl_ptr == NULL) { 197 status = -EINVAL; 198 goto done; 199 } 200 201 memcpy(&header, clkprogs_tbl_ptr, hszfmt); 202 if (header.header_size < hszfmt) { 203 status = -EINVAL; 204 goto done; 205 } 206 hszfmt = header.header_size; 207 208 if (header.entry_size <= VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_SIZE_05) { 209 szfmt = header.entry_size; 210 } else if (header.entry_size <= VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_SIZE_0D) { 211 szfmt = header.entry_size; 212 } else { 213 status = -EINVAL; 214 goto done; 215 } 216 217 if (header.vf_entry_size < vfszfmt) { 218 status = -EINVAL; 219 goto done; 220 } 221 vfszfmt = header.vf_entry_size; 222 if 
(header.slave_entry_size < slaveszfmt) { 223 status = -EINVAL; 224 goto done; 225 } 226 slaveszfmt = header.slave_entry_size; 227 if (header.vf_entry_count > CTRL_CLK_CLK_DELTA_MAX_VOLT_RAILS) { 228 status = -EINVAL; 229 goto done; 230 } 231 232 pclkprogobjs->slave_entry_count = header.slave_entry_count; 233 pclkprogobjs->vf_entry_count = header.vf_entry_count; 234 235 for (i = 0; i < header.entry_count; i++) { 236 memset(&prog_data, 0x0, (u32)sizeof(prog_data)); 237 238 /* Read table entries*/ 239 entry = clkprogs_tbl_ptr + hszfmt + 240 (i * (szfmt + (header.slave_entry_count * slaveszfmt) + 241 (header.vf_entry_count * vfszfmt))); 242 243 memcpy(&prog, entry, szfmt); 244 memset(vfentries, 0xFF, 245 sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * 246 CTRL_CLK_CLK_PROG_1X_MASTER_VF_ENTRY_MAX_ENTRIES); 247 memset(ratioslaveentries, 0xFF, 248 sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 249 CTRL_CLK_PROG_1X_MASTER_MAX_SLAVE_ENTRIES); 250 memset(tableslaveentries, 0xFF, 251 sizeof(struct ctrl_clk_clk_prog_1x_master_table_slave_entry) * 252 CTRL_CLK_PROG_1X_MASTER_MAX_SLAVE_ENTRIES); 253 prog_type = (u8)BIOS_GET_FIELD(prog.flags0, 254 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE); 255 256 switch (prog_type) { 257 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_PLL: 258 prog_data.v1x.source = CTRL_CLK_PROG_1X_SOURCE_PLL; 259 prog_data.v1x.source_data.pll.pll_idx = 260 (u8)BIOS_GET_FIELD(prog.param0, 261 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_PARAM0_PLL_PLL_INDEX); 262 prog_data.v1x.source_data.pll.freq_step_size_mhz = 263 (u8)BIOS_GET_FIELD(prog.param1, 264 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_PARAM1_PLL_FREQ_STEP_SIZE); 265 break; 266 267 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_ONE_SOURCE: 268 prog_data.v1x.source = CTRL_CLK_PROG_1X_SOURCE_ONE_SOURCE; 269 break; 270 271 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_FLL: 272 prog_data.v1x.source = CTRL_CLK_PROG_1X_SOURCE_FLL; 273 
break; 274 275 default: 276 nvgpu_err(g, "invalid source %d", prog_type); 277 status = -EINVAL; 278 goto done; 279 } 280 281 prog_data.v1x.freq_max_mhz = (u16)prog.freq_max_mhz; 282 283 prog_type = (u8)BIOS_GET_FIELD(prog.flags0, 284 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE); 285 286 vfentry = entry + szfmt + 287 header.slave_entry_count * slaveszfmt; 288 slaveentry = entry + szfmt; 289 switch (prog_type) { 290 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_RATIO: 291 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_TABLE: 292 prog_data.v1x_master.b_o_c_o_v_enabled = false; 293 for (j = 0; j < header.vf_entry_count; j++) { 294 memcpy(&vfprog, vfentry, vfszfmt); 295 296 vfentries[j].vfe_idx = (u8)vfprog.vfe_idx; 297 if (CTRL_CLK_PROG_1X_SOURCE_FLL == 298 prog_data.v1x.source) { 299 vfentries[j].gain_vfe_idx = (u8)BIOS_GET_FIELD( 300 vfprog.param0, 301 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_VF_ENTRY_PARAM0_FLL_GAIN_VFE_IDX); 302 } else { 303 vfentries[j].gain_vfe_idx = CTRL_BOARDOBJ_IDX_INVALID; 304 } 305 vfentry += vfszfmt; 306 } 307 308 prog_data.v1x_master.p_vf_entries = vfentries; 309 310 for (j = 0; j < header.slave_entry_count; j++) { 311 memcpy(&slaveprog, slaveentry, slaveszfmt); 312 313 switch (prog_type) { 314 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_RATIO: 315 ratioslaveentries[j].clk_dom_idx = 316 (u8)slaveprog.clk_dom_idx; 317 ratioslaveentries[j].ratio = (u8) 318 BIOS_GET_FIELD(slaveprog.param0, 319 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_PARAM0_MASTER_RATIO_RATIO); 320 break; 321 322 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_TABLE: 323 tableslaveentries[j].clk_dom_idx = 324 (u8)slaveprog.clk_dom_idx; 325 tableslaveentries[j].freq_mhz = 326 (u16)BIOS_GET_FIELD(slaveprog.param0, 327 NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_PARAM0_MASTER_TABLE_FREQ); 328 break; 329 } 330 slaveentry += slaveszfmt; 331 } 332 333 switch (prog_type) { 334 case 
NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_RATIO: 335 prog_data.board_obj.type = CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO; 336 prog_data.v1x_master_ratio.p_slave_entries = 337 ratioslaveentries; 338 break; 339 340 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_TABLE: 341 prog_data.board_obj.type = CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE; 342 343 prog_data.v1x_master_table.p_slave_entries = 344 tableslaveentries; 345 break; 346 347 } 348 break; 349 350 case NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_SLAVE: 351 prog_data.board_obj.type = CTRL_CLK_CLK_PROG_TYPE_1X; 352 break; 353 354 355 default: 356 nvgpu_err(g, "source issue %d", prog_type); 357 status = -EINVAL; 358 goto done; 359 } 360 361 pprog = construct_clk_prog(g, (void *)&prog_data); 362 if (pprog == NULL) { 363 nvgpu_err(g, 364 "error constructing clk_prog boardobj %d", i); 365 status = -EINVAL; 366 goto done; 367 } 368 369 status = boardobjgrp_objinsert(&pclkprogobjs->super.super, 370 (struct boardobj *)pprog, i); 371 if (status) { 372 nvgpu_err(g, "error adding clk_prog boardobj %d", i); 373 status = -EINVAL; 374 goto done; 375 } 376 } 377done: 378 nvgpu_log_info(g, " done status %x", status); 379 return status; 380} 381 382static int _clk_prog_pmudatainit_super(struct gk20a *g, 383 struct boardobj *board_obj_ptr, 384 struct nv_pmu_boardobj *ppmudata) 385{ 386 int status = 0; 387 388 nvgpu_log_info(g, " "); 389 390 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 391 return status; 392} 393 394static int _clk_prog_pmudatainit_1x(struct gk20a *g, 395 struct boardobj *board_obj_ptr, 396 struct nv_pmu_boardobj *ppmudata) 397{ 398 int status = 0; 399 struct clk_prog_1x *pclk_prog_1x; 400 struct nv_pmu_clk_clk_prog_1x_boardobj_set *pset; 401 402 nvgpu_log_info(g, " "); 403 404 status = _clk_prog_pmudatainit_super(g, board_obj_ptr, ppmudata); 405 if (status != 0) { 406 return status; 407 } 408 409 pclk_prog_1x = (struct clk_prog_1x *)board_obj_ptr; 
410 411 pset = (struct nv_pmu_clk_clk_prog_1x_boardobj_set *) 412 ppmudata; 413 414 pset->source = pclk_prog_1x->source; 415 pset->freq_max_mhz = pclk_prog_1x->freq_max_mhz; 416 pset->source_data = pclk_prog_1x->source_data; 417 418 return status; 419} 420 421static int _clk_prog_pmudatainit_1x_master(struct gk20a *g, 422 struct boardobj *board_obj_ptr, 423 struct nv_pmu_boardobj *ppmudata) 424{ 425 int status = 0; 426 struct clk_prog_1x_master *pclk_prog_1x_master; 427 struct nv_pmu_clk_clk_prog_1x_master_boardobj_set *pset; 428 u32 vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * 429 g->clk_pmu.clk_progobjs.vf_entry_count; 430 431 nvgpu_log_info(g, " "); 432 433 status = _clk_prog_pmudatainit_1x(g, board_obj_ptr, ppmudata); 434 435 pclk_prog_1x_master = 436 (struct clk_prog_1x_master *)board_obj_ptr; 437 438 pset = (struct nv_pmu_clk_clk_prog_1x_master_boardobj_set *) 439 ppmudata; 440 441 memcpy(pset->vf_entries, pclk_prog_1x_master->p_vf_entries, vfsize); 442 443 pset->b_o_c_o_v_enabled = pclk_prog_1x_master->b_o_c_o_v_enabled; 444 pset->source_data = pclk_prog_1x_master->source_data; 445 446 memcpy(&pset->deltas, &pclk_prog_1x_master->deltas, 447 (u32) sizeof(struct ctrl_clk_clk_delta)); 448 449 return status; 450} 451 452static int _clk_prog_pmudatainit_1x_master_ratio(struct gk20a *g, 453 struct boardobj *board_obj_ptr, 454 struct nv_pmu_boardobj *ppmudata) 455{ 456 int status = 0; 457 struct clk_prog_1x_master_ratio *pclk_prog_1x_master_ratio; 458 struct nv_pmu_clk_clk_prog_1x_master_ratio_boardobj_set *pset; 459 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 460 g->clk_pmu.clk_progobjs.slave_entry_count; 461 462 nvgpu_log_info(g, " "); 463 464 status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata); 465 if (status != 0) { 466 return status; 467 } 468 469 pclk_prog_1x_master_ratio = 470 (struct clk_prog_1x_master_ratio *)board_obj_ptr; 471 472 pset = (struct 
nv_pmu_clk_clk_prog_1x_master_ratio_boardobj_set *) 473 ppmudata; 474 475 memcpy(pset->slave_entries, 476 pclk_prog_1x_master_ratio->p_slave_entries, slavesize); 477 478 return status; 479} 480 481static int _clk_prog_pmudatainit_1x_master_table(struct gk20a *g, 482 struct boardobj *board_obj_ptr, 483 struct nv_pmu_boardobj *ppmudata) 484{ 485 int status = 0; 486 struct clk_prog_1x_master_table *pclk_prog_1x_master_table; 487 struct nv_pmu_clk_clk_prog_1x_master_table_boardobj_set *pset; 488 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 489 g->clk_pmu.clk_progobjs.slave_entry_count; 490 491 nvgpu_log_info(g, " "); 492 493 status = _clk_prog_pmudatainit_1x_master(g, board_obj_ptr, ppmudata); 494 if (status != 0) { 495 return status; 496 } 497 498 pclk_prog_1x_master_table = 499 (struct clk_prog_1x_master_table *)board_obj_ptr; 500 501 pset = (struct nv_pmu_clk_clk_prog_1x_master_table_boardobj_set *) 502 ppmudata; 503 memcpy(pset->slave_entries, 504 pclk_prog_1x_master_table->p_slave_entries, slavesize); 505 506 return status; 507} 508 509static u32 _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g, 510 struct clk_pmupstate *pclk, 511 struct clk_prog_1x_master *p1xmaster, 512 struct ctrl_clk_clk_prog_1x_master_vf_entry *p_vf_rail, 513 struct clk_vf_point *p_vf_point_tmp, 514 u8 *p_vf_point_idx) 515{ 516 struct clk_vf_point *p_vf_point; 517 u32 status; 518 519 nvgpu_log_info(g, " "); 520 521 p_vf_point = construct_clk_vf_point(g, (void *)p_vf_point_tmp); 522 if (p_vf_point == NULL) { 523 status = -ENOMEM; 524 goto done; 525 } 526 status = pclk->clk_vf_pointobjs.super.super.objinsert( 527 &pclk->clk_vf_pointobjs.super.super, 528 &p_vf_point->super, 529 *p_vf_point_idx); 530 if (status) { 531 goto done; 532 } 533 534 p_vf_rail->vf_point_idx_last = (*p_vf_point_idx)++; 535 536done: 537 nvgpu_log_info(g, "done status %x", status); 538 return status; 539} 540 541static int clk_prog_construct_super(struct gk20a *g, 542 struct 
boardobj **ppboardobj, 543 u16 size, void *pargs) 544{ 545 struct clk_prog *pclkprog; 546 int status = 0; 547 548 status = boardobj_construct_super(g, ppboardobj, 549 size, pargs); 550 if (status) { 551 return -EINVAL; 552 } 553 554 pclkprog = (struct clk_prog *)*ppboardobj; 555 556 pclkprog->super.pmudatainit = 557 _clk_prog_pmudatainit_super; 558 return status; 559} 560 561 562static int clk_prog_construct_1x(struct gk20a *g, 563 struct boardobj **ppboardobj, 564 u16 size, void *pargs) 565{ 566 struct boardobj *ptmpobj = (struct boardobj *)pargs; 567 struct clk_prog_1x *pclkprog; 568 struct clk_prog_1x *ptmpprog = 569 (struct clk_prog_1x *)pargs; 570 int status = 0; 571 572 nvgpu_log_info(g, " "); 573 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X); 574 status = clk_prog_construct_super(g, ppboardobj, size, pargs); 575 if (status) { 576 return -EINVAL; 577 } 578 579 pclkprog = (struct clk_prog_1x *)*ppboardobj; 580 581 pclkprog->super.super.pmudatainit = 582 _clk_prog_pmudatainit_1x; 583 584 pclkprog->source = ptmpprog->source; 585 pclkprog->freq_max_mhz = ptmpprog->freq_max_mhz; 586 pclkprog->source_data = ptmpprog->source_data; 587 588 return status; 589} 590 591static int clk_prog_construct_1x_master(struct gk20a *g, 592 struct boardobj **ppboardobj, 593 u16 size, void *pargs) 594{ 595 struct boardobj *ptmpobj = (struct boardobj *)pargs; 596 struct clk_prog_1x_master *pclkprog; 597 struct clk_prog_1x_master *ptmpprog = 598 (struct clk_prog_1x_master *)pargs; 599 int status = 0; 600 u32 vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * 601 g->clk_pmu.clk_progobjs.vf_entry_count; 602 u8 railidx; 603 604 nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs)); 605 606 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER); 607 status = clk_prog_construct_1x(g, ppboardobj, size, pargs); 608 if (status) { 609 return -EINVAL; 610 } 611 612 pclkprog = (struct clk_prog_1x_master *)*ppboardobj; 613 614 pclkprog->super.super.super.pmudatainit = 
615 _clk_prog_pmudatainit_1x_master; 616 617 pclkprog->vfflatten = 618 vfflatten_prog_1x_master; 619 620 pclkprog->vflookup = 621 vflookup_prog_1x_master; 622 623 pclkprog->getfpoints = 624 getfpoints_prog_1x_master; 625 626 pclkprog->getslaveclk = 627 getslaveclk_prog_1x_master; 628 629 pclkprog->p_vf_entries = (struct ctrl_clk_clk_prog_1x_master_vf_entry *) 630 nvgpu_kzalloc(g, vfsize); 631 if (!pclkprog->p_vf_entries) 632 return -ENOMEM; 633 634 memcpy(pclkprog->p_vf_entries, ptmpprog->p_vf_entries, vfsize); 635 636 pclkprog->b_o_c_o_v_enabled = ptmpprog->b_o_c_o_v_enabled; 637 638 for (railidx = 0; 639 railidx < g->clk_pmu.clk_progobjs.vf_entry_count; 640 railidx++) { 641 pclkprog->p_vf_entries[railidx].vf_point_idx_first = 642 CTRL_CLK_CLK_VF_POINT_IDX_INVALID; 643 pclkprog->p_vf_entries[railidx].vf_point_idx_last = 644 CTRL_CLK_CLK_VF_POINT_IDX_INVALID; 645 } 646 647 return status; 648} 649 650static int clk_prog_construct_1x_master_ratio(struct gk20a *g, 651 struct boardobj **ppboardobj, 652 u16 size, void *pargs) 653{ 654 struct boardobj *ptmpobj = (struct boardobj *)pargs; 655 struct clk_prog_1x_master_ratio *pclkprog; 656 struct clk_prog_1x_master_ratio *ptmpprog = 657 (struct clk_prog_1x_master_ratio *)pargs; 658 int status = 0; 659 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 660 g->clk_pmu.clk_progobjs.slave_entry_count; 661 662 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO) { 663 return -EINVAL; 664 } 665 666 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO); 667 status = clk_prog_construct_1x_master(g, ppboardobj, size, pargs); 668 if (status) { 669 return -EINVAL; 670 } 671 672 pclkprog = (struct clk_prog_1x_master_ratio *)*ppboardobj; 673 674 pclkprog->super.super.super.super.pmudatainit = 675 _clk_prog_pmudatainit_1x_master_ratio; 676 677 pclkprog->p_slave_entries = 678 (struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry *) 679 nvgpu_kzalloc(g, slavesize); 680 if 
(!pclkprog->p_slave_entries) { 681 return -ENOMEM; 682 } 683 684 memset(pclkprog->p_slave_entries, CTRL_CLK_CLK_DOMAIN_INDEX_INVALID, 685 slavesize); 686 687 memcpy(pclkprog->p_slave_entries, ptmpprog->p_slave_entries, slavesize); 688 689 return status; 690} 691 692static int clk_prog_construct_1x_master_table(struct gk20a *g, 693 struct boardobj **ppboardobj, 694 u16 size, void *pargs) 695{ 696 struct boardobj *ptmpobj = (struct boardobj *)pargs; 697 struct clk_prog_1x_master_table *pclkprog; 698 struct clk_prog_1x_master_table *ptmpprog = 699 (struct clk_prog_1x_master_table *)pargs; 700 int status = 0; 701 u32 slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * 702 g->clk_pmu.clk_progobjs.slave_entry_count; 703 704 nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs)); 705 706 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE) { 707 return -EINVAL; 708 } 709 710 ptmpobj->type_mask |= BIT(CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE); 711 status = clk_prog_construct_1x_master(g, ppboardobj, size, pargs); 712 if (status) { 713 return -EINVAL; 714 } 715 716 pclkprog = (struct clk_prog_1x_master_table *)*ppboardobj; 717 718 pclkprog->super.super.super.super.pmudatainit = 719 _clk_prog_pmudatainit_1x_master_table; 720 721 pclkprog->p_slave_entries = 722 (struct ctrl_clk_clk_prog_1x_master_table_slave_entry *) 723 nvgpu_kzalloc(g, slavesize); 724 725 if (!pclkprog->p_slave_entries) { 726 status = -ENOMEM; 727 goto exit; 728 } 729 730 memset(pclkprog->p_slave_entries, CTRL_CLK_CLK_DOMAIN_INDEX_INVALID, 731 slavesize); 732 733 memcpy(pclkprog->p_slave_entries, ptmpprog->p_slave_entries, slavesize); 734 735exit: 736 if (status) { 737 (*ppboardobj)->destruct(*ppboardobj); 738 } 739 740 return status; 741} 742 743static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs) 744{ 745 struct boardobj *board_obj_ptr = NULL; 746 int status; 747 748 nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs)); 749 switch 
(BOARDOBJ_GET_TYPE(pargs)) { 750 case CTRL_CLK_CLK_PROG_TYPE_1X: 751 status = clk_prog_construct_1x(g, &board_obj_ptr, 752 sizeof(struct clk_prog_1x), pargs); 753 break; 754 755 case CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE: 756 status = clk_prog_construct_1x_master_table(g, &board_obj_ptr, 757 sizeof(struct clk_prog_1x_master_table), pargs); 758 break; 759 760 case CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO: 761 status = clk_prog_construct_1x_master_ratio(g, &board_obj_ptr, 762 sizeof(struct clk_prog_1x_master_ratio), pargs); 763 break; 764 765 default: 766 return NULL; 767 } 768 769 if (status) { 770 if (board_obj_ptr) { 771 board_obj_ptr->destruct(board_obj_ptr); 772 } 773 return NULL; 774 } 775 776 nvgpu_log_info(g, " Done"); 777 778 return (struct clk_prog *)board_obj_ptr; 779} 780 781static u32 vfflatten_prog_1x_master(struct gk20a *g, 782 struct clk_pmupstate *pclk, 783 struct clk_prog_1x_master *p1xmaster, 784 u8 clk_domain_idx, u16 *pfreqmaxlastmhz) 785{ 786 struct ctrl_clk_clk_prog_1x_master_vf_entry *p_vf_rail; 787 union { 788 struct boardobj board_obj; 789 struct clk_vf_point vf_point; 790 struct clk_vf_point_freq freq; 791 struct clk_vf_point_volt volt; 792 } vf_point_data; 793 u32 status = 0; 794 u8 step_count; 795 u8 freq_step_size_mhz = 0; 796 u8 vf_point_idx; 797 u8 vf_rail_idx; 798 799 nvgpu_log_info(g, " "); 800 memset(&vf_point_data, 0x0, sizeof(vf_point_data)); 801 802 vf_point_idx = BOARDOBJGRP_NEXT_EMPTY_IDX( 803 &pclk->clk_vf_pointobjs.super.super); 804 805 for (vf_rail_idx = 0; 806 vf_rail_idx < pclk->clk_progobjs.vf_entry_count; 807 vf_rail_idx++) { 808 u32 voltage_min_uv; 809 u32 voltage_step_size_uv; 810 u8 i; 811 812 p_vf_rail = &p1xmaster->p_vf_entries[vf_rail_idx]; 813 if (p_vf_rail->vfe_idx == CTRL_BOARDOBJ_IDX_INVALID) { 814 continue; 815 } 816 817 p_vf_rail->vf_point_idx_first = vf_point_idx; 818 819 vf_point_data.vf_point.vfe_equ_idx = p_vf_rail->vfe_idx; 820 vf_point_data.vf_point.volt_rail_idx = vf_rail_idx; 821 822 step_count = 0; 
823 824 switch (p1xmaster->super.source) { 825 case CTRL_CLK_PROG_1X_SOURCE_PLL: 826 freq_step_size_mhz = 827 p1xmaster->super.source_data.pll.freq_step_size_mhz; 828 step_count = (freq_step_size_mhz == 0) ? 0 : 829 (p1xmaster->super.freq_max_mhz - *pfreqmaxlastmhz - 1) / 830 freq_step_size_mhz; 831 /* Intentional fall-through.*/ 832 833 case CTRL_CLK_PROG_1X_SOURCE_ONE_SOURCE: 834 vf_point_data.board_obj.type = 835 CTRL_CLK_CLK_VF_POINT_TYPE_FREQ; 836 do { 837 clkvfpointfreqmhzset(g, &vf_point_data.vf_point, 838 p1xmaster->super.freq_max_mhz - 839 step_count * freq_step_size_mhz); 840 841 status = _clk_prog_1x_master_rail_construct_vf_point(g, pclk, 842 p1xmaster, p_vf_rail, 843 &vf_point_data.vf_point, &vf_point_idx); 844 if (status) { 845 goto done; 846 } 847 } while (step_count-- > 0); 848 break; 849 850 case CTRL_CLK_PROG_1X_SOURCE_FLL: 851 voltage_min_uv = CLK_FLL_LUT_MIN_VOLTAGE_UV(pclk); 852 voltage_step_size_uv = CLK_FLL_LUT_STEP_SIZE_UV(pclk); 853 step_count = CLK_FLL_LUT_VF_NUM_ENTRIES(pclk); 854 855 /* FLL sources use a voltage-based VF_POINT.*/ 856 vf_point_data.board_obj.type = 857 CTRL_CLK_CLK_VF_POINT_TYPE_VOLT; 858 for (i = 0; i < step_count; i++) { 859 vf_point_data.volt.source_voltage_uv = 860 voltage_min_uv + i * voltage_step_size_uv; 861 862 status = _clk_prog_1x_master_rail_construct_vf_point(g, pclk, 863 p1xmaster, p_vf_rail, 864 &vf_point_data.vf_point, &vf_point_idx); 865 if (status) { 866 goto done; 867 } 868 } 869 break; 870 } 871 } 872 873 *pfreqmaxlastmhz = p1xmaster->super.freq_max_mhz; 874 875done: 876 nvgpu_log_info(g, "done status %x", status); 877 return status; 878} 879 880static u32 vflookup_prog_1x_master 881( 882 struct gk20a *g, 883 struct clk_pmupstate *pclk, 884 struct clk_prog_1x_master *p1xmaster, 885 u8 *slave_clk_domain, 886 u16 *pclkmhz, 887 u32 *pvoltuv, 888 u8 rail 889) 890{ 891 int j; 892 struct ctrl_clk_clk_prog_1x_master_vf_entry 893 *pvfentry; 894 struct clk_vf_point *pvfpoint; 895 struct clk_progs *pclkprogobjs; 
896 struct clk_prog_1x_master_ratio *p1xmasterratio; 897 u16 clkmhz; 898 u32 voltuv; 899 u8 slaveentrycount; 900 int i; 901 struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry *pslaveents; 902 903 if ((*pclkmhz != 0) && (*pvoltuv != 0)) { 904 return -EINVAL; 905 } 906 907 pclkprogobjs = &(pclk->clk_progobjs); 908 909 slaveentrycount = pclkprogobjs->slave_entry_count; 910 911 if (pclkprogobjs->vf_entry_count > 912 CTRL_CLK_CLK_PROG_1X_MASTER_VF_ENTRY_MAX_ENTRIES) { 913 return -EINVAL; 914 } 915 916 if (rail >= pclkprogobjs->vf_entry_count) { 917 return -EINVAL; 918 } 919 920 pvfentry = p1xmaster->p_vf_entries; 921 922 pvfentry = (struct ctrl_clk_clk_prog_1x_master_vf_entry *)( 923 (u8 *)pvfentry + 924 (sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * 925 rail)); 926 927 clkmhz = *pclkmhz; 928 voltuv = *pvoltuv; 929 930 /*if domain is slave domain and freq is input 931 then derive master clk */ 932 if ((slave_clk_domain != NULL) && (*pclkmhz != 0)) { 933 if (p1xmaster->super.super.super.implements(g, 934 &p1xmaster->super.super.super, 935 CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO)) { 936 937 p1xmasterratio = 938 (struct clk_prog_1x_master_ratio *)p1xmaster; 939 pslaveents = p1xmasterratio->p_slave_entries; 940 for (i = 0; i < slaveentrycount; i++) { 941 if (pslaveents->clk_dom_idx == 942 *slave_clk_domain) { 943 break; 944 } 945 pslaveents++; 946 } 947 if (i == slaveentrycount) { 948 return -EINVAL; 949 } 950 clkmhz = (clkmhz * 100)/pslaveents->ratio; 951 } else { 952 /* only support ratio for now */ 953 return -EINVAL; 954 } 955 } 956 957 /* if both volt and clks are zero simply print*/ 958 if ((*pvoltuv == 0) && (*pclkmhz == 0)) { 959 for (j = pvfentry->vf_point_idx_first; 960 j <= pvfentry->vf_point_idx_last; j++) { 961 pvfpoint = CLK_CLK_VF_POINT_GET(pclk, j); 962 nvgpu_err(g, "v %x c %x", 963 clkvfpointvoltageuvget(g, pvfpoint), 964 clkvfpointfreqmhzget(g, pvfpoint)); 965 } 966 return -EINVAL; 967 } 968 /* start looking up f for v for v for f */ 969 /* 
looking for volt? */ 970 if (*pvoltuv == 0) { 971 pvfpoint = CLK_CLK_VF_POINT_GET(pclk, 972 pvfentry->vf_point_idx_last); 973 /* above range? */ 974 if (clkmhz > clkvfpointfreqmhzget(g, pvfpoint)) { 975 return -EINVAL; 976 } 977 978 for (j = pvfentry->vf_point_idx_last; 979 j >= pvfentry->vf_point_idx_first; j--) { 980 pvfpoint = CLK_CLK_VF_POINT_GET(pclk, j); 981 if (clkmhz <= clkvfpointfreqmhzget(g, pvfpoint)) { 982 voltuv = clkvfpointvoltageuvget(g, pvfpoint); 983 } else { 984 break; 985 } 986 } 987 } else { /* looking for clk? */ 988 989 pvfpoint = CLK_CLK_VF_POINT_GET(pclk, 990 pvfentry->vf_point_idx_first); 991 /* below range? */ 992 if (voltuv < clkvfpointvoltageuvget(g, pvfpoint)) { 993 return -EINVAL; 994 } 995 996 for (j = pvfentry->vf_point_idx_first; 997 j <= pvfentry->vf_point_idx_last; j++) { 998 pvfpoint = CLK_CLK_VF_POINT_GET(pclk, j); 999 if (voltuv >= clkvfpointvoltageuvget(g, pvfpoint)) { 1000 clkmhz = clkvfpointfreqmhzget(g, pvfpoint); 1001 } else { 1002 break; 1003 } 1004 } 1005 } 1006 1007 /*if domain is slave domain and freq was looked up 1008 then derive slave clk */ 1009 if ((slave_clk_domain != NULL) && (*pclkmhz == 0)) { 1010 if (p1xmaster->super.super.super.implements(g, 1011 &p1xmaster->super.super.super, 1012 CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO)) { 1013 1014 p1xmasterratio = 1015 (struct clk_prog_1x_master_ratio *)p1xmaster; 1016 pslaveents = p1xmasterratio->p_slave_entries; 1017 for (i = 0; i < slaveentrycount; i++) { 1018 if (pslaveents->clk_dom_idx == 1019 *slave_clk_domain) { 1020 break; 1021 } 1022 pslaveents++; 1023 } 1024 if (i == slaveentrycount) { 1025 return -EINVAL; 1026 } 1027 clkmhz = (clkmhz * pslaveents->ratio)/100; 1028 } else { 1029 /* only support ratio for now */ 1030 return -EINVAL; 1031 } 1032 } 1033 *pclkmhz = clkmhz; 1034 *pvoltuv = voltuv; 1035 if ((clkmhz == 0) || (voltuv == 0)) { 1036 return -EINVAL; 1037 } 1038 return 0; 1039} 1040 1041static u32 getfpoints_prog_1x_master 1042( 1043 struct gk20a *g, 1044 
struct clk_pmupstate *pclk, 1045 struct clk_prog_1x_master *p1xmaster, 1046 u32 *pfpointscount, 1047 u16 **ppfreqpointsinmhz, 1048 u8 rail 1049) 1050{ 1051 1052 struct ctrl_clk_clk_prog_1x_master_vf_entry 1053 *pvfentry; 1054 struct clk_vf_point *pvfpoint; 1055 struct clk_progs *pclkprogobjs; 1056 u8 j; 1057 u32 fpointscount = 0; 1058 1059 if (pfpointscount == NULL) { 1060 return -EINVAL; 1061 } 1062 1063 pclkprogobjs = &(pclk->clk_progobjs); 1064 1065 if (pclkprogobjs->vf_entry_count > 1066 CTRL_CLK_CLK_PROG_1X_MASTER_VF_ENTRY_MAX_ENTRIES) { 1067 return -EINVAL; 1068 } 1069 1070 if (rail >= pclkprogobjs->vf_entry_count) { 1071 return -EINVAL; 1072 } 1073 1074 pvfentry = p1xmaster->p_vf_entries; 1075 1076 pvfentry = (struct ctrl_clk_clk_prog_1x_master_vf_entry *)( 1077 (u8 *)pvfentry + 1078 (sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * 1079 (rail+1))); 1080 1081 fpointscount = pvfentry->vf_point_idx_last - 1082 pvfentry->vf_point_idx_first + 1; 1083 1084 /* if pointer for freq data is NULL simply return count */ 1085 if (*ppfreqpointsinmhz == NULL) { 1086 goto done; 1087 } 1088 1089 if (fpointscount > *pfpointscount) { 1090 return -ENOMEM; 1091 } 1092 for (j = pvfentry->vf_point_idx_first; 1093 j <= pvfentry->vf_point_idx_last; j++) { 1094 pvfpoint = CLK_CLK_VF_POINT_GET(pclk, j); 1095 **ppfreqpointsinmhz = clkvfpointfreqmhzget(g, pvfpoint); 1096 (*ppfreqpointsinmhz)++; 1097 } 1098done: 1099 *pfpointscount = fpointscount; 1100 return 0; 1101} 1102 1103static int getslaveclk_prog_1x_master(struct gk20a *g, 1104 struct clk_pmupstate *pclk, 1105 struct clk_prog_1x_master *p1xmaster, 1106 u8 slave_clk_domain, 1107 u16 *pclkmhz, 1108 u16 masterclkmhz 1109) 1110{ 1111 struct clk_progs *pclkprogobjs; 1112 struct clk_prog_1x_master_ratio *p1xmasterratio; 1113 u8 slaveentrycount; 1114 u8 i; 1115 struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry *pslaveents; 1116 1117 if (pclkmhz == NULL) { 1118 return -EINVAL; 1119 } 1120 1121 if (masterclkmhz == 0) { 1122 
return -EINVAL; 1123 } 1124 1125 *pclkmhz = 0; 1126 pclkprogobjs = &(pclk->clk_progobjs); 1127 1128 slaveentrycount = pclkprogobjs->slave_entry_count; 1129 1130 if (p1xmaster->super.super.super.implements(g, 1131 &p1xmaster->super.super.super, 1132 CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO)) { 1133 p1xmasterratio = 1134 (struct clk_prog_1x_master_ratio *)p1xmaster; 1135 pslaveents = p1xmasterratio->p_slave_entries; 1136 for (i = 0; i < slaveentrycount; i++) { 1137 if (pslaveents->clk_dom_idx == 1138 slave_clk_domain) { 1139 break; 1140 } 1141 pslaveents++; 1142 } 1143 if (i == slaveentrycount) { 1144 return -EINVAL; 1145 } 1146 *pclkmhz = (masterclkmhz * pslaveents->ratio)/100; 1147 } else { 1148 /* only support ratio for now */ 1149 return -EINVAL; 1150 } 1151 return 0; 1152}
diff --git a/include/clk/clk_prog.h b/include/clk/clk_prog.h
deleted file mode 100644
index af6368f..0000000
--- a/include/clk/clk_prog.h
+++ /dev/null
@@ -1,100 +0,0 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Clock programming (CLK_PROG) board-object group: types and entry points
 * for building the VF curve from VBIOS clock-programming entries.
 */

#ifndef NVGPU_CLK_PROG_H
#define NVGPU_CLK_PROG_H
#include "ctrl/ctrlclk.h"
#include "ctrl/ctrlboardobj.h"
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include "boardobj/boardobjgrp_e32.h"
#include "boardobj/boardobjgrp_e255.h"
#include "boardobj/boardobjgrpmask.h"

int clk_prog_sw_setup(struct gk20a *g);
int clk_prog_pmu_setup(struct gk20a *g);
struct clk_prog_1x_master;

/* Flatten a master prog's VF entries into VF points for one clk domain. */
typedef u32 vf_flatten(struct gk20a *g, struct clk_pmupstate *pclk,
			struct clk_prog_1x_master *p1xmaster,
			u8 clk_domain_idx, u16 *pfreqmaxlastmhz);

/*
 * Look up voltage for a frequency (or vice versa) on a rail's VF curve;
 * exactly one of *pclkmhz / *pvoltuv is non-zero on input.
 */
typedef u32 vf_lookup(struct gk20a *g, struct clk_pmupstate *pclk,
			struct clk_prog_1x_master *p1xmaster,
			u8 *slave_clk_domain_idx, u16 *pclkmhz,
			u32 *pvoltuv, u8 rail);

/* Derive a slave domain's clock from the master clock. */
typedef int get_slaveclk(struct gk20a *g, struct clk_pmupstate *pclk,
			struct clk_prog_1x_master *p1xmaster,
			u8 slave_clk_domain_idx, u16 *pclkmhz,
			u16 masterclkmhz);

/* Enumerate a rail's frequency points (count and/or values in MHz). */
typedef u32 get_fpoints(struct gk20a *g, struct clk_pmupstate *pclk,
			struct clk_prog_1x_master *p1xmaster,
			u32 *pfpointscount,
			u16 **ppfreqpointsinmhz, u8 rail);


/* Group of all CLK_PROG objects (E255 board-object group). */
struct clk_progs {
	struct boardobjgrp_e255 super;
	u8 slave_entry_count;
	u8 vf_entry_count;

};

/* Base clock-programming entry. */
struct clk_prog {
	struct boardobj super;
};

/* 1x prog: one source (PLL / ONE_SOURCE / FLL) and a max frequency. */
struct clk_prog_1x {
	struct clk_prog super;
	u8 source;
	u16 freq_max_mhz;
	union ctrl_clk_clk_prog_1x_source_data source_data;
};

/* 1x master prog: owns per-rail VF entries and the lookup vtable above. */
struct clk_prog_1x_master {
	struct clk_prog_1x super;
	bool b_o_c_o_v_enabled;
	struct ctrl_clk_clk_prog_1x_master_vf_entry *p_vf_entries;
	struct ctrl_clk_clk_delta deltas;
	union ctrl_clk_clk_prog_1x_master_source_data source_data;
	vf_flatten *vfflatten;
	vf_lookup *vflookup;
	get_fpoints *getfpoints;
	get_slaveclk *getslaveclk;
};

/* Master prog whose slaves follow by percentage ratio. */
struct clk_prog_1x_master_ratio {
	struct clk_prog_1x_master super;
	struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry *p_slave_entries;
};

/* Master prog whose slaves follow via an explicit frequency table. */
struct clk_prog_1x_master_table {
	struct clk_prog_1x_master super;
	struct ctrl_clk_clk_prog_1x_master_table_slave_entry *p_slave_entries;
};

/* Fetch the clk_prog object at @idx from the group. */
#define CLK_CLK_PROG_GET(pclk, idx)                                     \
	((struct clk_prog *)BOARDOBJGRP_OBJ_GET_BY_IDX(			\
		&pclk->clk_progobjs.super.super, (u8)(idx)))

#endif /* NVGPU_CLK_PROG_H */
diff --git a/include/clk/clk_vf_point.c b/include/clk/clk_vf_point.c
deleted file mode 100644
index 96413c8..0000000
--- a/include/clk/clk_vf_point.c
+++ /dev/null
@@ -1,433 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/gk20a.h> 24 25#include "clk.h" 26#include "clk_vf_point.h" 27#include "boardobj/boardobjgrp.h" 28#include "boardobj/boardobjgrp_e32.h" 29#include "ctrl/ctrlclk.h" 30#include "ctrl/ctrlvolt.h" 31 32static int _clk_vf_point_pmudatainit_super(struct gk20a *g, struct boardobj 33 *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); 34 35static int _clk_vf_points_pmudatainit(struct gk20a *g, 36 struct boardobjgrp *pboardobjgrp, 37 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 38{ 39 u32 status = 0; 40 41 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 42 if (status) { 43 nvgpu_err(g, 44 "error updating pmu boardobjgrp for clk vfpoint 0x%x", 45 status); 46 goto done; 47 } 48 49done: 50 return status; 51} 52 53static int _clk_vf_points_pmudata_instget(struct gk20a *g, 54 struct nv_pmu_boardobjgrp *pmuboardobjgrp, 55 struct nv_pmu_boardobj **ppboardobjpmudata, 56 u8 idx) 57{ 58 struct nv_pmu_clk_clk_vf_point_boardobj_grp_set *pgrp_set = 59 (struct nv_pmu_clk_clk_vf_point_boardobj_grp_set *) 60 pmuboardobjgrp; 61 62 nvgpu_log_info(g, " "); 63 64 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 65 if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) { 66 return -EINVAL; 67 } 68 69 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 70 &pgrp_set->objects[idx].data.board_obj; 71 nvgpu_log_info(g, " Done"); 72 return 0; 73} 74 75static int _clk_vf_points_pmustatus_instget(struct gk20a *g, 76 void *pboardobjgrppmu, 77 struct nv_pmu_boardobj_query **ppboardobjpmustatus, 78 u8 idx) 79{ 80 struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status *pgrp_get_status = 81 (struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status *) 82 pboardobjgrppmu; 83 84 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 85 if (idx >= CTRL_BOARDOBJGRP_E255_MAX_OBJECTS) { 86 return -EINVAL; 87 } 88 89 *ppboardobjpmustatus = (struct nv_pmu_boardobj_query *) 90 &pgrp_get_status->objects[idx].data.board_obj; 91 return 0; 92} 93 94int 
clk_vf_point_sw_setup(struct gk20a *g) 95{ 96 int status; 97 struct boardobjgrp *pboardobjgrp = NULL; 98 99 nvgpu_log_info(g, " "); 100 101 status = boardobjgrpconstruct_e255(g, &g->clk_pmu.clk_vf_pointobjs.super); 102 if (status) { 103 nvgpu_err(g, 104 "error creating boardobjgrp for clk vfpoint, status - 0x%x", 105 status); 106 goto done; 107 } 108 109 pboardobjgrp = &g->clk_pmu.clk_vf_pointobjs.super.super; 110 111 BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_VF_POINT); 112 113 status = BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, 114 clk, CLK, clk_vf_point, CLK_VF_POINT); 115 if (status) { 116 nvgpu_err(g, 117 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 118 status); 119 goto done; 120 } 121 122 status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, 123 &g->clk_pmu.clk_vf_pointobjs.super.super, 124 clk, CLK, clk_vf_point, CLK_VF_POINT); 125 if (status) { 126 nvgpu_err(g, 127 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 128 status); 129 goto done; 130 } 131 132 pboardobjgrp->pmudatainit = _clk_vf_points_pmudatainit; 133 pboardobjgrp->pmudatainstget = _clk_vf_points_pmudata_instget; 134 pboardobjgrp->pmustatusinstget = _clk_vf_points_pmustatus_instget; 135 136done: 137 nvgpu_log_info(g, " done status %x", status); 138 return status; 139} 140 141int clk_vf_point_pmu_setup(struct gk20a *g) 142{ 143 int status; 144 struct boardobjgrp *pboardobjgrp = NULL; 145 146 nvgpu_log_info(g, " "); 147 148 pboardobjgrp = &g->clk_pmu.clk_vf_pointobjs.super.super; 149 150 if (!pboardobjgrp->bconstructed) { 151 return -EINVAL; 152 } 153 154 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 155 156 nvgpu_log_info(g, "Done"); 157 return status; 158} 159 160static int clk_vf_point_construct_super(struct gk20a *g, 161 struct boardobj **ppboardobj, 162 u16 size, void *pargs) 163{ 164 struct clk_vf_point *pclkvfpoint; 165 struct clk_vf_point *ptmpvfpoint = 166 (struct clk_vf_point *)pargs; 167 int status = 0; 168 169 status = 
boardobj_construct_super(g, ppboardobj, 170 size, pargs); 171 if (status) { 172 return -EINVAL; 173 } 174 175 pclkvfpoint = (struct clk_vf_point *)*ppboardobj; 176 177 pclkvfpoint->super.pmudatainit = 178 _clk_vf_point_pmudatainit_super; 179 180 pclkvfpoint->vfe_equ_idx = ptmpvfpoint->vfe_equ_idx; 181 pclkvfpoint->volt_rail_idx = ptmpvfpoint->volt_rail_idx; 182 183 return status; 184} 185 186static int _clk_vf_point_pmudatainit_volt(struct gk20a *g, 187 struct boardobj *board_obj_ptr, 188 struct nv_pmu_boardobj *ppmudata) 189{ 190 int status = 0; 191 struct clk_vf_point_volt *pclk_vf_point_volt; 192 struct nv_pmu_clk_clk_vf_point_volt_boardobj_set *pset; 193 194 nvgpu_log_info(g, " "); 195 196 status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata); 197 if (status != 0) { 198 return status; 199 } 200 201 pclk_vf_point_volt = 202 (struct clk_vf_point_volt *)board_obj_ptr; 203 204 pset = (struct nv_pmu_clk_clk_vf_point_volt_boardobj_set *) 205 ppmudata; 206 207 pset->source_voltage_uv = pclk_vf_point_volt->source_voltage_uv; 208 pset->freq_delta.data = pclk_vf_point_volt->freq_delta.data; 209 pset->freq_delta.type = pclk_vf_point_volt->freq_delta.type; 210 211 return status; 212} 213 214static int _clk_vf_point_pmudatainit_freq(struct gk20a *g, 215 struct boardobj *board_obj_ptr, 216 struct nv_pmu_boardobj *ppmudata) 217{ 218 int status = 0; 219 struct clk_vf_point_freq *pclk_vf_point_freq; 220 struct nv_pmu_clk_clk_vf_point_freq_boardobj_set *pset; 221 222 nvgpu_log_info(g, " "); 223 224 status = _clk_vf_point_pmudatainit_super(g, board_obj_ptr, ppmudata); 225 if (status != 0) { 226 return status; 227 } 228 229 pclk_vf_point_freq = 230 (struct clk_vf_point_freq *)board_obj_ptr; 231 232 pset = (struct nv_pmu_clk_clk_vf_point_freq_boardobj_set *) 233 ppmudata; 234 235 pset->freq_mhz = 236 clkvfpointfreqmhzget(g, &pclk_vf_point_freq->super); 237 238 pset->volt_delta_uv = pclk_vf_point_freq->volt_delta_uv; 239 240 return status; 241} 242 243static int 
clk_vf_point_construct_volt(struct gk20a *g, 244 struct boardobj **ppboardobj, 245 u16 size, void *pargs) 246{ 247 struct boardobj *ptmpobj = (struct boardobj *)pargs; 248 struct clk_vf_point_volt *pclkvfpoint; 249 struct clk_vf_point_volt *ptmpvfpoint = 250 (struct clk_vf_point_volt *)pargs; 251 int status = 0; 252 253 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_VF_POINT_TYPE_VOLT) { 254 return -EINVAL; 255 } 256 257 ptmpobj->type_mask = BIT(CTRL_CLK_CLK_VF_POINT_TYPE_VOLT); 258 status = clk_vf_point_construct_super(g, ppboardobj, size, pargs); 259 if (status) { 260 return -EINVAL; 261 } 262 263 pclkvfpoint = (struct clk_vf_point_volt *)*ppboardobj; 264 265 pclkvfpoint->super.super.pmudatainit = 266 _clk_vf_point_pmudatainit_volt; 267 268 pclkvfpoint->source_voltage_uv = ptmpvfpoint->source_voltage_uv; 269 pclkvfpoint->freq_delta = ptmpvfpoint->freq_delta; 270 271 return status; 272} 273 274static int clk_vf_point_construct_freq(struct gk20a *g, 275 struct boardobj **ppboardobj, 276 u16 size, void *pargs) 277{ 278 struct boardobj *ptmpobj = (struct boardobj *)pargs; 279 struct clk_vf_point_freq *pclkvfpoint; 280 struct clk_vf_point_freq *ptmpvfpoint = 281 (struct clk_vf_point_freq *)pargs; 282 int status = 0; 283 284 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_VF_POINT_TYPE_FREQ) { 285 return -EINVAL; 286 } 287 288 ptmpobj->type_mask = BIT(CTRL_CLK_CLK_VF_POINT_TYPE_FREQ); 289 status = clk_vf_point_construct_super(g, ppboardobj, size, pargs); 290 if (status) { 291 return -EINVAL; 292 } 293 294 pclkvfpoint = (struct clk_vf_point_freq *)*ppboardobj; 295 296 pclkvfpoint->super.super.pmudatainit = 297 _clk_vf_point_pmudatainit_freq; 298 299 clkvfpointfreqmhzset(g, &pclkvfpoint->super, 300 clkvfpointfreqmhzget(g, &ptmpvfpoint->super)); 301 302 return status; 303} 304 305struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs) 306{ 307 struct boardobj *board_obj_ptr = NULL; 308 int status; 309 310 nvgpu_log_info(g, " "); 311 switch 
(BOARDOBJ_GET_TYPE(pargs)) { 312 case CTRL_CLK_CLK_VF_POINT_TYPE_FREQ: 313 status = clk_vf_point_construct_freq(g, &board_obj_ptr, 314 sizeof(struct clk_vf_point_freq), pargs); 315 break; 316 317 case CTRL_CLK_CLK_VF_POINT_TYPE_VOLT: 318 status = clk_vf_point_construct_volt(g, &board_obj_ptr, 319 sizeof(struct clk_vf_point_volt), pargs); 320 break; 321 322 default: 323 return NULL; 324 } 325 326 if (status) { 327 return NULL; 328 } 329 330 nvgpu_log_info(g, " Done"); 331 332 return (struct clk_vf_point *)board_obj_ptr; 333} 334 335static int _clk_vf_point_pmudatainit_super(struct gk20a *g, 336 struct boardobj *board_obj_ptr, 337 struct nv_pmu_boardobj *ppmudata) 338{ 339 int status = 0; 340 struct clk_vf_point *pclk_vf_point; 341 struct nv_pmu_clk_clk_vf_point_boardobj_set *pset; 342 343 nvgpu_log_info(g, " "); 344 345 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 346 if (status != 0) { 347 return status; 348 } 349 350 pclk_vf_point = 351 (struct clk_vf_point *)board_obj_ptr; 352 353 pset = (struct nv_pmu_clk_clk_vf_point_boardobj_set *) 354 ppmudata; 355 356 357 pset->vfe_equ_idx = pclk_vf_point->vfe_equ_idx; 358 pset->volt_rail_idx = pclk_vf_point->volt_rail_idx; 359 return status; 360} 361 362 363static int clk_vf_point_update(struct gk20a *g, 364 struct boardobj *board_obj_ptr, 365 struct nv_pmu_boardobj *ppmudata) 366{ 367 struct clk_vf_point *pclk_vf_point; 368 struct nv_pmu_clk_clk_vf_point_boardobj_get_status *pstatus; 369 370 nvgpu_log_info(g, " "); 371 372 373 pclk_vf_point = 374 (struct clk_vf_point *)board_obj_ptr; 375 376 pstatus = (struct nv_pmu_clk_clk_vf_point_boardobj_get_status *) 377 ppmudata; 378 379 if (pstatus->super.type != pclk_vf_point->super.type) { 380 nvgpu_err(g, 381 "pmu data and boardobj type not matching"); 382 return -EINVAL; 383 } 384 /* now copy VF pair */ 385 memcpy(&pclk_vf_point->pair, &pstatus->pair, 386 sizeof(struct ctrl_clk_vf_pair)); 387 return 0; 388} 389 390/*get latest vf point data from PMU */ 391int 
clk_vf_point_cache(struct gk20a *g) 392{ 393 394 struct clk_vf_points *pclk_vf_points; 395 struct boardobjgrp *pboardobjgrp; 396 struct boardobjgrpmask *pboardobjgrpmask; 397 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu; 398 struct boardobj *pboardobj = NULL; 399 struct nv_pmu_boardobj_query *pboardobjpmustatus = NULL; 400 int status; 401 u8 index; 402 403 nvgpu_log_info(g, " "); 404 pclk_vf_points = &g->clk_pmu.clk_vf_pointobjs; 405 pboardobjgrp = &pclk_vf_points->super.super; 406 pboardobjgrpmask = &pclk_vf_points->super.mask.super; 407 408 status = pboardobjgrp->pmugetstatus(g, pboardobjgrp, pboardobjgrpmask); 409 if (status) { 410 nvgpu_err(g, "err getting boardobjs from pmu"); 411 return status; 412 } 413 pboardobjgrppmu = pboardobjgrp->pmu.getstatus.buf; 414 415 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct boardobj*, pboardobj, index) { 416 status = pboardobjgrp->pmustatusinstget(g, 417 (struct nv_pmu_boardobjgrp *)pboardobjgrppmu, 418 &pboardobjpmustatus, index); 419 if (status) { 420 nvgpu_err(g, "could not get status object instance"); 421 return status; 422 } 423 424 status = clk_vf_point_update(g, pboardobj, 425 (struct nv_pmu_boardobj *)pboardobjpmustatus); 426 if (status) { 427 nvgpu_err(g, "invalid data from pmu at %d", index); 428 return status; 429 } 430 } 431 432 return 0; 433}
diff --git a/include/clk/clk_vf_point.h b/include/clk/clk_vf_point.h
deleted file mode 100644
index b72fe64..0000000
--- a/include/clk/clk_vf_point.h
+++ /dev/null
@@ -1,83 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22 23#ifndef NVGPU_CLK_VF_POINT_H 24#define NVGPU_CLK_VF_POINT_H 25#include "ctrl/ctrlclk.h" 26#include "ctrl/ctrlboardobj.h" 27#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 28#include "boardobj/boardobjgrp_e32.h" 29#include "boardobj/boardobjgrpmask.h" 30 31int clk_vf_point_sw_setup(struct gk20a *g); 32int clk_vf_point_pmu_setup(struct gk20a *g); 33int clk_vf_point_cache(struct gk20a *g); 34 35struct clk_vf_points { 36 struct boardobjgrp_e255 super; 37}; 38 39struct clk_vf_point { 40 struct boardobj super; 41 u8 vfe_equ_idx; 42 u8 volt_rail_idx; 43 struct ctrl_clk_vf_pair pair; 44}; 45 46struct clk_vf_point_volt { 47 struct clk_vf_point super; 48 u32 source_voltage_uv; 49 struct ctrl_clk_freq_delta freq_delta; 50}; 51 52struct clk_vf_point_freq { 53 struct clk_vf_point super; 54 int volt_delta_uv; 55}; 56 57#define CLK_CLK_VF_POINT_GET(pclk, idx) \ 58 ((struct clk_vf_point *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ 59 &pclk->clk_vf_pointobjs.super.super, (u8)(idx))) 60 61#define clkvfpointpairget(pvfpoint) \ 62 (&((pvfpoint)->pair)) 63 64#define clkvfpointfreqmhzget(pgpu, pvfpoint) \ 65 CTRL_CLK_VF_PAIR_FREQ_MHZ_GET(clkvfpointpairget(pvfpoint)) 66 67#define clkvfpointfreqdeltamhzGet(pgpu, pvfPoint) \ 68 ((BOARDOBJ_GET_TYPE(pvfpoint) == CTRL_CLK_CLK_VF_POINT_TYPE_VOLT) ? \ 69 (((struct clk_vf_point_volt *)(pvfpoint))->freq_delta_khz / 1000) : 0) 70 71#define clkvfpointfreqmhzset(pgpu, pvfpoint, _freqmhz) \ 72 CTRL_CLK_VF_PAIR_FREQ_MHZ_SET(clkvfpointpairget(pvfpoint), _freqmhz) 73 74#define clkvfpointvoltageuvset(pgpu, pvfpoint, _voltageuv) \ 75 CTRL_CLK_VF_PAIR_VOLTAGE_UV_SET(clkvfpointpairget(pvfpoint), \ 76 _voltageuv) 77 78#define clkvfpointvoltageuvget(pgpu, pvfpoint) \ 79 CTRL_CLK_VF_PAIR_VOLTAGE_UV_GET(clkvfpointpairget(pvfpoint)) \ 80 81struct clk_vf_point *construct_clk_vf_point(struct gk20a *g, void *pargs); 82 83#endif /* NVGPU_CLK_VF_POINT_H */
diff --git a/include/clk/clk_vin.c b/include/clk/clk_vin.c
deleted file mode 100644
index e0a4a5b..0000000
--- a/include/clk/clk_vin.c
+++ /dev/null
@@ -1,573 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bios.h> 24#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 25#include <nvgpu/io.h> 26#include <nvgpu/gk20a.h> 27 28#include "boardobj/boardobjgrp.h" 29#include "boardobj/boardobjgrp_e32.h" 30 31#include "ctrl/ctrlvolt.h" 32 33#include "gp106/bios_gp106.h" 34 35#include "clk.h" 36#include "clk_vin.h" 37 38static int devinit_get_vin_device_table(struct gk20a *g, 39 struct avfsvinobjs *pvinobjs); 40 41static int vin_device_construct_v10(struct gk20a *g, 42 struct boardobj **ppboardobj, 43 u16 size, void *pargs); 44static int vin_device_construct_v20(struct gk20a *g, 45 struct boardobj **ppboardobj, 46 u16 size, void *pargs); 47static int vin_device_construct_super(struct gk20a *g, 48 struct boardobj **ppboardobj, 49 u16 size, void *pargs); 50static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs); 51 52static int vin_device_init_pmudata_v10(struct gk20a *g, 53 struct boardobj *board_obj_ptr, 54 struct nv_pmu_boardobj *ppmudata); 55static int vin_device_init_pmudata_v20(struct gk20a *g, 56 struct boardobj *board_obj_ptr, 57 struct nv_pmu_boardobj *ppmudata); 58static int vin_device_init_pmudata_super(struct gk20a *g, 59 struct boardobj *board_obj_ptr, 60 struct nv_pmu_boardobj *ppmudata); 61 62u32 clk_avfs_get_vin_cal_fuse_v10(struct gk20a *g, 63 struct avfsvinobjs *pvinobjs, 64 struct vin_device_v20 *pvindev) 65{ 66 u32 status = 0; 67 u32 slope, intercept; 68 u8 i; 69 70 if (pvinobjs->calibration_rev_vbios == g->ops.fuse.read_vin_cal_fuse_rev(g)) { 71 BOARDOBJGRP_FOR_EACH(&(pvinobjs->super.super), 72 struct vin_device_v20 *, pvindev, i) { 73 slope = 0; 74 intercept = 0; 75 pvindev = (struct vin_device_v20 *)CLK_GET_VIN_DEVICE(pvinobjs, i); 76 status = g->ops.fuse.read_vin_cal_slope_intercept_fuse(g, 77 pvindev->super.id, &slope, &intercept); 78 if (status) { 79 nvgpu_err(g, 80 "err reading vin cal for id %x", pvindev->super.id); 81 return status; 82 } 83 pvindev->data.vin_cal.cal_v10.slope = slope; 84 
pvindev->data.vin_cal.cal_v10.intercept = intercept; 85 } 86 } 87 return status; 88 89} 90 91u32 clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, 92 struct avfsvinobjs *pvinobjs, 93 struct vin_device_v20 *pvindev) 94{ 95 u32 status = 0; 96 s8 gain, offset; 97 u8 i; 98 99 if (pvinobjs->calibration_rev_vbios == g->ops.fuse.read_vin_cal_fuse_rev(g)) { 100 BOARDOBJGRP_FOR_EACH(&(pvinobjs->super.super), 101 struct vin_device_v20 *, pvindev, i) { 102 gain = '\0'; 103 offset = '\0'; 104 pvindev = (struct vin_device_v20 *)CLK_GET_VIN_DEVICE(pvinobjs, i); 105 status = g->ops.fuse.read_vin_cal_gain_offset_fuse(g, 106 pvindev->super.id, &gain, &offset); 107 if (status) { 108 nvgpu_err(g, 109 "err reading vin cal for id %x", pvindev->super.id); 110 return status; 111 } 112 pvindev->data.vin_cal.cal_v20.gain = gain; 113 pvindev->data.vin_cal.cal_v20.offset = offset; 114 } 115 } 116 return status; 117 118} 119 120static int _clk_vin_devgrp_pmudatainit_super(struct gk20a *g, 121 struct boardobjgrp *pboardobjgrp, 122 struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) 123{ 124 struct nv_pmu_clk_clk_vin_device_boardobjgrp_set_header *pset = 125 (struct nv_pmu_clk_clk_vin_device_boardobjgrp_set_header *) 126 pboardobjgrppmu; 127 struct avfsvinobjs *pvin_obbj = (struct avfsvinobjs *)pboardobjgrp; 128 int status = 0; 129 130 nvgpu_log_info(g, " "); 131 132 status = boardobjgrp_pmudatainit_e32(g, pboardobjgrp, pboardobjgrppmu); 133 134 pset->b_vin_is_disable_allowed = pvin_obbj->vin_is_disable_allowed; 135 136 nvgpu_log_info(g, " Done"); 137 return status; 138} 139 140static int _clk_vin_devgrp_pmudata_instget(struct gk20a *g, 141 struct nv_pmu_boardobjgrp *pmuboardobjgrp, 142 struct nv_pmu_boardobj **ppboardobjpmudata, 143 u8 idx) 144{ 145 struct nv_pmu_clk_clk_vin_device_boardobj_grp_set *pgrp_set = 146 (struct nv_pmu_clk_clk_vin_device_boardobj_grp_set *) 147 pmuboardobjgrp; 148 149 nvgpu_log_info(g, " "); 150 151 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 152 if 
(((u32)BIT(idx) & 153 pgrp_set->hdr.data.super.obj_mask.super.data[0]) == 0) { 154 return -EINVAL; 155 } 156 157 *ppboardobjpmudata = (struct nv_pmu_boardobj *) 158 &pgrp_set->objects[idx].data.board_obj; 159 nvgpu_log_info(g, " Done"); 160 return 0; 161} 162 163static int _clk_vin_devgrp_pmustatus_instget(struct gk20a *g, 164 void *pboardobjgrppmu, 165 struct nv_pmu_boardobj_query **ppboardobjpmustatus, 166 u8 idx) 167{ 168 struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status *pgrp_get_status = 169 (struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status *) 170 pboardobjgrppmu; 171 172 /*check whether pmuboardobjgrp has a valid boardobj in index*/ 173 if (((u32)BIT(idx) & 174 pgrp_get_status->hdr.data.super.obj_mask.super.data[0]) == 0) { 175 return -EINVAL; 176 } 177 178 *ppboardobjpmustatus = (struct nv_pmu_boardobj_query *) 179 &pgrp_get_status->objects[idx].data.board_obj; 180 return 0; 181} 182 183int clk_vin_sw_setup(struct gk20a *g) 184{ 185 int status; 186 struct boardobjgrp *pboardobjgrp = NULL; 187 struct vin_device_v20 *pvindev = NULL; 188 struct avfsvinobjs *pvinobjs; 189 190 nvgpu_log_info(g, " "); 191 192 status = boardobjgrpconstruct_e32(g, &g->clk_pmu.avfs_vinobjs.super); 193 if (status) { 194 nvgpu_err(g, 195 "error creating boardobjgrp for clk vin, statu - 0x%x", 196 status); 197 goto done; 198 } 199 200 pboardobjgrp = &g->clk_pmu.avfs_vinobjs.super.super; 201 pvinobjs = &g->clk_pmu.avfs_vinobjs; 202 203 BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, VIN_DEVICE); 204 205 status = BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, 206 clk, CLK, clk_vin_device, CLK_VIN_DEVICE); 207 if (status) { 208 nvgpu_err(g, 209 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 210 status); 211 goto done; 212 } 213 214 pboardobjgrp->pmudatainit = _clk_vin_devgrp_pmudatainit_super; 215 pboardobjgrp->pmudatainstget = _clk_vin_devgrp_pmudata_instget; 216 pboardobjgrp->pmustatusinstget = _clk_vin_devgrp_pmustatus_instget; 217 218 status = 
devinit_get_vin_device_table(g, &g->clk_pmu.avfs_vinobjs); 219 if (status) { 220 goto done; 221 } 222 223 /*update vin calibration to fuse */ 224 g->ops.pmu_ver.clk.clk_avfs_get_vin_cal_data(g, pvinobjs, pvindev); 225 226 status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, 227 &g->clk_pmu.avfs_vinobjs.super.super, 228 clk, CLK, clk_vin_device, CLK_VIN_DEVICE); 229 if (status) { 230 nvgpu_err(g, 231 "error constructing PMU_BOARDOBJ_CMD_GRP_SET interface - 0x%x", 232 status); 233 goto done; 234 } 235 236done: 237 nvgpu_log_info(g, " done status %x", status); 238 return status; 239} 240 241int clk_vin_pmu_setup(struct gk20a *g) 242{ 243 int status; 244 struct boardobjgrp *pboardobjgrp = NULL; 245 246 nvgpu_log_info(g, " "); 247 248 pboardobjgrp = &g->clk_pmu.avfs_vinobjs.super.super; 249 250 if (!pboardobjgrp->bconstructed) { 251 return -EINVAL; 252 } 253 254 status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); 255 256 nvgpu_log_info(g, "Done"); 257 return status; 258} 259 260static int devinit_get_vin_device_table(struct gk20a *g, 261 struct avfsvinobjs *pvinobjs) 262{ 263 int status = 0; 264 u8 *vin_table_ptr = NULL; 265 struct vin_descriptor_header_10 vin_desc_table_header = { 0 }; 266 struct vin_descriptor_entry_10 vin_desc_table_entry = { 0 }; 267 u8 *vin_tbl_entry_ptr = NULL; 268 u32 index = 0; 269 u32 slope=0, intercept=0; 270 s8 offset='\0', gain='\0'; 271 struct vin_device *pvin_dev; 272 u32 cal_type; 273 274 union { 275 struct boardobj boardobj; 276 struct vin_device vin_device; 277 struct vin_device_v10 vin_device_v10; 278 struct vin_device_v20 vin_device_v20; 279 } vin_device_data; 280 281 nvgpu_log_info(g, " "); 282 283 vin_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, 284 g->bios.clock_token, VIN_TABLE); 285 if (vin_table_ptr == NULL) { 286 status = -1; 287 goto done; 288 } 289 290 memcpy(&vin_desc_table_header, vin_table_ptr, 291 sizeof(struct vin_descriptor_header_10)); 292 293 pvinobjs->calibration_rev_vbios = 294 
BIOS_GET_FIELD(vin_desc_table_header.flags0, 295 NV_VIN_DESC_FLAGS0_VIN_CAL_REVISION); 296 pvinobjs->vin_is_disable_allowed = 297 BIOS_GET_FIELD(vin_desc_table_header.flags0, 298 NV_VIN_DESC_FLAGS0_DISABLE_CONTROL); 299 cal_type = BIOS_GET_FIELD(vin_desc_table_header.flags0, 300 NV_VIN_DESC_FLAGS0_VIN_CAL_TYPE); 301 if (!cal_type) { 302 cal_type = CTRL_CLK_VIN_CAL_TYPE_V10; 303 } 304 305 switch (cal_type) { 306 case CTRL_CLK_VIN_CAL_TYPE_V10: 307 /* VIN calibration slope: XX.YYY mV/code => XXYYY uV/code*/ 308 slope = ((BIOS_GET_FIELD(vin_desc_table_header.vin_cal, 309 NV_VIN_DESC_VIN_CAL_SLOPE_INTEGER) * 1000)) + 310 ((BIOS_GET_FIELD(vin_desc_table_header.vin_cal, 311 NV_VIN_DESC_VIN_CAL_SLOPE_FRACTION))); 312 313 /* VIN calibration intercept: ZZZ.W mV => ZZZW00 uV */ 314 intercept = ((BIOS_GET_FIELD(vin_desc_table_header.vin_cal, 315 NV_VIN_DESC_VIN_CAL_INTERCEPT_INTEGER) * 1000)) + 316 ((BIOS_GET_FIELD(vin_desc_table_header.vin_cal, 317 NV_VIN_DESC_VIN_CAL_INTERCEPT_FRACTION) * 100)); 318 319 break; 320 case CTRL_CLK_VIN_CAL_TYPE_V20: 321 offset = BIOS_GET_FIELD(vin_desc_table_header.vin_cal, 322 NV_VIN_DESC_VIN_CAL_OFFSET); 323 gain = BIOS_GET_FIELD(vin_desc_table_header.vin_cal, 324 NV_VIN_DESC_VIN_CAL_GAIN); 325 break; 326 default: 327 status = -1; 328 goto done; 329 } 330 /* Read table entries*/ 331 vin_tbl_entry_ptr = vin_table_ptr + vin_desc_table_header.header_sizee; 332 for (index = 0; index < vin_desc_table_header.entry_count; index++) { 333 memcpy(&vin_desc_table_entry, vin_tbl_entry_ptr, 334 sizeof(struct vin_descriptor_entry_10)); 335 336 if (vin_desc_table_entry.vin_device_type == CTRL_CLK_VIN_TYPE_DISABLED) { 337 continue; 338 } 339 340 vin_device_data.boardobj.type = 341 (u8)vin_desc_table_entry.vin_device_type; 342 vin_device_data.vin_device.id = (u8)vin_desc_table_entry.vin_device_id; 343 vin_device_data.vin_device.volt_domain_vbios = 344 (u8)vin_desc_table_entry.volt_domain_vbios; 345 vin_device_data.vin_device.flls_shared_mask = 0; 346 347 
switch (vin_device_data.boardobj.type) { 348 case CTRL_CLK_VIN_TYPE_V10: 349 vin_device_data.vin_device_v10.data.vin_cal.slope = slope; 350 vin_device_data.vin_device_v10.data.vin_cal.intercept = intercept; 351 break; 352 case CTRL_CLK_VIN_TYPE_V20: 353 vin_device_data.vin_device_v20.data.cal_type = (u8) cal_type; 354 vin_device_data.vin_device_v20.data.vin_cal.cal_v20.offset = offset; 355 vin_device_data.vin_device_v20.data.vin_cal.cal_v20.gain = gain; 356 break; 357 default: 358 status = -1; 359 goto done; 360 }; 361 362 pvin_dev = construct_vin_device(g, (void *)&vin_device_data); 363 364 status = boardobjgrp_objinsert(&pvinobjs->super.super, 365 (struct boardobj *)pvin_dev, index); 366 367 vin_tbl_entry_ptr += vin_desc_table_header.entry_size; 368 } 369 370done: 371 nvgpu_log_info(g, " done status %x", status); 372 return status; 373} 374 375static int vin_device_construct_v10(struct gk20a *g, 376 struct boardobj **ppboardobj, 377 u16 size, void *pargs) 378{ 379 struct boardobj *ptmpobj = (struct boardobj *)pargs; 380 struct vin_device_v10 *pvin_device_v10; 381 struct vin_device_v10 *ptmpvin_device_v10 = (struct vin_device_v10 *)pargs; 382 int status = 0; 383 384 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_VIN_TYPE_V10) { 385 return -EINVAL; 386 } 387 388 ptmpobj->type_mask |= BIT(CTRL_CLK_VIN_TYPE_V10); 389 status = vin_device_construct_super(g, ppboardobj, size, pargs); 390 if (status) { 391 return -EINVAL; 392 } 393 394 pvin_device_v10 = (struct vin_device_v10 *)*ppboardobj; 395 396 pvin_device_v10->super.super.pmudatainit = 397 vin_device_init_pmudata_v10; 398 399 pvin_device_v10->data.vin_cal.slope = ptmpvin_device_v10->data.vin_cal.slope; 400 pvin_device_v10->data.vin_cal.intercept = ptmpvin_device_v10->data.vin_cal.intercept; 401 402 return status; 403} 404 405static int vin_device_construct_v20(struct gk20a *g, 406 struct boardobj **ppboardobj, 407 u16 size, void *pargs) 408{ 409 struct boardobj *ptmpobj = (struct boardobj *)pargs; 410 struct 
vin_device_v20 *pvin_device_v20; 411 struct vin_device_v20 *ptmpvin_device_v20 = (struct vin_device_v20 *)pargs; 412 int status = 0; 413 414 if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_VIN_TYPE_V20) { 415 return -EINVAL; 416 } 417 418 ptmpobj->type_mask |= BIT(CTRL_CLK_VIN_TYPE_V20); 419 status = vin_device_construct_super(g, ppboardobj, size, pargs); 420 if (status) { 421 return -EINVAL; 422 } 423 424 pvin_device_v20 = (struct vin_device_v20 *)*ppboardobj; 425 426 pvin_device_v20->super.super.pmudatainit = 427 vin_device_init_pmudata_v20; 428 429 pvin_device_v20->data.cal_type = ptmpvin_device_v20->data.cal_type; 430 pvin_device_v20->data.vin_cal.cal_v20.offset = ptmpvin_device_v20->data.vin_cal.cal_v20.offset; 431 pvin_device_v20->data.vin_cal.cal_v20.gain = ptmpvin_device_v20->data.vin_cal.cal_v20.gain; 432 433 return status; 434} 435static int vin_device_construct_super(struct gk20a *g, 436 struct boardobj **ppboardobj, 437 u16 size, void *pargs) 438{ 439 struct vin_device *pvin_device; 440 struct vin_device *ptmpvin_device = (struct vin_device *)pargs; 441 int status = 0; 442 status = boardobj_construct_super(g, ppboardobj, size, pargs); 443 444 if (status) { 445 return -EINVAL; 446 } 447 448 pvin_device = (struct vin_device *)*ppboardobj; 449 450 pvin_device->super.pmudatainit = 451 vin_device_init_pmudata_super; 452 453 pvin_device->id = ptmpvin_device->id; 454 pvin_device->volt_domain_vbios = ptmpvin_device->volt_domain_vbios; 455 pvin_device->flls_shared_mask = ptmpvin_device->flls_shared_mask; 456 pvin_device->volt_domain = CTRL_VOLT_DOMAIN_LOGIC; 457 458 return status; 459} 460static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs) 461{ 462 struct boardobj *board_obj_ptr = NULL; 463 int status; 464 465 nvgpu_log_info(g, " %d", BOARDOBJ_GET_TYPE(pargs)); 466 switch (BOARDOBJ_GET_TYPE(pargs)) { 467 case CTRL_CLK_VIN_TYPE_V10: 468 status = vin_device_construct_v10(g, &board_obj_ptr, 469 sizeof(struct vin_device_v10), pargs); 470 break; 471 
472 case CTRL_CLK_VIN_TYPE_V20: 473 status = vin_device_construct_v20(g, &board_obj_ptr, 474 sizeof(struct vin_device_v20), pargs); 475 break; 476 477 default: 478 return NULL; 479 }; 480 481 if (status) { 482 return NULL; 483 } 484 485 nvgpu_log_info(g, " Done"); 486 487 return (struct vin_device *)board_obj_ptr; 488} 489 490 491 492static int vin_device_init_pmudata_v10(struct gk20a *g, 493 struct boardobj *board_obj_ptr, 494 struct nv_pmu_boardobj *ppmudata) 495{ 496 int status = 0; 497 struct vin_device_v20 *pvin_dev_v20; 498 struct nv_pmu_clk_clk_vin_device_v10_boardobj_set *perf_pmu_data; 499 500 nvgpu_log_info(g, " "); 501 502 status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata); 503 if (status != 0) { 504 return status; 505 } 506 507 pvin_dev_v20 = (struct vin_device_v20 *)board_obj_ptr; 508 perf_pmu_data = (struct nv_pmu_clk_clk_vin_device_v10_boardobj_set *) 509 ppmudata; 510 511 perf_pmu_data->data.vin_cal.intercept = pvin_dev_v20->data.vin_cal.cal_v10.intercept; 512 perf_pmu_data->data.vin_cal.slope = pvin_dev_v20->data.vin_cal.cal_v10.slope; 513 514 nvgpu_log_info(g, " Done"); 515 516 return status; 517} 518 519static int vin_device_init_pmudata_v20(struct gk20a *g, 520 struct boardobj *board_obj_ptr, 521 struct nv_pmu_boardobj *ppmudata) 522{ 523 int status = 0; 524 struct vin_device_v20 *pvin_dev_v20; 525 struct nv_pmu_clk_clk_vin_device_v20_boardobj_set *perf_pmu_data; 526 527 nvgpu_log_info(g, " "); 528 529 status = vin_device_init_pmudata_super(g, board_obj_ptr, ppmudata); 530 if (status != 0) { 531 return status; 532 } 533 534 pvin_dev_v20 = (struct vin_device_v20 *)board_obj_ptr; 535 perf_pmu_data = (struct nv_pmu_clk_clk_vin_device_v20_boardobj_set *) 536 ppmudata; 537 538 perf_pmu_data->data.cal_type = pvin_dev_v20->data.cal_type; 539 perf_pmu_data->data.vin_cal.cal_v20.offset = pvin_dev_v20->data.vin_cal.cal_v20.offset; 540 perf_pmu_data->data.vin_cal.cal_v20.gain = pvin_dev_v20->data.vin_cal.cal_v20.gain; 541 542 
nvgpu_log_info(g, " Done"); 543 544 return status; 545} 546 547static int vin_device_init_pmudata_super(struct gk20a *g, 548 struct boardobj *board_obj_ptr, 549 struct nv_pmu_boardobj *ppmudata) 550{ 551 int status = 0; 552 struct vin_device *pvin_dev; 553 struct nv_pmu_clk_clk_vin_device_boardobj_set *perf_pmu_data; 554 555 nvgpu_log_info(g, " "); 556 557 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); 558 if (status != 0) { 559 return status; 560 } 561 562 pvin_dev = (struct vin_device *)board_obj_ptr; 563 perf_pmu_data = (struct nv_pmu_clk_clk_vin_device_boardobj_set *) 564 ppmudata; 565 566 perf_pmu_data->id = pvin_dev->id; 567 perf_pmu_data->volt_domain = pvin_dev->volt_domain; 568 perf_pmu_data->flls_shared_mask = pvin_dev->flls_shared_mask; 569 570 nvgpu_log_info(g, " Done"); 571 572 return status; 573}
diff --git a/include/clk/clk_vin.h b/include/clk/clk_vin.h
deleted file mode 100644
index 73b93e4..0000000
--- a/include/clk/clk_vin.h
+++ /dev/null
@@ -1,79 +0,0 @@ 1/* 2* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3* 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21*/ 22 23#ifndef NVGPU_CLK_VIN_H 24#define NVGPU_CLK_VIN_H 25 26#include "boardobj/boardobj.h" 27#include "boardobj/boardobjgrp.h" 28#include "boardobj/boardobjgrp_e32.h" 29 30struct vin_device; 31struct clk_pmupstate; 32 33struct avfsvinobjs { 34 struct boardobjgrp_e32 super; 35 u8 calibration_rev_vbios; 36 u8 calibration_rev_fused; 37 bool vin_is_disable_allowed; 38}; 39typedef u32 vin_device_state_load(struct gk20a *g, 40 struct clk_pmupstate *clk, struct vin_device *pdev); 41 42struct vin_device { 43 struct boardobj super; 44 u8 id; 45 u8 volt_domain; 46 u8 volt_domain_vbios; 47 u32 flls_shared_mask; 48 49 vin_device_state_load *state_load; 50}; 51 52struct vin_device_v10 { 53 struct vin_device super; 54 struct ctrl_clk_vin_device_info_data_v10 data; 55}; 56 57struct vin_device_v20 { 58 struct vin_device super; 59 struct ctrl_clk_vin_device_info_data_v20 data; 60}; 61 62/* get vin device object from descriptor table index*/ 63#define CLK_GET_VIN_DEVICE(pvinobjs, dev_index) \ 64 ((struct vin_device *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ 65 ((struct boardobjgrp *)&(pvinobjs->super.super)), (dev_index))) 66 67boardobj_construct construct_vindevice; 68boardobj_pmudatainit vindeviceinit_pmudata_super; 69 70int clk_vin_sw_setup(struct gk20a *g); 71int clk_vin_pmu_setup(struct gk20a *g); 72u32 clk_avfs_get_vin_cal_fuse_v10(struct gk20a *g, 73 struct avfsvinobjs *pvinobjs, 74 struct vin_device_v20 *pvindev); 75u32 clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, 76 struct avfsvinobjs *pvinobjs, 77 struct vin_device_v20 *pvindev); 78 79#endif /* NVGPU_CLK_VIN_H */
diff --git a/include/ctrl/ctrlboardobj.h b/include/ctrl/ctrlboardobj.h
deleted file mode 100644
index 8f57e88..0000000
--- a/include/ctrl/ctrlboardobj.h
+++ /dev/null
@@ -1,89 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_CTRLBOARDOBJ_H 24#define NVGPU_CTRLBOARDOBJ_H 25 26struct ctrl_boardobj { 27 u8 type; 28}; 29 30#define CTRL_BOARDOBJGRP_TYPE_INVALID 0x00U 31#define CTRL_BOARDOBJGRP_TYPE_E32 0x01U 32#define CTRL_BOARDOBJGRP_TYPE_E255 0x02U 33 34#define CTRL_BOARDOBJGRP_E32_MAX_OBJECTS 32U 35 36#define CTRL_BOARDOBJGRP_E255_MAX_OBJECTS 255U 37 38#define CTRL_BOARDOBJ_MAX_BOARD_OBJECTS \ 39 CTRL_BOARDOBJGRP_E32_MAX_OBJECTS 40 41#define CTRL_BOARDOBJ_IDX_INVALID 255U 42 43#define CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE 32U 44 45#define CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(_bit) \ 46 ((_bit) / CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE) 47 48#define CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(_bit) \ 49 ((_bit) % CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE) 50 51#define CTRL_BOARDOBJGRP_MASK_DATA_SIZE(_bits) \ 52 (CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX((_bits) - 1U) + 1U) 53 54 55#define CTRL_BOARDOBJGRP_MASK_ARRAY_START_SIZE 1U 56#define CTRL_BOARDOBJGRP_MASK_ARRAY_EXTENSION_SIZE(_bits) \ 57 (CTRL_BOARDOBJGRP_MASK_DATA_SIZE(_bits) - \ 58 CTRL_BOARDOBJGRP_MASK_ARRAY_START_SIZE) 59 60struct ctrl_boardobjgrp_mask { 61 u32 data[1]; 62}; 63 64struct ctrl_boardobjgrp_mask_e32 { 65 struct ctrl_boardobjgrp_mask super; 66}; 67 68struct ctrl_boardobjgrp_mask_e255 { 69 struct ctrl_boardobjgrp_mask super; 70 u32 data_e255[7]; 71}; 72 73struct ctrl_boardobjgrp_super { 74 struct ctrl_boardobjgrp_mask obj_mask; 75}; 76 77struct ctrl_boardobjgrp_e32 { 78 struct ctrl_boardobjgrp_mask_e32 obj_mask; 79}; 80 81struct CTRL_boardobjgrp_e255 { 82 struct ctrl_boardobjgrp_mask_e255 obj_mask; 83}; 84 85struct ctrl_boardobjgrp { 86 u32 obj_mask; 87}; 88 89#endif /* NVGPU_CTRLBOARDOBJ_H */
diff --git a/include/ctrl/ctrlclk.h b/include/ctrl/ctrlclk.h
deleted file mode 100644
index fbd5677..0000000
--- a/include/ctrl/ctrlclk.h
+++ /dev/null
@@ -1,212 +0,0 @@ 1/* 2 * general p state infrastructure 3 * 4 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef NVGPU_CTRLCLK_H 25#define NVGPU_CTRLCLK_H 26 27#include "ctrlboardobj.h" 28#include "ctrlclkavfs.h" 29#include "ctrlvolt.h" 30 31#define CTRL_CLK_CLK_DELTA_MAX_VOLT_RAILS 4 32 33/* valid clock domain values */ 34#define CTRL_CLK_DOMAIN_MCLK (0x00000010) 35#define CTRL_CLK_DOMAIN_HOSTCLK (0x00000020) 36#define CTRL_CLK_DOMAIN_DISPCLK (0x00000040) 37#define CTRL_CLK_DOMAIN_GPC2CLK (0x00010000) 38#define CTRL_CLK_DOMAIN_XBAR2CLK (0x00040000) 39#define CTRL_CLK_DOMAIN_SYS2CLK (0x00800000) 40#define CTRL_CLK_DOMAIN_HUB2CLK (0x01000000) 41#define CTRL_CLK_DOMAIN_PWRCLK (0x00080000) 42#define CTRL_CLK_DOMAIN_NVDCLK (0x00100000) 43#define CTRL_CLK_DOMAIN_PCIEGENCLK (0x00200000) 44 45#define CTRL_CLK_DOMAIN_GPCCLK (0x00000001) 46#define CTRL_CLK_DOMAIN_XBARCLK (0x00000002) 47#define CTRL_CLK_DOMAIN_SYSCLK (0x00000004) 48#define CTRL_CLK_DOMAIN_HUBCLK (0x00000008) 49 50#define CTRL_CLK_CLK_DOMAIN_TYPE_3X 0x01 51#define CTRL_CLK_CLK_DOMAIN_TYPE_3X_FIXED 0x02 52#define CTRL_CLK_CLK_DOMAIN_TYPE_3X_PROG 0x03 53#define CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER 0x04 54#define CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE 0x05 55#define CTRL_CLK_CLK_DOMAIN_TYPE_30_PROG 0x06 56#define CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER 0x07 57#define CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE 0x08 58#define CTRL_CLK_CLK_DOMAIN_TYPE_35_PROG 0x09 59 60#define CTRL_CLK_CLK_DOMAIN_3X_PROG_ORDERING_INDEX_INVALID 0xFF 61#define CTRL_CLK_CLK_DOMAIN_INDEX_INVALID 0xFF 62 63#define CTRL_CLK_CLK_PROG_TYPE_1X 0x01 64#define CTRL_CLK_CLK_PROG_TYPE_1X_MASTER 0x02 65#define CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO 0x03 66#define CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_TABLE 0x04 67#define CTRL_CLK_CLK_PROG_TYPE_UNKNOWN 255 68 69/*! 70 * Enumeration of CLK_PROG source types. 
71 */ 72#define CTRL_CLK_PROG_1X_SOURCE_PLL 0x00 73#define CTRL_CLK_PROG_1X_SOURCE_ONE_SOURCE 0x01 74#define CTRL_CLK_PROG_1X_SOURCE_FLL 0x02 75#define CTRL_CLK_PROG_1X_SOURCE_INVALID 255 76 77#define CTRL_CLK_CLK_PROG_1X_MASTER_VF_ENTRY_MAX_ENTRIES 4 78#define CTRL_CLK_PROG_1X_MASTER_MAX_SLAVE_ENTRIES 6 79 80#define CTRL_CLK_CLK_VF_POINT_IDX_INVALID 255 81 82#define CTRL_CLK_CLK_VF_POINT_TYPE_FREQ 0x01 83#define CTRL_CLK_CLK_VF_POINT_TYPE_VOLT 0x02 84#define CTRL_CLK_CLK_VF_POINT_TYPE_UNKNOWN 255 85 86struct ctrl_clk_clk_prog_1x_master_source_fll { 87 u32 base_vfsmooth_volt_uv; 88 u32 max_vf_ramprate; 89 u32 max_freq_stepsize_mhz; 90}; 91 92union ctrl_clk_clk_prog_1x_master_source_data { 93 struct ctrl_clk_clk_prog_1x_master_source_fll fll; 94}; 95 96struct ctrl_clk_clk_vf_point_info_freq { 97 u16 freq_mhz; 98}; 99 100struct ctrl_clk_clk_vf_point_info_volt { 101 u32 sourceVoltageuV; 102 u8 vfGainVfeEquIdx; 103 u8 clkDomainIdx; 104}; 105 106struct ctrl_clk_clk_prog_1x_master_vf_entry { 107 u8 vfe_idx; 108 u8 gain_vfe_idx; 109 u8 vf_point_idx_first; 110 u8 vf_point_idx_last; 111}; 112 113struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry { 114 u8 clk_dom_idx; 115 u8 ratio; 116}; 117 118struct ctrl_clk_clk_prog_1x_master_table_slave_entry { 119 u8 clk_dom_idx; 120 u16 freq_mhz; 121}; 122 123struct ctrl_clk_clk_prog_1x_source_pll { 124 u8 pll_idx; 125 u8 freq_step_size_mhz; 126}; 127 128union ctrl_clk_freq_delta_data { 129 s32 delta_khz; 130 s16 delta_percent; 131}; 132struct ctrl_clk_freq_delta { 133 u8 type; 134 union ctrl_clk_freq_delta_data data; 135}; 136 137struct ctrl_clk_clk_delta { 138 struct ctrl_clk_freq_delta freq_delta; 139 int volt_deltauv[CTRL_CLK_CLK_DELTA_MAX_VOLT_RAILS]; 140}; 141 142struct ctrl_clk_vin_v10 { 143 u32 slope; 144 u32 intercept; 145}; 146 147struct ctrl_clk_vin_v20 { 148 s8 offset; 149 s8 gain; 150}; 151 152union ctrl_clk_vin_data_v20 { 153 struct ctrl_clk_vin_v10 cal_v10; 154 struct ctrl_clk_vin_v20 cal_v20; 155}; 156 157struct 
ctrl_clk_vin_device_info_data_v10 { 158 struct ctrl_clk_vin_v10 vin_cal; 159}; 160 161struct ctrl_clk_vin_device_info_data_v20 { 162 u8 cal_type; 163 union ctrl_clk_vin_data_v20 vin_cal; 164}; 165 166union ctrl_clk_clk_prog_1x_source_data { 167 struct ctrl_clk_clk_prog_1x_source_pll pll; 168}; 169 170struct ctrl_clk_vf_pair { 171 u16 freq_mhz; 172 u32 voltage_uv; 173}; 174 175struct ctrl_clk_clk_domain_list_item { 176 u32 clk_domain; 177 u32 clk_freq_khz; 178 u32 clk_flags; 179 u8 current_regime_id; 180 u8 target_regime_id; 181}; 182 183struct ctrl_clk_clk_domain_list_item_v1 { 184 u32 clk_domain; 185 u32 clk_freq_khz; 186 u8 regime_id; 187 u8 source; 188}; 189 190struct ctrl_clk_clk_domain_list { 191 u8 num_domains; 192 struct ctrl_clk_clk_domain_list_item_v1 193 clk_domains[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; 194}; 195 196#define CTRL_CLK_VF_PAIR_FREQ_MHZ_GET(pvfpair) \ 197 ((pvfpair)->freq_mhz) 198 199#define CTRL_CLK_VF_PAIR_VOLTAGE_UV_GET(pvfpair) \ 200 ((pvfpair)->voltage_uv) 201 202#define CTRL_CLK_VF_PAIR_FREQ_MHZ_SET(pvfpair, _freqmhz) \ 203 (((pvfpair)->freq_mhz) = (_freqmhz)) 204 205#define CTRL_CLK_VF_PAIR_FREQ_MHZ_SET(pvfpair, _freqmhz) \ 206 (((pvfpair)->freq_mhz) = (_freqmhz)) 207 208 209#define CTRL_CLK_VF_PAIR_VOLTAGE_UV_SET(pvfpair, _voltageuv) \ 210 (((pvfpair)->voltage_uv) = (_voltageuv)) 211 212#endif /* NVGPU_CTRLCLK_H */
diff --git a/include/ctrl/ctrlclkavfs.h b/include/ctrl/ctrlclkavfs.h
deleted file mode 100644
index 676ae7e..0000000
--- a/include/ctrl/ctrlclkavfs.h
+++ /dev/null
@@ -1,112 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_CTRLCLKAVFS_H 24#define NVGPU_CTRLCLKAVFS_H 25 26#include "ctrlboardobj.h" 27/*! 28 * Valid global VIN ID values 29 */ 30#define CTRL_CLK_VIN_ID_SYS 0x00000000 31#define CTRL_CLK_VIN_ID_LTC 0x00000001 32#define CTRL_CLK_VIN_ID_XBAR 0x00000002 33#define CTRL_CLK_VIN_ID_GPC0 0x00000003 34#define CTRL_CLK_VIN_ID_GPC1 0x00000004 35#define CTRL_CLK_VIN_ID_GPC2 0x00000005 36#define CTRL_CLK_VIN_ID_GPC3 0x00000006 37#define CTRL_CLK_VIN_ID_GPC4 0x00000007 38#define CTRL_CLK_VIN_ID_GPC5 0x00000008 39#define CTRL_CLK_VIN_ID_GPCS 0x00000009 40#define CTRL_CLK_VIN_ID_SRAM 0x0000000A 41#define CTRL_CLK_VIN_ID_UNDEFINED 0x000000FF 42 43#define CTRL_CLK_VIN_TYPE_DISABLED 0x00000000 44#define CTRL_CLK_VIN_TYPE_V10 0x00000001 45#define CTRL_CLK_VIN_TYPE_V20 0x00000002 46 47/*! 
48 * Various types of VIN calibration that the GPU can support 49 */ 50#define CTRL_CLK_VIN_CAL_TYPE_V10 (0x00000000) 51#define CTRL_CLK_VIN_CAL_TYPE_V20 (0x00000001) 52 53/*! 54 * Mask of all GPC VIN IDs supported by RM 55 */ 56#define CTRL_CLK_VIN_MASK_UNICAST_GPC (BIT(CTRL_CLK_VIN_ID_GPC0) | \ 57 BIT(CTRL_CLK_VIN_ID_GPC1) | \ 58 BIT(CTRL_CLK_VIN_ID_GPC2) | \ 59 BIT(CTRL_CLK_VIN_ID_GPC3) | \ 60 BIT(CTRL_CLK_VIN_ID_GPC4) | \ 61 BIT(CTRL_CLK_VIN_ID_GPC5)) 62#define CTRL_CLK_LUT_NUM_ENTRIES_MAX (128) 63#define CTRL_CLK_LUT_NUM_ENTRIES_GV10x (128) 64#define CTRL_CLK_LUT_NUM_ENTRIES_GP10x (100) 65#define CTRL_CLK_VIN_STEP_SIZE_UV (10000) 66#define CTRL_CLK_LUT_MIN_VOLTAGE_UV (450000) 67#define CTRL_CLK_FLL_TYPE_DISABLED 0 68 69#define CTRL_CLK_FLL_ID_SYS (0x00000000) 70#define CTRL_CLK_FLL_ID_LTC (0x00000001) 71#define CTRL_CLK_FLL_ID_XBAR (0x00000002) 72#define CTRL_CLK_FLL_ID_GPC0 (0x00000003) 73#define CTRL_CLK_FLL_ID_GPC1 (0x00000004) 74#define CTRL_CLK_FLL_ID_GPC2 (0x00000005) 75#define CTRL_CLK_FLL_ID_GPC3 (0x00000006) 76#define CTRL_CLK_FLL_ID_GPC4 (0x00000007) 77#define CTRL_CLK_FLL_ID_GPC5 (0x00000008) 78#define CTRL_CLK_FLL_ID_GPCS (0x00000009) 79#define CTRL_CLK_FLL_ID_UNDEFINED (0x000000FF) 80#define CTRL_CLK_FLL_MASK_UNDEFINED (0x00000000) 81 82/*! 83 * Mask of all GPC FLL IDs supported by RM 84 */ 85#define CTRL_CLK_FLL_MASK_UNICAST_GPC (BIT(CTRL_CLK_FLL_ID_GPC0) | \ 86 BIT(CTRL_CLK_FLL_ID_GPC1) | \ 87 BIT(CTRL_CLK_FLL_ID_GPC2) | \ 88 BIT(CTRL_CLK_FLL_ID_GPC3) | \ 89 BIT(CTRL_CLK_FLL_ID_GPC4) | \ 90 BIT(CTRL_CLK_FLL_ID_GPC5)) 91/*! 
92 * Mask of all FLL IDs supported by Nvgpu driver 93 */ 94#define CTRL_CLK_FLL_ID_ALL_MASK (BIT(CTRL_CLK_FLL_ID_SYS) | \ 95 BIT(CTRL_CLK_FLL_ID_LTC) | \ 96 BIT(CTRL_CLK_FLL_ID_XBAR) | \ 97 BIT(CTRL_CLK_FLL_ID_GPC0) | \ 98 BIT(CTRL_CLK_FLL_ID_GPC1) | \ 99 BIT(CTRL_CLK_FLL_ID_GPC2) | \ 100 BIT(CTRL_CLK_FLL_ID_GPC3) | \ 101 BIT(CTRL_CLK_FLL_ID_GPC4) | \ 102 BIT(CTRL_CLK_FLL_ID_GPC5) | \ 103 BIT(CTRL_CLK_FLL_ID_GPCS)) 104 105#define CTRL_CLK_FLL_REGIME_ID_INVALID (0x00000000) 106#define CTRL_CLK_FLL_REGIME_ID_FFR (0x00000001) 107#define CTRL_CLK_FLL_REGIME_ID_FR (0x00000002) 108 109#define CTRL_CLK_FLL_LUT_VSELECT_LOGIC (0x00000000) 110#define CTRL_CLK_FLL_LUT_VSELECT_MIN (0x00000001) 111#define CTRL_CLK_FLL_LUT_VSELECT_SRAM (0x00000002) 112#endif /* NVGPU_CTRLCLKAVFS_H */
diff --git a/include/ctrl/ctrlperf.h b/include/ctrl/ctrlperf.h
deleted file mode 100644
index 2928cad..0000000
--- a/include/ctrl/ctrlperf.h
+++ /dev/null
@@ -1,103 +0,0 @@ 1/* 2 * general p state infrastructure 3 * 4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef NVGPU_CTRLPERF_H 25#define NVGPU_CTRLPERF_H 26 27struct ctrl_perf_volt_rail_list_item { 28 u8 volt_domain; 29 u32 voltage_uv; 30 u32 voltage_min_noise_unaware_uv; 31}; 32 33struct ctrl_perf_volt_rail_list { 34 u8 num_rails; 35 struct ctrl_perf_volt_rail_list_item 36 rails[CTRL_VOLT_VOLT_RAIL_MAX_RAILS]; 37}; 38 39union ctrl_perf_vfe_var_single_sensed_fuse_value_data { 40 int signed_value; 41 u32 unsigned_value; 42}; 43 44struct ctrl_perf_vfe_var_single_sensed_fuse_value { 45 bool b_signed; 46 union ctrl_perf_vfe_var_single_sensed_fuse_value_data data; 47}; 48 49struct ctrl_bios_vfield_register_segment_super { 50 u8 low_bit; 51 u8 high_bit; 52}; 53 54struct ctrl_bios_vfield_register_segment_reg { 55 struct ctrl_bios_vfield_register_segment_super super; 56 u32 addr; 57}; 58 59struct ctrl_bios_vfield_register_segment_index_reg { 60 struct ctrl_bios_vfield_register_segment_super super; 61 u32 addr; 62 u32 reg_index; 63 u32 index; 64}; 65 66union ctrl_bios_vfield_register_segment_data { 67 struct ctrl_bios_vfield_register_segment_reg reg; 68 struct ctrl_bios_vfield_register_segment_index_reg index_reg; 69}; 70 71struct ctrl_bios_vfield_register_segment { 72 u8 type; 73 union ctrl_bios_vfield_register_segment_data data; 74}; 75 76#define NV_PMU_VFE_VAR_SINGLE_SENSED_FUSE_SEGMENTS_MAX 1 77 78struct ctrl_perf_vfe_var_single_sensed_fuse_info { 79 u8 segment_count; 80 struct ctrl_bios_vfield_register_segment segments[NV_PMU_VFE_VAR_SINGLE_SENSED_FUSE_SEGMENTS_MAX]; 81}; 82 83struct ctrl_perf_vfe_var_single_sensed_fuse_override_info { 84 u32 fuse_val_override; 85 u8 b_fuse_regkey_override; 86}; 87 88struct ctrl_perf_vfe_var_single_sensed_fuse_vfield_info { 89 struct ctrl_perf_vfe_var_single_sensed_fuse_info fuse; 90 u32 fuse_val_default; 91 u32 hw_correction_scale; 92 int hw_correction_offset; 93 u8 v_field_id; 94}; 95 96struct ctrl_perf_vfe_var_single_sensed_fuse_ver_vfield_info { 97 struct ctrl_perf_vfe_var_single_sensed_fuse_info fuse; 98 u8 ver_expected; 
99 bool b_ver_check; 100 bool b_use_default_on_ver_check_fail; 101 u8 v_field_id_ver; 102}; 103#endif /* NVGPU_CTRLPERF_H */
diff --git a/include/ctrl/ctrlpmgr.h b/include/ctrl/ctrlpmgr.h
deleted file mode 100644
index 90f6501..0000000
--- a/include/ctrl/ctrlpmgr.h
+++ /dev/null
@@ -1,98 +0,0 @@ 1/* 2 * Control pmgr state infrastructure 3 * 4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef NVGPU_CTRLPMGR_H 25#define NVGPU_CTRLPMGR_H 26 27#include "ctrlboardobj.h" 28 29/* valid power domain values */ 30#define CTRL_PMGR_PWR_DEVICES_MAX_DEVICES 32U 31#define CTRL_PMGR_PWR_VIOLATION_MAX 0x06U 32 33#define CTRL_PMGR_PWR_DEVICE_TYPE_INA3221 0x4EU 34 35#define CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID 0xFFU 36#define CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR 0x08U 37 38#define CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X 0x30U 39#define CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD 0x04U 40#define CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD 0x0CU 41 42#define CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS 0x8U 43#define CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES 0x08U 44#define CTRL_PMGR_PWR_POLICY_INDEX_INVALID 0xFFU 45#define CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM 0xFEU 46#define CTRL_PMGR_PWR_POLICY_LIMIT_MAX (0xFFFFFFFFU) 47 48struct ctrl_pmgr_pwr_device_info_rshunt { 49 bool use_fxp8_8; 50 u16 rshunt_value; 51}; 52 53struct ctrl_pmgr_pwr_policy_info_integral { 54 u8 past_sample_count; 55 u8 next_sample_count; 56 u16 ratio_limit_min; 57 u16 ratio_limit_max; 58}; 59 60enum ctrl_pmgr_pwr_policy_filter_type { 61 CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE = 0, 62 CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK, 63 CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE, 64 CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR 65}; 66 67struct ctrl_pmgr_pwr_policy_filter_param_block { 68 u32 block_size; 69}; 70 71struct ctrl_pmgr_pwr_policy_filter_param_moving_average { 72 u32 window_size; 73}; 74 75struct ctrl_pmgr_pwr_policy_filter_param_iir { 76 u32 divisor; 77}; 78 79union ctrl_pmgr_pwr_policy_filter_param { 80 struct ctrl_pmgr_pwr_policy_filter_param_block block; 81 struct ctrl_pmgr_pwr_policy_filter_param_moving_average moving_avg; 82 struct ctrl_pmgr_pwr_policy_filter_param_iir iir; 83}; 84 85struct ctrl_pmgr_pwr_policy_limit_input { 86 u8 pwr_policy_idx; 87 u32 limit_value; 88}; 89 90struct ctrl_pmgr_pwr_policy_limit_arbitration { 91 bool b_arb_max; 92 u8 num_inputs; 93 u32 output; 94 struct 
ctrl_pmgr_pwr_policy_limit_input 95 inputs[CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS]; 96}; 97 98#endif /* NVGPU_CTRLPMGR_H */
diff --git a/include/ctrl/ctrltherm.h b/include/ctrl/ctrltherm.h
deleted file mode 100644
index 27af7b0..0000000
--- a/include/ctrl/ctrltherm.h
+++ /dev/null
@@ -1,33 +0,0 @@ 1/* 2 * Control thermal infrastructure 3 * 4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24#ifndef NVGPU_CTRLTHERM_H 25#define NVGPU_CTRLTHERM_H 26 27#include "ctrlboardobj.h" 28 29#define CTRL_THERMAL_THERM_DEVICE_CLASS_GPU 0x01 30 31#define CTRL_THERMAL_THERM_CHANNEL_CLASS_DEVICE 0x01 32 33#endif /* NVGPU_CTRLTHERM_H */
diff --git a/include/ctrl/ctrlvolt.h b/include/ctrl/ctrlvolt.h
deleted file mode 100644
index 84994eb..0000000
--- a/include/ctrl/ctrlvolt.h
+++ /dev/null
@@ -1,143 +0,0 @@ 1/* 2 * general p state infrastructure 3 * 4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24#ifndef NVGPU_CTRLVOLT_H 25#define NVGPU_CTRLVOLT_H 26 27#define CTRL_VOLT_VOLT_RAIL_MAX_RAILS \ 28 CTRL_BOARDOBJGRP_E32_MAX_OBJECTS 29 30#include "ctrlperf.h" 31#include "ctrlboardobj.h" 32 33#define CTRL_VOLT_RAIL_VOLT_DELTA_MAX_ENTRIES 0x04U 34#define CTRL_VOLT_VOLT_DEV_VID_VSEL_MAX_ENTRIES (8U) 35#define CTRL_VOLT_DOMAIN_INVALID 0x00U 36#define CTRL_VOLT_DOMAIN_LOGIC 0x01U 37#define CLK_PROG_VFE_ENTRY_LOGIC 0x00U 38#define CLK_PROG_VFE_ENTRY_SRAM 0x01U 39 40/* 41 * Macros for Voltage Domain HAL. 42 */ 43#define CTRL_VOLT_DOMAIN_HAL_GP10X_SINGLE_RAIL 0x00U 44#define CTRL_VOLT_DOMAIN_HAL_GP10X_SPLIT_RAIL 0x01U 45 46/* 47 * Macros for Voltage Domains. 
48 */ 49#define CTRL_VOLT_DOMAIN_INVALID 0x00U 50#define CTRL_VOLT_DOMAIN_LOGIC 0x01U 51#define CTRL_VOLT_DOMAIN_SRAM 0x02U 52 53/*! 54 * Special value corresponding to an invalid Voltage Rail Index. 55 */ 56#define CTRL_VOLT_RAIL_INDEX_INVALID \ 57 CTRL_BOARDOBJ_IDX_INVALID 58 59/*! 60 * Special value corresponding to an invalid Voltage Device Index. 61 */ 62#define CTRL_VOLT_DEVICE_INDEX_INVALID \ 63 CTRL_BOARDOBJ_IDX_INVALID 64 65/*! 66 * Special value corresponding to an invalid Voltage Policy Index. 67 */ 68#define CTRL_VOLT_POLICY_INDEX_INVALID \ 69 CTRL_BOARDOBJ_IDX_INVALID 70 71enum nv_pmu_pmgr_pwm_source { 72 NV_PMU_PMGR_PWM_SOURCE_INVALID = 0, 73 NV_PMU_PMGR_PWM_SOURCE_THERM_VID_PWM_0 = 4, 74 NV_PMU_PMGR_PWM_SOURCE_THERM_VID_PWM_1, 75 NV_PMU_PMGR_PWM_SOURCE_RSVD_0 = 7, 76 NV_PMU_PMGR_PWM_SOURCE_RSVD_1 = 8, 77}; 78 79/*! 80 * Macros for Voltage Device Types. 81 */ 82#define CTRL_VOLT_DEVICE_TYPE_INVALID 0x00U 83#define CTRL_VOLT_DEVICE_TYPE_PWM 0x03U 84 85/* 86 * Macros for Volt Device Operation types. 87 */ 88#define CTRL_VOLT_DEVICE_OPERATION_TYPE_INVALID 0x00U 89#define CTRL_VOLT_DEVICE_OPERATION_TYPE_DEFAULT 0x01U 90#define CTRL_VOLT_DEVICE_OPERATION_TYPE_LPWR_STEADY_STATE 0x02U 91#define CTRL_VOLT_DEVICE_OPERATION_TYPE_LPWR_SLEEP_STATE 0x03U 92#define CTRL_VOLT_VOLT_DEVICE_OPERATION_TYPE_IPC_VMIN 0x04U 93 94/*! 95 * Macros for Voltage Domains. 96 */ 97#define CTRL_VOLT_DOMAIN_INVALID 0x00U 98#define CTRL_VOLT_DOMAIN_LOGIC 0x01U 99#define CTRL_VOLT_DOMAIN_SRAM 0x02U 100 101/*! 102 * Macros for Volt Policy types. 103 * 104 * Virtual VOLT_POLICY types are indexed starting from 0xFF. 
105 */ 106#define CTRL_VOLT_POLICY_TYPE_INVALID 0x00U 107#define CTRL_VOLT_POLICY_TYPE_SINGLE_RAIL 0x01U 108#define CTRL_VOLT_POLICY_TYPE_SR_MULTI_STEP 0x02U 109#define CTRL_VOLT_POLICY_TYPE_SR_SINGLE_STEP 0x03U 110#define CTRL_VOLT_POLICY_TYPE_SINGLE_RAIL_MULTI_STEP 0x04U 111#define CTRL_VOLT_POLICY_TYPE_SPLIT_RAIL 0xFEU 112#define CTRL_VOLT_POLICY_TYPE_UNKNOWN 0xFFU 113 114/*! 115 * Macros for Volt Policy Client types. 116 */ 117#define CTRL_VOLT_POLICY_CLIENT_INVALID 0x00U 118#define CTRL_VOLT_POLICY_CLIENT_PERF_CORE_VF_SEQ 0x01U 119 120struct ctrl_volt_volt_rail_list_item { 121 u8 rail_idx; 122 u32 voltage_uv; 123}; 124 125struct ctrl_volt_volt_rail_list { 126 u8 num_rails; 127 struct ctrl_volt_volt_rail_list_item 128 rails[CTRL_VOLT_VOLT_RAIL_MAX_RAILS]; 129}; 130 131struct ctrl_volt_volt_rail_list_item_v1 { 132 u8 rail_idx; 133 u32 voltage_uv; 134 u32 voltage_min_noise_unaware_uv; 135}; 136 137struct ctrl_volt_volt_rail_list_v1 { 138 u8 num_rails; 139 struct ctrl_volt_volt_rail_list_item_v1 140 rails[CTRL_VOLT_VOLT_RAIL_MAX_RAILS]; 141}; 142 143#endif /* NVGPU_CTRLVOLT_H */
diff --git a/include/gk20a/ce2_gk20a.c b/include/gk20a/ce2_gk20a.c
deleted file mode 100644
index 2a40b08..0000000
--- a/include/gk20a/ce2_gk20a.c
+++ /dev/null
@@ -1,576 +0,0 @@ 1/* 2 * GK20A Graphics Copy Engine (gr host) 3 * 4 * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <nvgpu/kmem.h> 26#include <nvgpu/dma.h> 27#include <nvgpu/os_sched.h> 28#include <nvgpu/log.h> 29#include <nvgpu/enabled.h> 30#include <nvgpu/io.h> 31#include <nvgpu/utils.h> 32#include <nvgpu/channel.h> 33#include <nvgpu/power_features/cg.h> 34 35#include "gk20a.h" 36#include "gk20a/fence_gk20a.h" 37 38#include <nvgpu/hw/gk20a/hw_ce2_gk20a.h> 39#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h> 40#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h> 41#include <nvgpu/hw/gk20a/hw_ram_gk20a.h> 42#include <nvgpu/hw/gk20a/hw_top_gk20a.h> 43#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 44#include <nvgpu/barrier.h> 45 46/* 47 * Copy engine defines line size in pixels 48 */ 49#define MAX_CE_SHIFT 31 /* 4Gpixels -1 */ 50#define MAX_CE_MASK ((u32) (~(~0U << MAX_CE_SHIFT))) 51#define MAX_CE_ALIGN(a) (a & MAX_CE_MASK) 52 53 54static u32 ce2_nonblockpipe_isr(struct gk20a *g, u32 fifo_intr) 55{ 56 nvgpu_log(g, gpu_dbg_intr, "ce2 non-blocking pipe interrupt\n"); 57 58 return ce2_intr_status_nonblockpipe_pending_f(); 59} 60 61static u32 ce2_blockpipe_isr(struct gk20a *g, u32 fifo_intr) 62{ 63 nvgpu_log(g, gpu_dbg_intr, "ce2 blocking pipe interrupt\n"); 64 65 return ce2_intr_status_blockpipe_pending_f(); 66} 67 68static u32 ce2_launcherr_isr(struct gk20a *g, u32 fifo_intr) 69{ 70 nvgpu_log(g, gpu_dbg_intr, "ce2 launch error interrupt\n"); 71 72 return ce2_intr_status_launcherr_pending_f(); 73} 74 75void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base) 76{ 77 u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); 78 u32 clear_intr = 0; 79 80 nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); 81 82 /* clear blocking interrupts: they exibit broken behavior */ 83 if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) { 84 clear_intr |= ce2_blockpipe_isr(g, ce2_intr); 85 } 86 87 if (ce2_intr & ce2_intr_status_launcherr_pending_f()) { 88 clear_intr |= ce2_launcherr_isr(g, ce2_intr); 89 } 90 91 gk20a_writel(g, ce2_intr_status_r(), clear_intr); 92 return; 93} 94 95u32 
gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) 96{ 97 u32 ops = 0; 98 u32 ce2_intr = gk20a_readl(g, ce2_intr_status_r()); 99 100 nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr); 101 102 if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) { 103 gk20a_writel(g, ce2_intr_status_r(), 104 ce2_nonblockpipe_isr(g, ce2_intr)); 105 ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE | 106 GK20A_NONSTALL_OPS_POST_EVENTS); 107 } 108 return ops; 109} 110 111/* static CE app api */ 112static void gk20a_ce_put_fences(struct gk20a_gpu_ctx *ce_ctx) 113{ 114 u32 i; 115 116 for (i = 0; i < NVGPU_CE_MAX_INFLIGHT_JOBS; i++) { 117 struct gk20a_fence **fence = &ce_ctx->postfences[i]; 118 if (*fence) { 119 gk20a_fence_put(*fence); 120 } 121 *fence = NULL; 122 } 123} 124 125/* assume this api should need to call under nvgpu_mutex_acquire(&ce_app->app_mutex) */ 126static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx) 127{ 128 struct nvgpu_list_node *list = &ce_ctx->list; 129 130 ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_DELETED; 131 132 nvgpu_mutex_acquire(&ce_ctx->gpu_ctx_mutex); 133 134 if (nvgpu_mem_is_valid(&ce_ctx->cmd_buf_mem)) { 135 gk20a_ce_put_fences(ce_ctx); 136 nvgpu_dma_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem); 137 } 138 139 /* 140 * free the channel 141 * gk20a_channel_close() will also unbind the channel from TSG 142 */ 143 gk20a_channel_close(ce_ctx->ch); 144 nvgpu_ref_put(&ce_ctx->tsg->refcount, gk20a_tsg_release); 145 146 /* housekeeping on app */ 147 if (list->prev && list->next) { 148 nvgpu_list_del(list); 149 } 150 151 nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex); 152 nvgpu_mutex_destroy(&ce_ctx->gpu_ctx_mutex); 153 154 nvgpu_kfree(ce_ctx->g, ce_ctx); 155} 156 157static inline unsigned int gk20a_ce_get_method_size(int request_operation, 158 u64 size) 159{ 160 /* failure size */ 161 unsigned int methodsize = UINT_MAX; 162 unsigned int iterations = 0; 163 u32 shift; 164 u64 chunk = size; 165 u32 height, width; 166 167 
while (chunk) { 168 iterations++; 169 170 shift = MAX_CE_ALIGN(chunk) ? __ffs(MAX_CE_ALIGN(chunk)) : 171 MAX_CE_SHIFT; 172 width = chunk >> shift; 173 height = 1 << shift; 174 width = MAX_CE_ALIGN(width); 175 176 chunk -= (u64) height * width; 177 } 178 179 if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER) { 180 methodsize = (2 + (16 * iterations)) * sizeof(u32); 181 } else if (request_operation & NVGPU_CE_MEMSET) { 182 methodsize = (2 + (15 * iterations)) * sizeof(u32); 183 } 184 185 return methodsize; 186} 187 188int gk20a_ce_prepare_submit(u64 src_buf, 189 u64 dst_buf, 190 u64 size, 191 u32 *cmd_buf_cpu_va, 192 u32 max_cmd_buf_size, 193 unsigned int payload, 194 int launch_flags, 195 int request_operation, 196 u32 dma_copy_class) 197{ 198 u32 launch = 0; 199 u32 methodSize = 0; 200 u64 offset = 0; 201 u64 chunk_size = 0; 202 u64 chunk = size; 203 204 /* failure case handling */ 205 if ((gk20a_ce_get_method_size(request_operation, size) > 206 max_cmd_buf_size) || (!size) || 207 (request_operation > NVGPU_CE_MEMSET)) { 208 return 0; 209 } 210 211 /* set the channel object */ 212 cmd_buf_cpu_va[methodSize++] = 0x20018000; 213 cmd_buf_cpu_va[methodSize++] = dma_copy_class; 214 215 /* 216 * The purpose clear the memory in 2D rectangles. We get the ffs to 217 * determine the number of lines to copy. The only constraint is that 218 * maximum number of pixels per line is 4Gpix - 1, which is awkward for 219 * calculation, so we settle to 2Gpix per line to make calculatione 220 * more agreable 221 */ 222 223 /* The copy engine in 2D mode can have (2^32 - 1) x (2^32 - 1) pixels in 224 * a single submit, we are going to try to clear a range of up to 2Gpix 225 * multiple lines. 
Because we want to copy byte aligned we will be 226 * setting 1 byte pixels */ 227 228 /* 229 * per iteration 230 * <------------------------- 40 bits ------------------------------> 231 * 1 <------ ffs -------> 232 * <-----------up to 30 bits-----------> 233 */ 234 while (chunk) { 235 u32 width, height, shift; 236 237 /* 238 * We will be aligning to bytes, making the maximum number of 239 * pix per line 2Gb 240 */ 241 242 shift = MAX_CE_ALIGN(chunk) ? __ffs(MAX_CE_ALIGN(chunk)) : 243 MAX_CE_SHIFT; 244 height = chunk >> shift; 245 width = 1 << shift; 246 height = MAX_CE_ALIGN(height); 247 248 chunk_size = (u64) height * width; 249 250 /* reset launch flag */ 251 launch = 0; 252 253 if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER) { 254 /* setup the source */ 255 cmd_buf_cpu_va[methodSize++] = 0x20028100; 256 cmd_buf_cpu_va[methodSize++] = (u64_hi32(src_buf + 257 offset) & NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK); 258 cmd_buf_cpu_va[methodSize++] = (u64_lo32(src_buf + 259 offset) & NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK); 260 261 cmd_buf_cpu_va[methodSize++] = 0x20018098; 262 if (launch_flags & NVGPU_CE_SRC_LOCATION_LOCAL_FB) { 263 cmd_buf_cpu_va[methodSize++] = 0x00000000; 264 } else if (launch_flags & 265 NVGPU_CE_SRC_LOCATION_NONCOHERENT_SYSMEM) { 266 cmd_buf_cpu_va[methodSize++] = 0x00000002; 267 } else { 268 cmd_buf_cpu_va[methodSize++] = 0x00000001; 269 } 270 271 launch |= 0x00001000; 272 } else if (request_operation & NVGPU_CE_MEMSET) { 273 /* Remap from component A on 1 byte wide pixels */ 274 cmd_buf_cpu_va[methodSize++] = 0x200181c2; 275 cmd_buf_cpu_va[methodSize++] = 0x00000004; 276 277 cmd_buf_cpu_va[methodSize++] = 0x200181c0; 278 cmd_buf_cpu_va[methodSize++] = payload; 279 280 launch |= 0x00000400; 281 } else { 282 /* Illegal size */ 283 return 0; 284 } 285 286 /* setup the destination/output */ 287 cmd_buf_cpu_va[methodSize++] = 0x20068102; 288 cmd_buf_cpu_va[methodSize++] = (u64_hi32(dst_buf + 289 offset) & NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK); 290 
cmd_buf_cpu_va[methodSize++] = (u64_lo32(dst_buf + 291 offset) & NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK); 292 /* Pitch in/out */ 293 cmd_buf_cpu_va[methodSize++] = width; 294 cmd_buf_cpu_va[methodSize++] = width; 295 /* width and line count */ 296 cmd_buf_cpu_va[methodSize++] = width; 297 cmd_buf_cpu_va[methodSize++] = height; 298 299 cmd_buf_cpu_va[methodSize++] = 0x20018099; 300 if (launch_flags & NVGPU_CE_DST_LOCATION_LOCAL_FB) { 301 cmd_buf_cpu_va[methodSize++] = 0x00000000; 302 } else if (launch_flags & 303 NVGPU_CE_DST_LOCATION_NONCOHERENT_SYSMEM) { 304 cmd_buf_cpu_va[methodSize++] = 0x00000002; 305 } else { 306 cmd_buf_cpu_va[methodSize++] = 0x00000001; 307 } 308 309 launch |= 0x00002005; 310 311 if (launch_flags & NVGPU_CE_SRC_MEMORY_LAYOUT_BLOCKLINEAR) { 312 launch |= 0x00000000; 313 } else { 314 launch |= 0x00000080; 315 } 316 317 if (launch_flags & NVGPU_CE_DST_MEMORY_LAYOUT_BLOCKLINEAR) { 318 launch |= 0x00000000; 319 } else { 320 launch |= 0x00000100; 321 } 322 323 cmd_buf_cpu_va[methodSize++] = 0x200180c0; 324 cmd_buf_cpu_va[methodSize++] = launch; 325 offset += chunk_size; 326 chunk -= chunk_size; 327 } 328 329 return methodSize; 330} 331 332/* global CE app related apis */ 333int gk20a_init_ce_support(struct gk20a *g) 334{ 335 struct gk20a_ce_app *ce_app = &g->ce_app; 336 int err; 337 u32 ce_reset_mask; 338 339 ce_reset_mask = gk20a_fifo_get_all_ce_engine_reset_mask(g); 340 341 g->ops.mc.reset(g, ce_reset_mask); 342 343 nvgpu_cg_slcg_ce2_load_enable(g); 344 345 nvgpu_cg_blcg_ce_load_enable(g); 346 347 if (ce_app->initialised) { 348 /* assume this happen during poweron/poweroff GPU sequence */ 349 ce_app->app_state = NVGPU_CE_ACTIVE; 350 return 0; 351 } 352 353 nvgpu_log(g, gpu_dbg_fn, "ce: init"); 354 355 err = nvgpu_mutex_init(&ce_app->app_mutex); 356 if (err) { 357 return err; 358 } 359 360 nvgpu_mutex_acquire(&ce_app->app_mutex); 361 362 nvgpu_init_list_node(&ce_app->allocated_contexts); 363 ce_app->ctx_count = 0; 364 ce_app->next_ctx_id = 0; 365 
ce_app->initialised = true; 366 ce_app->app_state = NVGPU_CE_ACTIVE; 367 368 nvgpu_mutex_release(&ce_app->app_mutex); 369 370 if (g->ops.ce2.init_prod_values != NULL) { 371 g->ops.ce2.init_prod_values(g); 372 } 373 374 nvgpu_log(g, gpu_dbg_cde_ctx, "ce: init finished"); 375 376 return 0; 377} 378 379void gk20a_ce_destroy(struct gk20a *g) 380{ 381 struct gk20a_ce_app *ce_app = &g->ce_app; 382 struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save; 383 384 if (!ce_app->initialised) { 385 return; 386 } 387 388 ce_app->app_state = NVGPU_CE_SUSPEND; 389 ce_app->initialised = false; 390 391 nvgpu_mutex_acquire(&ce_app->app_mutex); 392 393 nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save, 394 &ce_app->allocated_contexts, gk20a_gpu_ctx, list) { 395 gk20a_ce_delete_gpu_context(ce_ctx); 396 } 397 398 nvgpu_init_list_node(&ce_app->allocated_contexts); 399 ce_app->ctx_count = 0; 400 ce_app->next_ctx_id = 0; 401 402 nvgpu_mutex_release(&ce_app->app_mutex); 403 404 nvgpu_mutex_destroy(&ce_app->app_mutex); 405} 406 407void gk20a_ce_suspend(struct gk20a *g) 408{ 409 struct gk20a_ce_app *ce_app = &g->ce_app; 410 411 if (!ce_app->initialised) { 412 return; 413 } 414 415 ce_app->app_state = NVGPU_CE_SUSPEND; 416 417 return; 418} 419 420/* CE app utility functions */ 421u32 gk20a_ce_create_context(struct gk20a *g, 422 int runlist_id, 423 int timeslice, 424 int runlist_level) 425{ 426 struct gk20a_gpu_ctx *ce_ctx; 427 struct gk20a_ce_app *ce_app = &g->ce_app; 428 struct nvgpu_setup_bind_args setup_bind_args; 429 u32 ctx_id = ~0; 430 int err = 0; 431 432 if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) { 433 return ctx_id; 434 } 435 436 ce_ctx = nvgpu_kzalloc(g, sizeof(*ce_ctx)); 437 if (!ce_ctx) { 438 return ctx_id; 439 } 440 441 err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex); 442 if (err) { 443 nvgpu_kfree(g, ce_ctx); 444 return ctx_id; 445 } 446 447 ce_ctx->g = g; 448 449 ce_ctx->cmd_buf_read_queue_offset = 0; 450 451 ce_ctx->vm = g->mm.ce.vm; 452 453 /* allocate a tsg if 
needed */ 454 ce_ctx->tsg = gk20a_tsg_open(g, nvgpu_current_pid(g)); 455 if (!ce_ctx->tsg) { 456 nvgpu_err(g, "ce: gk20a tsg not available"); 457 err = -ENOMEM; 458 goto end; 459 } 460 461 /* always kernel client needs privileged channel */ 462 ce_ctx->ch = gk20a_open_new_channel(g, runlist_id, true, 463 nvgpu_current_pid(g), nvgpu_current_tid(g)); 464 if (!ce_ctx->ch) { 465 nvgpu_err(g, "ce: gk20a channel not available"); 466 err = -ENOMEM; 467 goto end; 468 } 469 ce_ctx->ch->timeout.enabled = false; 470 471 /* bind the channel to the vm */ 472 err = g->ops.mm.vm_bind_channel(g->mm.ce.vm, ce_ctx->ch); 473 if (err) { 474 nvgpu_err(g, "ce: could not bind vm"); 475 goto end; 476 } 477 478 err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch); 479 if (err) { 480 nvgpu_err(g, "ce: unable to bind to tsg"); 481 goto end; 482 } 483 484 setup_bind_args.num_gpfifo_entries = 1024; 485 setup_bind_args.num_inflight_jobs = 0; 486 setup_bind_args.flags = 0; 487 /* allocate gpfifo (1024 should be more than enough) */ 488 err = nvgpu_channel_setup_bind(ce_ctx->ch, &setup_bind_args); 489 if (err) { 490 nvgpu_err(g, "ce: unable to setup and bind channel"); 491 goto end; 492 } 493 494 /* allocate command buffer from sysmem */ 495 err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, 496 NVGPU_CE_MAX_INFLIGHT_JOBS * 497 NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF, 498 &ce_ctx->cmd_buf_mem); 499 if (err) { 500 nvgpu_err(g, 501 "ce: could not allocate command buffer for CE context"); 502 goto end; 503 } 504 505 memset(ce_ctx->cmd_buf_mem.cpu_va, 0x00, ce_ctx->cmd_buf_mem.size); 506 507 /* -1 means default channel timeslice value */ 508 if (timeslice != -1) { 509 err = gk20a_fifo_tsg_set_timeslice(ce_ctx->tsg, timeslice); 510 if (err) { 511 nvgpu_err(g, 512 "ce: could not set the channel timeslice value for CE context"); 513 goto end; 514 } 515 } 516 517 /* -1 means default channel runlist level */ 518 if (runlist_level != -1) { 519 err = gk20a_tsg_set_runlist_interleave(ce_ctx->tsg, 520 
runlist_level); 521 if (err) { 522 nvgpu_err(g, 523 "ce: could not set the runlist interleave for CE context"); 524 goto end; 525 } 526 } 527 528 nvgpu_mutex_acquire(&ce_app->app_mutex); 529 ctx_id = ce_ctx->ctx_id = ce_app->next_ctx_id; 530 nvgpu_list_add(&ce_ctx->list, &ce_app->allocated_contexts); 531 ++ce_app->next_ctx_id; 532 ++ce_app->ctx_count; 533 nvgpu_mutex_release(&ce_app->app_mutex); 534 535 ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_ALLOCATED; 536 537end: 538 if (ctx_id == (u32)~0) { 539 nvgpu_mutex_acquire(&ce_app->app_mutex); 540 gk20a_ce_delete_gpu_context(ce_ctx); 541 nvgpu_mutex_release(&ce_app->app_mutex); 542 } 543 return ctx_id; 544 545} 546 547void gk20a_ce_delete_context(struct gk20a *g, 548 u32 ce_ctx_id) 549{ 550 gk20a_ce_delete_context_priv(g, ce_ctx_id); 551} 552 553void gk20a_ce_delete_context_priv(struct gk20a *g, 554 u32 ce_ctx_id) 555{ 556 struct gk20a_ce_app *ce_app = &g->ce_app; 557 struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save; 558 559 if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) { 560 return; 561 } 562 563 nvgpu_mutex_acquire(&ce_app->app_mutex); 564 565 nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save, 566 &ce_app->allocated_contexts, gk20a_gpu_ctx, list) { 567 if (ce_ctx->ctx_id == ce_ctx_id) { 568 gk20a_ce_delete_gpu_context(ce_ctx); 569 --ce_app->ctx_count; 570 break; 571 } 572 } 573 574 nvgpu_mutex_release(&ce_app->app_mutex); 575 return; 576}
diff --git a/include/gk20a/ce2_gk20a.h b/include/gk20a/ce2_gk20a.h
deleted file mode 100644
index df3a0e8..0000000
--- a/include/gk20a/ce2_gk20a.h
+++ /dev/null
@@ -1,156 +0,0 @@ 1/* 2 * drivers/video/tegra/host/gk20a/fifo_gk20a.h 3 * 4 * GK20A graphics copy engine (gr host) 5 * 6 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * DEALINGS IN THE SOFTWARE. 
25 */ 26#ifndef NVGPU_GK20A_CE2_GK20A_H 27#define NVGPU_GK20A_CE2_GK20A_H 28 29struct channel_gk20a; 30struct tsg_gk20a; 31 32void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base); 33u32 gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base); 34 35/* CE command utility macros */ 36#define NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK 0xffffffff 37#define NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK 0xff 38 39#define NVGPU_CE_MAX_INFLIGHT_JOBS 32 40#define NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF 256 41 42/* dma launch_flags */ 43enum { 44 /* location */ 45 NVGPU_CE_SRC_LOCATION_COHERENT_SYSMEM = (1 << 0), 46 NVGPU_CE_SRC_LOCATION_NONCOHERENT_SYSMEM = (1 << 1), 47 NVGPU_CE_SRC_LOCATION_LOCAL_FB = (1 << 2), 48 NVGPU_CE_DST_LOCATION_COHERENT_SYSMEM = (1 << 3), 49 NVGPU_CE_DST_LOCATION_NONCOHERENT_SYSMEM = (1 << 4), 50 NVGPU_CE_DST_LOCATION_LOCAL_FB = (1 << 5), 51 52 /* memory layout */ 53 NVGPU_CE_SRC_MEMORY_LAYOUT_PITCH = (1 << 6), 54 NVGPU_CE_SRC_MEMORY_LAYOUT_BLOCKLINEAR = (1 << 7), 55 NVGPU_CE_DST_MEMORY_LAYOUT_PITCH = (1 << 8), 56 NVGPU_CE_DST_MEMORY_LAYOUT_BLOCKLINEAR = (1 << 9), 57 58 /* transfer type */ 59 NVGPU_CE_DATA_TRANSFER_TYPE_PIPELINED = (1 << 10), 60 NVGPU_CE_DATA_TRANSFER_TYPE_NON_PIPELINED = (1 << 11), 61}; 62 63/* CE operation mode */ 64enum { 65 NVGPU_CE_PHYS_MODE_TRANSFER = (1 << 0), 66 NVGPU_CE_MEMSET = (1 << 1), 67}; 68 69/* CE app state machine flags */ 70enum { 71 NVGPU_CE_ACTIVE = (1 << 0), 72 NVGPU_CE_SUSPEND = (1 << 1), 73}; 74 75/* gpu context state machine flags */ 76enum { 77 NVGPU_CE_GPU_CTX_ALLOCATED = (1 << 0), 78 NVGPU_CE_GPU_CTX_DELETED = (1 << 1), 79}; 80 81/* global ce app db */ 82struct gk20a_ce_app { 83 bool initialised; 84 struct nvgpu_mutex app_mutex; 85 int app_state; 86 87 struct nvgpu_list_node allocated_contexts; 88 u32 ctx_count; 89 u32 next_ctx_id; 90}; 91 92/* ce context db */ 93struct gk20a_gpu_ctx { 94 struct gk20a *g; 95 u32 ctx_id; 96 struct nvgpu_mutex gpu_ctx_mutex; 97 int gpu_ctx_state; 98 99 /* tsg related 
data */ 100 struct tsg_gk20a *tsg; 101 102 /* channel related data */ 103 struct channel_gk20a *ch; 104 struct vm_gk20a *vm; 105 106 /* cmd buf mem_desc */ 107 struct nvgpu_mem cmd_buf_mem; 108 struct gk20a_fence *postfences[NVGPU_CE_MAX_INFLIGHT_JOBS]; 109 110 struct nvgpu_list_node list; 111 112 u32 cmd_buf_read_queue_offset; 113}; 114 115static inline struct gk20a_gpu_ctx * 116gk20a_gpu_ctx_from_list(struct nvgpu_list_node *node) 117{ 118 return (struct gk20a_gpu_ctx *) 119 ((uintptr_t)node - offsetof(struct gk20a_gpu_ctx, list)); 120}; 121 122/* global CE app related apis */ 123int gk20a_init_ce_support(struct gk20a *g); 124void gk20a_ce_suspend(struct gk20a *g); 125void gk20a_ce_destroy(struct gk20a *g); 126 127/* CE app utility functions */ 128u32 gk20a_ce_create_context(struct gk20a *g, 129 int runlist_id, 130 int timeslice, 131 int runlist_level); 132int gk20a_ce_execute_ops(struct gk20a *g, 133 u32 ce_ctx_id, 134 u64 src_buf, 135 u64 dst_buf, 136 u64 size, 137 unsigned int payload, 138 int launch_flags, 139 int request_operation, 140 u32 submit_flags, 141 struct gk20a_fence **gk20a_fence_out); 142void gk20a_ce_delete_context_priv(struct gk20a *g, 143 u32 ce_ctx_id); 144void gk20a_ce_delete_context(struct gk20a *g, 145 u32 ce_ctx_id); 146int gk20a_ce_prepare_submit(u64 src_buf, 147 u64 dst_buf, 148 u64 size, 149 u32 *cmd_buf_cpu_va, 150 u32 max_cmd_buf_size, 151 unsigned int payload, 152 int launch_flags, 153 int request_operation, 154 u32 dma_copy_class); 155 156#endif /*NVGPU_GK20A_CE2_GK20A_H*/
diff --git a/include/gk20a/clk_gk20a.h b/include/gk20a/clk_gk20a.h
deleted file mode 100644
index b8ec942..0000000
--- a/include/gk20a/clk_gk20a.h
+++ /dev/null
@@ -1,134 +0,0 @@ 1/* 2 * Copyright (c) 2011 - 2019, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22#ifndef CLK_GK20A_H 23#define CLK_GK20A_H 24 25#include <nvgpu/lock.h> 26 27#if defined(CONFIG_COMMON_CLK) 28#include <linux/clk-provider.h> 29#endif 30 31#define GPUFREQ_TABLE_END ~(u32)1 32enum { 33 /* only one PLL for gk20a */ 34 GK20A_GPC_PLL = 0, 35 /* 2 PLL revisions for gm20b */ 36 GM20B_GPC_PLL_B1, 37 GM20B_GPC_PLL_C1, 38}; 39 40enum gpc_pll_mode { 41 GPC_PLL_MODE_F = 0, /* fixed frequency mode a.k.a legacy mode */ 42 GPC_PLL_MODE_DVFS, /* DVFS mode a.k.a NA mode */ 43}; 44 45struct na_dvfs { 46 u32 n_int; 47 u32 sdm_din; 48 int dfs_coeff; 49 int dfs_det_max; 50 int dfs_ext_cal; 51 int uv_cal; 52 int mv; 53}; 54 55struct pll { 56 u32 id; 57 u32 clk_in; /* KHz */ 58 u32 M; 59 u32 N; 60 u32 PL; 61 u32 freq; /* KHz */ 62 bool enabled; 63 enum gpc_pll_mode mode; 64 struct na_dvfs dvfs; 65}; 66 67struct pll_parms { 68 u32 min_freq, max_freq; /* KHz */ 69 u32 min_vco, max_vco; /* KHz */ 70 u32 min_u, max_u; /* KHz */ 71 u32 min_M, max_M; 72 u32 min_N, max_N; 73 u32 min_PL, max_PL; 74 /* NA mode parameters*/ 75 int coeff_slope, coeff_offs; /* coeff = slope * V + offs */ 76 int uvdet_slope, uvdet_offs; /* uV = slope * det + offs */ 77 u32 vco_ctrl; 78 /* 79 * Timing parameters in us. Lock timeout is applied to locking in fixed 80 * frequency mode and to dynamic ramp in any mode; does not affect lock 81 * latency, since lock/ramp done status bit is polled. NA mode lock and 82 * and IDDQ exit delays set the time of the respective opertaions with 83 * no status polling. 
84 */ 85 u32 lock_timeout; 86 u32 na_lock_delay; 87 u32 iddq_exit_delay; 88 /* NA mode DFS control */ 89 u32 dfs_ctrl; 90}; 91 92struct namemap_cfg; 93 94struct clk_gk20a { 95 struct gk20a *g; 96#if defined(CONFIG_COMMON_CLK) 97 struct clk *tegra_clk; 98 struct clk *tegra_clk_parent; 99 struct clk_hw hw; 100#endif 101 struct pll gpc_pll; 102 struct pll gpc_pll_last; 103 struct nvgpu_mutex clk_mutex; 104 struct namemap_cfg *clk_namemap; 105 u32 namemap_num; 106 u32 *namemap_xlat_table; 107 bool sw_ready; 108 bool clk_hw_on; 109 bool debugfs_set; 110 int pll_poweron_uv; 111 unsigned long dvfs_safe_max_freq; 112}; 113 114#if defined(CONFIG_COMMON_CLK) 115#define to_clk_gk20a(_hw) container_of(_hw, struct clk_gk20a, hw) 116#endif 117 118struct gpu_ops; 119 120#define KHZ 1000 121#define MHZ 1000000 122 123static inline unsigned long rate_gpc2clk_to_gpu(unsigned long rate) 124{ 125 /* convert the kHz gpc2clk frequency to Hz gpcpll frequency */ 126 return (rate * KHZ) / 2; 127} 128static inline unsigned long rate_gpu_to_gpc2clk(unsigned long rate) 129{ 130 /* convert the Hz gpcpll frequency to kHz gpc2clk frequency */ 131 return (rate * 2) / KHZ; 132} 133 134#endif /* CLK_GK20A_H */
diff --git a/include/gk20a/css_gr_gk20a.c b/include/gk20a/css_gr_gk20a.c
deleted file mode 100644
index 28a3d49..0000000
--- a/include/gk20a/css_gr_gk20a.c
+++ /dev/null
@@ -1,636 +0,0 @@ 1/* 2 * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). 3 * 4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <nvgpu/bitops.h> 26#include <nvgpu/kmem.h> 27#include <nvgpu/lock.h> 28#include <nvgpu/dma.h> 29#include <nvgpu/mm.h> 30#include <nvgpu/sizes.h> 31#include <nvgpu/barrier.h> 32#include <nvgpu/log.h> 33#include <nvgpu/bug.h> 34#include <nvgpu/io.h> 35#include <nvgpu/utils.h> 36#include <nvgpu/channel.h> 37#include <nvgpu/unit.h> 38 39#include "gk20a.h" 40#include "css_gr_gk20a.h" 41 42#include <nvgpu/hw/gk20a/hw_perf_gk20a.h> 43 44/* check client for pointed perfmon ownership */ 45#define CONTAINS_PERFMON(cl, pm) \ 46 ((cl)->perfmon_start <= (pm) && \ 47 ((pm) - (cl)->perfmon_start) < (cl)->perfmon_count) 48 49/* address of fifo entry by offset */ 50#define CSS_FIFO_ENTRY(fifo, offs) \ 51 ((struct gk20a_cs_snapshot_fifo_entry *)(((char *)(fifo)) + (offs))) 52 53/* calculate area capacity in number of fifo entries */ 54#define CSS_FIFO_ENTRY_CAPACITY(s) \ 55 (((s) - sizeof(struct gk20a_cs_snapshot_fifo)) \ 56 / sizeof(struct gk20a_cs_snapshot_fifo_entry)) 57 58/* reserved to indicate failures with data */ 59#define CSS_FIRST_PERFMON_ID 32 60/* should correlate with size of gk20a_cs_snapshot_fifo_entry::perfmon_id */ 61#define CSS_MAX_PERFMON_IDS 256 62 63/* reports whether the hw queue overflowed */ 64bool css_hw_get_overflow_status(struct gk20a *g) 65{ 66 const u32 st = perf_pmasys_control_membuf_status_overflowed_f(); 67 return st == (gk20a_readl(g, perf_pmasys_control_r()) & st); 68} 69 70/* returns how many pending snapshot entries are pending */ 71u32 css_hw_get_pending_snapshots(struct gk20a *g) 72{ 73 return gk20a_readl(g, perf_pmasys_mem_bytes_r()) / 74 sizeof(struct gk20a_cs_snapshot_fifo_entry); 75} 76 77/* informs hw how many snapshots have been processed (frees up fifo space) */ 78void css_hw_set_handled_snapshots(struct gk20a *g, u32 done) 79{ 80 if (done > 0) { 81 gk20a_writel(g, perf_pmasys_mem_bump_r(), 82 done * sizeof(struct gk20a_cs_snapshot_fifo_entry)); 83 } 84} 85 86/* disable streaming to memory */ 87static void 
css_hw_reset_streaming(struct gk20a *g) 88{ 89 u32 engine_status; 90 91 /* reset the perfmon */ 92 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_PERFMON)); 93 94 /* RBUFEMPTY must be set -- otherwise we'll pick up */ 95 /* snapshot that have been queued up from earlier */ 96 engine_status = gk20a_readl(g, perf_pmasys_enginestatus_r()); 97 WARN_ON(0 == (engine_status 98 & perf_pmasys_enginestatus_rbufempty_empty_f())); 99 100 /* turn off writes */ 101 gk20a_writel(g, perf_pmasys_control_r(), 102 perf_pmasys_control_membuf_clear_status_doit_f()); 103 104 /* pointing all pending snapshots as handled */ 105 css_hw_set_handled_snapshots(g, css_hw_get_pending_snapshots(g)); 106} 107 108/* 109 * WARNING: all css_gr_XXX functions are local and expected to be called 110 * from locked context (protected by cs_lock) 111 */ 112 113static int css_gr_create_shared_data(struct gr_gk20a *gr) 114{ 115 struct gk20a_cs_snapshot *data; 116 117 if (gr->cs_data) 118 return 0; 119 120 data = nvgpu_kzalloc(gr->g, sizeof(*data)); 121 if (!data) 122 return -ENOMEM; 123 124 nvgpu_init_list_node(&data->clients); 125 gr->cs_data = data; 126 127 return 0; 128} 129 130int css_hw_enable_snapshot(struct channel_gk20a *ch, 131 struct gk20a_cs_snapshot_client *cs_client) 132{ 133 struct gk20a *g = ch->g; 134 struct mm_gk20a *mm = &g->mm; 135 struct gr_gk20a *gr = &g->gr; 136 struct gk20a_cs_snapshot *data = gr->cs_data; 137 u32 snapshot_size = cs_client->snapshot_size; 138 int ret; 139 140 u32 virt_addr_lo; 141 u32 virt_addr_hi; 142 u32 inst_pa_page; 143 144 if (data->hw_snapshot) 145 return 0; 146 147 if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE) 148 snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE; 149 150 ret = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, snapshot_size, 151 &data->hw_memdesc); 152 if (ret) 153 return ret; 154 155 /* perf output buffer may not cross a 4GB boundary - with a separate */ 156 /* va smaller than that, it won't but check anyway */ 157 if (!data->hw_memdesc.cpu_va || 158 
data->hw_memdesc.size < snapshot_size || 159 data->hw_memdesc.gpu_va + u64_lo32(snapshot_size) > SZ_4G) { 160 ret = -EFAULT; 161 goto failed_allocation; 162 } 163 164 data->hw_snapshot = 165 (struct gk20a_cs_snapshot_fifo_entry *)data->hw_memdesc.cpu_va; 166 data->hw_end = data->hw_snapshot + 167 snapshot_size / sizeof(struct gk20a_cs_snapshot_fifo_entry); 168 data->hw_get = data->hw_snapshot; 169 memset(data->hw_snapshot, 0xff, snapshot_size); 170 171 /* address and size are aligned to 32 bytes, the lowest bits read back 172 * as zeros */ 173 virt_addr_lo = u64_lo32(data->hw_memdesc.gpu_va); 174 virt_addr_hi = u64_hi32(data->hw_memdesc.gpu_va); 175 176 css_hw_reset_streaming(g); 177 178 gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo); 179 gk20a_writel(g, perf_pmasys_outbaseupper_r(), 180 perf_pmasys_outbaseupper_ptr_f(virt_addr_hi)); 181 gk20a_writel(g, perf_pmasys_outsize_r(), snapshot_size); 182 183 /* this field is aligned to 4K */ 184 inst_pa_page = nvgpu_inst_block_addr(g, &g->mm.hwpm.inst_block) >> 12; 185 186 /* A write to MEM_BLOCK triggers the block bind operation. 
MEM_BLOCK 187 * should be written last */ 188 gk20a_writel(g, perf_pmasys_mem_block_r(), 189 perf_pmasys_mem_block_base_f(inst_pa_page) | 190 nvgpu_aperture_mask(g, &mm->hwpm.inst_block, 191 perf_pmasys_mem_block_target_sys_ncoh_f(), 192 perf_pmasys_mem_block_target_sys_coh_f(), 193 perf_pmasys_mem_block_target_lfb_f()) | 194 perf_pmasys_mem_block_valid_true_f()); 195 196 nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots enabled\n"); 197 198 return 0; 199 200failed_allocation: 201 if (data->hw_memdesc.size) { 202 nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc); 203 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); 204 } 205 data->hw_snapshot = NULL; 206 207 return ret; 208} 209 210void css_hw_disable_snapshot(struct gr_gk20a *gr) 211{ 212 struct gk20a *g = gr->g; 213 struct gk20a_cs_snapshot *data = gr->cs_data; 214 215 if (!data->hw_snapshot) 216 return; 217 218 css_hw_reset_streaming(g); 219 220 gk20a_writel(g, perf_pmasys_outbase_r(), 0); 221 gk20a_writel(g, perf_pmasys_outbaseupper_r(), 222 perf_pmasys_outbaseupper_ptr_f(0)); 223 gk20a_writel(g, perf_pmasys_outsize_r(), 0); 224 225 gk20a_writel(g, perf_pmasys_mem_block_r(), 226 perf_pmasys_mem_block_base_f(0) | 227 perf_pmasys_mem_block_valid_false_f() | 228 perf_pmasys_mem_block_target_f(0)); 229 230 nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc); 231 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); 232 data->hw_snapshot = NULL; 233 234 nvgpu_log_info(g, "cyclestats: buffer for hardware snapshots disabled\n"); 235} 236 237static void css_gr_free_shared_data(struct gr_gk20a *gr) 238{ 239 struct gk20a *g = gr->g; 240 241 if (gr->cs_data) { 242 /* the clients list is expected to be empty */ 243 g->ops.css.disable_snapshot(gr); 244 245 /* release the objects */ 246 nvgpu_kfree(gr->g, gr->cs_data); 247 gr->cs_data = NULL; 248 } 249} 250 251 252struct gk20a_cs_snapshot_client* 253css_gr_search_client(struct nvgpu_list_node *clients, u32 perfmon) 254{ 255 struct 
gk20a_cs_snapshot_client *client; 256 257 nvgpu_list_for_each_entry(client, clients, 258 gk20a_cs_snapshot_client, list) { 259 if (CONTAINS_PERFMON(client, perfmon)) 260 return client; 261 } 262 263 return NULL; 264} 265 266static int css_gr_flush_snapshots(struct channel_gk20a *ch) 267{ 268 struct gk20a *g = ch->g; 269 struct gr_gk20a *gr = &g->gr; 270 struct gk20a_cs_snapshot *css = gr->cs_data; 271 struct gk20a_cs_snapshot_client *cur; 272 u32 pending, completed; 273 bool hw_overflow; 274 int err; 275 276 /* variables for iterating over HW entries */ 277 u32 sid; 278 struct gk20a_cs_snapshot_fifo_entry *src; 279 280 /* due to data sharing with userspace we allowed update only */ 281 /* overflows and put field in the fifo header */ 282 struct gk20a_cs_snapshot_fifo *dst; 283 struct gk20a_cs_snapshot_fifo_entry *dst_get; 284 struct gk20a_cs_snapshot_fifo_entry *dst_put; 285 struct gk20a_cs_snapshot_fifo_entry *dst_nxt; 286 struct gk20a_cs_snapshot_fifo_entry *dst_head; 287 struct gk20a_cs_snapshot_fifo_entry *dst_tail; 288 289 if (!css) 290 return -EINVAL; 291 292 if (nvgpu_list_empty(&css->clients)) 293 return -EBADF; 294 295 /* check data available */ 296 err = g->ops.css.check_data_available(ch, &pending, &hw_overflow); 297 if (err) 298 return err; 299 300 if (!pending) 301 return 0; 302 303 if (hw_overflow) { 304 nvgpu_list_for_each_entry(cur, &css->clients, 305 gk20a_cs_snapshot_client, list) { 306 cur->snapshot->hw_overflow_events_occured++; 307 } 308 309 nvgpu_warn(g, "cyclestats: hardware overflow detected"); 310 } 311 312 /* process all items in HW buffer */ 313 sid = 0; 314 completed = 0; 315 cur = NULL; 316 dst = NULL; 317 dst_put = NULL; 318 src = css->hw_get; 319 320 /* proceed all completed records */ 321 while (sid < pending && 0 == src->zero0) { 322 /* we may have a new perfmon_id which required to */ 323 /* switch to a new client -> let's forget current */ 324 if (cur && !CONTAINS_PERFMON(cur, src->perfmon_id)) { 325 dst->put = (char *)dst_put - 
(char *)dst; 326 dst = NULL; 327 cur = NULL; 328 } 329 330 /* now we have to select a new current client */ 331 /* the client selection rate depends from experiment */ 332 /* activity but on Android usually happened 1-2 times */ 333 if (!cur) { 334 cur = css_gr_search_client(&css->clients, 335 src->perfmon_id); 336 if (cur) { 337 /* found - setup all required data */ 338 dst = cur->snapshot; 339 dst_get = CSS_FIFO_ENTRY(dst, dst->get); 340 dst_put = CSS_FIFO_ENTRY(dst, dst->put); 341 dst_head = CSS_FIFO_ENTRY(dst, dst->start); 342 dst_tail = CSS_FIFO_ENTRY(dst, dst->end); 343 344 dst_nxt = dst_put + 1; 345 if (dst_nxt == dst_tail) 346 dst_nxt = dst_head; 347 } else { 348 /* client not found - skipping this entry */ 349 nvgpu_warn(g, "cyclestats: orphaned perfmon %u", 350 src->perfmon_id); 351 goto next_hw_fifo_entry; 352 } 353 } 354 355 /* check for software overflows */ 356 if (dst_nxt == dst_get) { 357 /* no data copy, no pointer updates */ 358 dst->sw_overflow_events_occured++; 359 nvgpu_warn(g, "cyclestats: perfmon %u soft overflow", 360 src->perfmon_id); 361 } else { 362 *dst_put = *src; 363 completed++; 364 365 dst_put = dst_nxt++; 366 367 if (dst_nxt == dst_tail) 368 dst_nxt = dst_head; 369 } 370 371next_hw_fifo_entry: 372 sid++; 373 if (++src >= css->hw_end) 374 src = css->hw_snapshot; 375 } 376 377 /* update client put pointer if necessary */ 378 if (cur && dst) 379 dst->put = (char *)dst_put - (char *)dst; 380 381 /* re-set HW buffer after processing taking wrapping into account */ 382 if (css->hw_get < src) { 383 memset(css->hw_get, 0xff, (src - css->hw_get) * sizeof(*src)); 384 } else { 385 memset(css->hw_snapshot, 0xff, 386 (src - css->hw_snapshot) * sizeof(*src)); 387 memset(css->hw_get, 0xff, 388 (css->hw_end - css->hw_get) * sizeof(*src)); 389 } 390 gr->cs_data->hw_get = src; 391 392 if (g->ops.css.set_handled_snapshots) 393 g->ops.css.set_handled_snapshots(g, sid); 394 395 if (completed != sid) { 396 /* not all entries proceed correctly. 
some of problems */ 397 /* reported as overflows, some as orphaned perfmons, */ 398 /* but it will be better notify with summary about it */ 399 nvgpu_warn(g, "cyclestats: completed %u from %u entries", 400 completed, pending); 401 } 402 403 return 0; 404} 405 406u32 css_gr_allocate_perfmon_ids(struct gk20a_cs_snapshot *data, 407 u32 count) 408{ 409 unsigned long *pids = data->perfmon_ids; 410 unsigned int f; 411 412 f = bitmap_find_next_zero_area(pids, CSS_MAX_PERFMON_IDS, 413 CSS_FIRST_PERFMON_ID, count, 0); 414 if (f > CSS_MAX_PERFMON_IDS) 415 f = 0; 416 else 417 bitmap_set(pids, f, count); 418 419 return f; 420} 421 422u32 css_gr_release_perfmon_ids(struct gk20a_cs_snapshot *data, 423 u32 start, 424 u32 count) 425{ 426 unsigned long *pids = data->perfmon_ids; 427 u32 end = start + count; 428 u32 cnt = 0; 429 430 if (start >= CSS_FIRST_PERFMON_ID && end <= CSS_MAX_PERFMON_IDS) { 431 bitmap_clear(pids, start, count); 432 cnt = count; 433 } 434 435 return cnt; 436} 437 438 439static int css_gr_free_client_data(struct gk20a *g, 440 struct gk20a_cs_snapshot *data, 441 struct gk20a_cs_snapshot_client *client) 442{ 443 int ret = 0; 444 445 if (client->list.next && client->list.prev) 446 nvgpu_list_del(&client->list); 447 448 if (client->perfmon_start && client->perfmon_count 449 && g->ops.css.release_perfmon_ids) { 450 if (client->perfmon_count != g->ops.css.release_perfmon_ids(data, 451 client->perfmon_start, client->perfmon_count)) 452 ret = -EINVAL; 453 } 454 455 return ret; 456} 457 458static int css_gr_create_client_data(struct gk20a *g, 459 struct gk20a_cs_snapshot *data, 460 u32 perfmon_count, 461 struct gk20a_cs_snapshot_client *cur) 462{ 463 /* 464 * Special handling in-case of rm-server 465 * 466 * client snapshot buffer will not be mapped 467 * in-case of rm-server its only mapped in 468 * guest side 469 */ 470 if (cur->snapshot) { 471 memset(cur->snapshot, 0, sizeof(*cur->snapshot)); 472 cur->snapshot->start = sizeof(*cur->snapshot); 473 /* we should be 
ensure that can fit all fifo entries here */ 474 cur->snapshot->end = 475 CSS_FIFO_ENTRY_CAPACITY(cur->snapshot_size) 476 * sizeof(struct gk20a_cs_snapshot_fifo_entry) 477 + sizeof(struct gk20a_cs_snapshot_fifo); 478 cur->snapshot->get = cur->snapshot->start; 479 cur->snapshot->put = cur->snapshot->start; 480 } 481 482 cur->perfmon_count = perfmon_count; 483 484 /* In virtual case, perfmon ID allocation is handled by the server 485 * at the time of the attach (allocate_perfmon_ids is NULL in this case) 486 */ 487 if (cur->perfmon_count && g->ops.css.allocate_perfmon_ids) { 488 cur->perfmon_start = g->ops.css.allocate_perfmon_ids(data, 489 cur->perfmon_count); 490 if (!cur->perfmon_start) 491 return -ENOENT; 492 } 493 494 nvgpu_list_add_tail(&cur->list, &data->clients); 495 496 return 0; 497} 498 499 500int gr_gk20a_css_attach(struct channel_gk20a *ch, 501 u32 perfmon_count, 502 u32 *perfmon_start, 503 struct gk20a_cs_snapshot_client *cs_client) 504{ 505 int ret = 0; 506 struct gk20a *g = ch->g; 507 struct gr_gk20a *gr; 508 509 /* we must have a placeholder to store pointer to client structure */ 510 if (!cs_client) 511 return -EINVAL; 512 513 if (!perfmon_count || 514 perfmon_count > CSS_MAX_PERFMON_IDS - CSS_FIRST_PERFMON_ID) 515 return -EINVAL; 516 517 nvgpu_speculation_barrier(); 518 519 gr = &g->gr; 520 521 nvgpu_mutex_acquire(&gr->cs_lock); 522 523 ret = css_gr_create_shared_data(gr); 524 if (ret) 525 goto failed; 526 527 ret = css_gr_create_client_data(g, gr->cs_data, 528 perfmon_count, 529 cs_client); 530 if (ret) 531 goto failed; 532 533 ret = g->ops.css.enable_snapshot(ch, cs_client); 534 if (ret) 535 goto failed; 536 537 if (perfmon_start) 538 *perfmon_start = cs_client->perfmon_start; 539 540 nvgpu_mutex_release(&gr->cs_lock); 541 542 return 0; 543 544failed: 545 if (gr->cs_data) { 546 if (cs_client) { 547 css_gr_free_client_data(g, gr->cs_data, cs_client); 548 cs_client = NULL; 549 } 550 551 if (nvgpu_list_empty(&gr->cs_data->clients)) 552 
css_gr_free_shared_data(gr); 553 } 554 nvgpu_mutex_release(&gr->cs_lock); 555 556 if (perfmon_start) 557 *perfmon_start = 0; 558 559 return ret; 560} 561 562int gr_gk20a_css_detach(struct channel_gk20a *ch, 563 struct gk20a_cs_snapshot_client *cs_client) 564{ 565 int ret = 0; 566 struct gk20a *g = ch->g; 567 struct gr_gk20a *gr; 568 569 if (!cs_client) 570 return -EINVAL; 571 572 gr = &g->gr; 573 nvgpu_mutex_acquire(&gr->cs_lock); 574 if (gr->cs_data) { 575 struct gk20a_cs_snapshot *data = gr->cs_data; 576 577 if (g->ops.css.detach_snapshot) 578 g->ops.css.detach_snapshot(ch, cs_client); 579 580 ret = css_gr_free_client_data(g, data, cs_client); 581 if (nvgpu_list_empty(&data->clients)) 582 css_gr_free_shared_data(gr); 583 } else { 584 ret = -EBADF; 585 } 586 nvgpu_mutex_release(&gr->cs_lock); 587 588 return ret; 589} 590 591int gr_gk20a_css_flush(struct channel_gk20a *ch, 592 struct gk20a_cs_snapshot_client *cs_client) 593{ 594 int ret = 0; 595 struct gk20a *g = ch->g; 596 struct gr_gk20a *gr; 597 598 if (!cs_client) 599 return -EINVAL; 600 601 gr = &g->gr; 602 nvgpu_mutex_acquire(&gr->cs_lock); 603 ret = css_gr_flush_snapshots(ch); 604 nvgpu_mutex_release(&gr->cs_lock); 605 606 return ret; 607} 608 609/* helper function with locking to cleanup snapshot code code in gr_gk20a.c */ 610void gr_gk20a_free_cyclestats_snapshot_data(struct gk20a *g) 611{ 612 struct gr_gk20a *gr = &g->gr; 613 614 nvgpu_mutex_acquire(&gr->cs_lock); 615 css_gr_free_shared_data(gr); 616 nvgpu_mutex_release(&gr->cs_lock); 617 nvgpu_mutex_destroy(&gr->cs_lock); 618} 619 620int css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, 621 bool *hw_overflow) 622{ 623 struct gk20a *g = ch->g; 624 struct gr_gk20a *gr = &g->gr; 625 struct gk20a_cs_snapshot *css = gr->cs_data; 626 627 if (!css->hw_snapshot) 628 return -EINVAL; 629 630 *pending = css_hw_get_pending_snapshots(g); 631 if (!*pending) 632 return 0; 633 634 *hw_overflow = css_hw_get_overflow_status(g); 635 return 0; 636}
diff --git a/include/gk20a/css_gr_gk20a.h b/include/gk20a/css_gr_gk20a.h
deleted file mode 100644
index bf8890b..0000000
--- a/include/gk20a/css_gr_gk20a.h
+++ /dev/null
@@ -1,151 +0,0 @@ 1/* 2 * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). 3 * 4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24 25#ifndef CSS_GR_GK20A_H 26#define CSS_GR_GK20A_H 27 28#include <nvgpu/nvgpu_mem.h> 29#include <nvgpu/list.h> 30 31/* the minimal size of HW buffer - should be enough to avoid HW overflows */ 32#define CSS_MIN_HW_SNAPSHOT_SIZE (8 * 1024 * 1024) 33 34struct gk20a; 35struct gr_gk20a; 36struct channel_gk20a; 37 38/* cycle stats fifo header (must match NvSnapshotBufferFifo) */ 39struct gk20a_cs_snapshot_fifo { 40 /* layout description of the buffer */ 41 u32 start; 42 u32 end; 43 44 /* snafu bits */ 45 u32 hw_overflow_events_occured; 46 u32 sw_overflow_events_occured; 47 48 /* the kernel copies new entries to put and 49 * increment the put++. 
if put == get then 50 * overflowEventsOccured++ 51 */ 52 u32 put; 53 u32 _reserved10; 54 u32 _reserved11; 55 u32 _reserved12; 56 57 /* the driver/client reads from get until 58 * put==get, get++ */ 59 u32 get; 60 u32 _reserved20; 61 u32 _reserved21; 62 u32 _reserved22; 63 64 /* unused */ 65 u32 _reserved30; 66 u32 _reserved31; 67 u32 _reserved32; 68 u32 _reserved33; 69}; 70 71/* cycle stats fifo entry (must match NvSnapshotBufferFifoEntry) */ 72struct gk20a_cs_snapshot_fifo_entry { 73 /* global 48 timestamp */ 74 u32 timestamp31_00:32; 75 u32 timestamp39_32:8; 76 77 /* id of perfmon, should correlate with CSS_MAX_PERFMON_IDS */ 78 u32 perfmon_id:8; 79 80 /* typically samples_counter is wired to #pmtrigger count */ 81 u32 samples_counter:12; 82 83 /* DS=Delay Sample, SZ=Size (0=32B, 1=16B) */ 84 u32 ds:1; 85 u32 sz:1; 86 u32 zero0:1; 87 u32 zero1:1; 88 89 /* counter results */ 90 u32 event_cnt:32; 91 u32 trigger0_cnt:32; 92 u32 trigger1_cnt:32; 93 u32 sample_cnt:32; 94 95 /* Local PmTrigger results for Maxwell+ or padding otherwise */ 96 u16 local_trigger_b_count:16; 97 u16 book_mark_b:16; 98 u16 local_trigger_a_count:16; 99 u16 book_mark_a:16; 100}; 101 102/* cycle stats snapshot client data (e.g. 
associated with channel) */ 103struct gk20a_cs_snapshot_client { 104 struct nvgpu_list_node list; 105 struct gk20a_cs_snapshot_fifo *snapshot; 106 u32 snapshot_size; 107 u32 perfmon_start; 108 u32 perfmon_count; 109}; 110 111static inline struct gk20a_cs_snapshot_client * 112gk20a_cs_snapshot_client_from_list(struct nvgpu_list_node *node) 113{ 114 return (struct gk20a_cs_snapshot_client *) 115 ((uintptr_t)node - offsetof(struct gk20a_cs_snapshot_client, list)); 116}; 117 118/* should correlate with size of gk20a_cs_snapshot_fifo_entry::perfmon_id */ 119#define CSS_MAX_PERFMON_IDS 256 120 121/* local definitions to avoid hardcodes sizes and shifts */ 122#define PM_BITMAP_SIZE DIV_ROUND_UP(CSS_MAX_PERFMON_IDS, BITS_PER_LONG) 123 124/* cycle stats snapshot control structure for one HW entry and many clients */ 125struct gk20a_cs_snapshot { 126 unsigned long perfmon_ids[PM_BITMAP_SIZE]; 127 struct nvgpu_list_node clients; 128 struct nvgpu_mem hw_memdesc; 129 /* pointer to allocated cpu_va memory where GPU place data */ 130 struct gk20a_cs_snapshot_fifo_entry *hw_snapshot; 131 struct gk20a_cs_snapshot_fifo_entry *hw_end; 132 struct gk20a_cs_snapshot_fifo_entry *hw_get; 133}; 134 135bool css_hw_get_overflow_status(struct gk20a *g); 136u32 css_hw_get_pending_snapshots(struct gk20a *g); 137void css_hw_set_handled_snapshots(struct gk20a *g, u32 done); 138int css_hw_enable_snapshot(struct channel_gk20a *ch, 139 struct gk20a_cs_snapshot_client *cs_client); 140void css_hw_disable_snapshot(struct gr_gk20a *gr); 141u32 css_gr_allocate_perfmon_ids(struct gk20a_cs_snapshot *data, 142 u32 count); 143u32 css_gr_release_perfmon_ids(struct gk20a_cs_snapshot *data, 144 u32 start, 145 u32 count); 146int css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, 147 bool *hw_overflow); 148struct gk20a_cs_snapshot_client* 149css_gr_search_client(struct nvgpu_list_node *clients, u32 perfmon); 150 151#endif /* CSS_GR_GK20A_H */
diff --git a/include/gk20a/dbg_gpu_gk20a.c b/include/gk20a/dbg_gpu_gk20a.c
deleted file mode 100644
index 1686d01..0000000
--- a/include/gk20a/dbg_gpu_gk20a.c
+++ /dev/null
@@ -1,388 +0,0 @@ 1/* 2 * Tegra GK20A GPU Debugger/Profiler Driver 3 * 4 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <nvgpu/kmem.h> 26#include <nvgpu/log.h> 27#include <nvgpu/vm.h> 28#include <nvgpu/atomic.h> 29#include <nvgpu/mm.h> 30#include <nvgpu/bug.h> 31#include <nvgpu/io.h> 32#include <nvgpu/utils.h> 33#include <nvgpu/channel.h> 34#include <nvgpu/unit.h> 35#include <nvgpu/power_features/power_features.h> 36 37#include "gk20a.h" 38#include "gr_gk20a.h" 39#include "dbg_gpu_gk20a.h" 40#include "regops_gk20a.h" 41 42#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 43#include <nvgpu/hw/gk20a/hw_perf_gk20a.h> 44 45static void gk20a_perfbuf_reset_streaming(struct gk20a *g) 46{ 47 u32 engine_status; 48 u32 num_unread_bytes; 49 50 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_PERFMON)); 51 52 engine_status = gk20a_readl(g, perf_pmasys_enginestatus_r()); 53 WARN_ON(0u == 54 (engine_status & perf_pmasys_enginestatus_rbufempty_empty_f())); 55 56 gk20a_writel(g, perf_pmasys_control_r(), 57 perf_pmasys_control_membuf_clear_status_doit_f()); 58 59 num_unread_bytes = gk20a_readl(g, perf_pmasys_mem_bytes_r()); 60 if (num_unread_bytes != 0u) { 61 gk20a_writel(g, perf_pmasys_mem_bump_r(), num_unread_bytes); 62 } 63} 64 65/* 66 * API to get first channel from the list of all channels 67 * bound to the debug session 68 */ 69struct channel_gk20a * 70nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s) 71{ 72 struct dbg_session_channel_data *ch_data; 73 struct channel_gk20a *ch; 74 struct gk20a *g = dbg_s->g; 75 76 nvgpu_mutex_acquire(&dbg_s->ch_list_lock); 77 if (nvgpu_list_empty(&dbg_s->ch_list)) { 78 nvgpu_mutex_release(&dbg_s->ch_list_lock); 79 return NULL; 80 } 81 82 ch_data = nvgpu_list_first_entry(&dbg_s->ch_list, 83 dbg_session_channel_data, 84 ch_entry); 85 ch = g->fifo.channel + ch_data->chid; 86 87 nvgpu_mutex_release(&dbg_s->ch_list_lock); 88 89 return ch; 90} 91 92void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) 93{ 94 struct dbg_session_data *session_data; 95 struct dbg_session_gk20a *dbg_s; 96 struct gk20a *g = ch->g; 97 98 nvgpu_log(g, 
gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); 99 100 /* guard against the session list being modified */ 101 nvgpu_mutex_acquire(&ch->dbg_s_lock); 102 103 nvgpu_list_for_each_entry(session_data, &ch->dbg_s_list, 104 dbg_session_data, dbg_s_entry) { 105 dbg_s = session_data->dbg_s; 106 if (dbg_s->dbg_events.events_enabled) { 107 nvgpu_log(g, gpu_dbg_gpu_dbg, "posting event on session id %d", 108 dbg_s->id); 109 nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending", 110 dbg_s->dbg_events.num_pending_events); 111 112 dbg_s->dbg_events.num_pending_events++; 113 114 nvgpu_dbg_session_post_event(dbg_s); 115 } 116 } 117 118 nvgpu_mutex_release(&ch->dbg_s_lock); 119} 120 121bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) 122{ 123 struct dbg_session_data *session_data; 124 struct dbg_session_gk20a *dbg_s; 125 bool broadcast = false; 126 struct gk20a *g = ch->g; 127 128 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); 129 130 /* guard against the session list being modified */ 131 nvgpu_mutex_acquire(&ch->dbg_s_lock); 132 133 nvgpu_list_for_each_entry(session_data, &ch->dbg_s_list, 134 dbg_session_data, dbg_s_entry) { 135 dbg_s = session_data->dbg_s; 136 if (dbg_s->broadcast_stop_trigger) { 137 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, 138 "stop trigger broadcast enabled"); 139 broadcast = true; 140 break; 141 } 142 } 143 144 nvgpu_mutex_release(&ch->dbg_s_lock); 145 146 return broadcast; 147} 148 149int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) 150{ 151 struct dbg_session_data *session_data; 152 struct dbg_session_gk20a *dbg_s; 153 struct gk20a *g = ch->g; 154 155 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); 156 157 /* guard against the session list being modified */ 158 nvgpu_mutex_acquire(&ch->dbg_s_lock); 159 160 nvgpu_list_for_each_entry(session_data, &ch->dbg_s_list, 161 dbg_session_data, dbg_s_entry) { 162 dbg_s = session_data->dbg_s; 163 if (dbg_s->broadcast_stop_trigger) { 164 
nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, 165 "stop trigger broadcast disabled"); 166 dbg_s->broadcast_stop_trigger = false; 167 } 168 } 169 170 nvgpu_mutex_release(&ch->dbg_s_lock); 171 172 return 0; 173} 174 175u32 nvgpu_set_powergate_locked(struct dbg_session_gk20a *dbg_s, 176 bool mode) 177{ 178 u32 err = 0U; 179 struct gk20a *g = dbg_s->g; 180 181 if (dbg_s->is_pg_disabled != mode) { 182 if (mode == false) { 183 g->dbg_powergating_disabled_refcount--; 184 } 185 186 /* 187 * Allow powergate disable or enable only if 188 * the global pg disabled refcount is zero 189 */ 190 if (g->dbg_powergating_disabled_refcount == 0) { 191 err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, 192 mode); 193 } 194 195 if (mode) { 196 g->dbg_powergating_disabled_refcount++; 197 } 198 199 dbg_s->is_pg_disabled = mode; 200 } 201 202 return err; 203} 204 205int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powergate) 206{ 207 int err = 0; 208 struct gk20a *g = dbg_s->g; 209 210 /* This function must be called with g->dbg_sessions_lock held */ 211 212 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %s", 213 g->name, disable_powergate ? "disable" : "enable"); 214 215 /* 216 * Powergate mode here refers to railgate+powergate+clockgate 217 * so in case slcg/blcg/elcg are disabled and railgating is enabled, 218 * disable railgating and then set is_pg_disabled = true 219 * Similarly re-enable railgating and not other features if they are not 220 * enabled when powermode=MODE_ENABLE 221 */ 222 if (disable_powergate) { 223 /* save off current powergate, clk state. 224 * set gpu module's can_powergate = 0. 225 * set gpu module's clk to max. 226 * while *a* debug session is active there will be no power or 227 * clocking state changes allowed from mainline code (but they 228 * should be saved). 
229 */ 230 231 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, 232 "module busy"); 233 err = gk20a_busy(g); 234 if (err) { 235 return err; 236 } 237 238 err = nvgpu_cg_pg_disable(g); 239 240 if (err == 0) { 241 dbg_s->is_pg_disabled = true; 242 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, 243 "pg disabled"); 244 } 245 } else { 246 /* restore (can) powergate, clk state */ 247 /* release pending exceptions to fault/be handled as usual */ 248 /*TBD: ordering of these? */ 249 250 err = nvgpu_cg_pg_enable(g); 251 252 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle"); 253 gk20a_idle(g); 254 255 if (err == 0) { 256 dbg_s->is_pg_disabled = false; 257 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, 258 "pg enabled"); 259 } 260 } 261 262 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %s done", 263 g->name, disable_powergate ? "disable" : "enable"); 264 return err; 265} 266 267bool nvgpu_check_and_set_global_reservation( 268 struct dbg_session_gk20a *dbg_s, 269 struct dbg_profiler_object_data *prof_obj) 270{ 271 struct gk20a *g = dbg_s->g; 272 273 if (g->profiler_reservation_count == 0) { 274 g->global_profiler_reservation_held = true; 275 g->profiler_reservation_count = 1; 276 dbg_s->has_profiler_reservation = true; 277 prof_obj->has_reservation = true; 278 return true; 279 } 280 return false; 281} 282 283bool nvgpu_check_and_set_context_reservation( 284 struct dbg_session_gk20a *dbg_s, 285 struct dbg_profiler_object_data *prof_obj) 286{ 287 struct gk20a *g = dbg_s->g; 288 289 /* Assumes that we've already checked that no global reservation 290 * is in effect. 
291 */ 292 g->profiler_reservation_count++; 293 dbg_s->has_profiler_reservation = true; 294 prof_obj->has_reservation = true; 295 return true; 296} 297 298void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s, 299 struct dbg_profiler_object_data *prof_obj) 300{ 301 struct gk20a *g = dbg_s->g; 302 303 g->profiler_reservation_count--; 304 if (g->profiler_reservation_count < 0) { 305 nvgpu_err(g, "Negative reservation count!"); 306 } 307 dbg_s->has_profiler_reservation = false; 308 prof_obj->has_reservation = false; 309 if (prof_obj->ch == NULL) { 310 g->global_profiler_reservation_held = false; 311 } 312} 313 314int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) 315{ 316 struct mm_gk20a *mm = &g->mm; 317 u32 virt_addr_lo; 318 u32 virt_addr_hi; 319 u32 inst_pa_page; 320 int err; 321 322 err = gk20a_busy(g); 323 if (err) { 324 nvgpu_err(g, "failed to poweron"); 325 return err; 326 } 327 328 err = g->ops.mm.alloc_inst_block(g, &mm->perfbuf.inst_block); 329 if (err) { 330 return err; 331 } 332 333 g->ops.mm.init_inst_block(&mm->perfbuf.inst_block, mm->perfbuf.vm, 0); 334 335 gk20a_perfbuf_reset_streaming(g); 336 337 virt_addr_lo = u64_lo32(offset); 338 virt_addr_hi = u64_hi32(offset); 339 340 /* address and size are aligned to 32 bytes, the lowest bits read back 341 * as zeros */ 342 gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo); 343 gk20a_writel(g, perf_pmasys_outbaseupper_r(), 344 perf_pmasys_outbaseupper_ptr_f(virt_addr_hi)); 345 gk20a_writel(g, perf_pmasys_outsize_r(), size); 346 347 /* this field is aligned to 4K */ 348 inst_pa_page = nvgpu_inst_block_addr(g, &mm->perfbuf.inst_block) >> 12; 349 350 /* A write to MEM_BLOCK triggers the block bind operation. 
MEM_BLOCK 351 * should be written last */ 352 gk20a_writel(g, perf_pmasys_mem_block_r(), 353 perf_pmasys_mem_block_base_f(inst_pa_page) | 354 nvgpu_aperture_mask(g, &mm->perfbuf.inst_block, 355 perf_pmasys_mem_block_target_sys_ncoh_f(), 356 perf_pmasys_mem_block_target_sys_coh_f(), 357 perf_pmasys_mem_block_target_lfb_f()) | 358 perf_pmasys_mem_block_valid_true_f()); 359 360 gk20a_idle(g); 361 return 0; 362} 363 364/* must be called with dbg_sessions_lock held */ 365int gk20a_perfbuf_disable_locked(struct gk20a *g) 366{ 367 int err = gk20a_busy(g); 368 if (err) { 369 nvgpu_err(g, "failed to poweron"); 370 return err; 371 } 372 373 gk20a_perfbuf_reset_streaming(g); 374 375 gk20a_writel(g, perf_pmasys_outbase_r(), 0); 376 gk20a_writel(g, perf_pmasys_outbaseupper_r(), 377 perf_pmasys_outbaseupper_ptr_f(0)); 378 gk20a_writel(g, perf_pmasys_outsize_r(), 0); 379 380 gk20a_writel(g, perf_pmasys_mem_block_r(), 381 perf_pmasys_mem_block_base_f(0) | 382 perf_pmasys_mem_block_valid_false_f() | 383 perf_pmasys_mem_block_target_f(0)); 384 385 gk20a_idle(g); 386 387 return 0; 388}
diff --git a/include/gk20a/dbg_gpu_gk20a.h b/include/gk20a/dbg_gpu_gk20a.h
deleted file mode 100644
index fb5ae1f..0000000
--- a/include/gk20a/dbg_gpu_gk20a.h
+++ /dev/null
@@ -1,147 +0,0 @@ 1/* 2 * Tegra GK20A GPU Debugger Driver 3 * 4 * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef DBG_GPU_H 25#define DBG_GPU_H 26 27#include <nvgpu/cond.h> 28#include <nvgpu/lock.h> 29#include <nvgpu/list.h> 30 31struct gk20a; 32struct channel_gk20a; 33struct dbg_session_gk20a; 34 35/* used by the interrupt handler to post events */ 36void gk20a_dbg_gpu_post_events(struct channel_gk20a *fault_ch); 37 38struct channel_gk20a * 39nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s); 40 41struct dbg_gpu_session_events { 42 struct nvgpu_cond wait_queue; 43 bool events_enabled; 44 int num_pending_events; 45}; 46 47struct dbg_session_gk20a { 48 /* dbg session id used for trace/prints */ 49 int id; 50 51 /* profiler session, if any */ 52 bool is_profiler; 53 54 /* has a valid profiler reservation */ 55 bool has_profiler_reservation; 56 57 /* power enabled or disabled */ 58 bool is_pg_disabled; 59 60 /* timeouts enabled or disabled */ 61 bool is_timeout_disabled; 62 63 struct gk20a *g; 64 65 /* list of bound channels, if any */ 66 struct nvgpu_list_node ch_list; 67 struct nvgpu_mutex ch_list_lock; 68 69 /* event support */ 70 struct dbg_gpu_session_events dbg_events; 71 72 bool broadcast_stop_trigger; 73 74 struct nvgpu_mutex ioctl_lock; 75}; 76 77struct dbg_session_data { 78 struct dbg_session_gk20a *dbg_s; 79 struct nvgpu_list_node dbg_s_entry; 80}; 81 82static inline struct dbg_session_data * 83dbg_session_data_from_dbg_s_entry(struct nvgpu_list_node *node) 84{ 85 return (struct dbg_session_data *) 86 ((uintptr_t)node - offsetof(struct dbg_session_data, dbg_s_entry)); 87}; 88 89struct dbg_session_channel_data { 90 int channel_fd; 91 u32 chid; 92 struct nvgpu_list_node ch_entry; 93 struct dbg_session_data *session_data; 94 int (*unbind_single_channel)(struct dbg_session_gk20a *dbg_s, 95 struct dbg_session_channel_data *ch_data); 96}; 97 98static inline struct dbg_session_channel_data * 99dbg_session_channel_data_from_ch_entry(struct nvgpu_list_node *node) 100{ 101 return (struct dbg_session_channel_data *) 102 ((uintptr_t)node - 
offsetof(struct dbg_session_channel_data, ch_entry)); 103}; 104 105struct dbg_profiler_object_data { 106 int session_id; 107 u32 prof_handle; 108 struct channel_gk20a *ch; 109 bool has_reservation; 110 struct nvgpu_list_node prof_obj_entry; 111}; 112 113static inline struct dbg_profiler_object_data * 114dbg_profiler_object_data_from_prof_obj_entry(struct nvgpu_list_node *node) 115{ 116 return (struct dbg_profiler_object_data *) 117 ((uintptr_t)node - offsetof(struct dbg_profiler_object_data, prof_obj_entry)); 118}; 119 120bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch); 121int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch); 122 123int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powergate); 124bool nvgpu_check_and_set_global_reservation( 125 struct dbg_session_gk20a *dbg_s, 126 struct dbg_profiler_object_data *prof_obj); 127bool nvgpu_check_and_set_context_reservation( 128 struct dbg_session_gk20a *dbg_s, 129 struct dbg_profiler_object_data *prof_obj); 130void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s, 131 struct dbg_profiler_object_data *prof_obj); 132int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size); 133int gk20a_perfbuf_disable_locked(struct gk20a *g); 134 135void nvgpu_dbg_session_post_event(struct dbg_session_gk20a *dbg_s); 136u32 nvgpu_set_powergate_locked(struct dbg_session_gk20a *dbg_s, 137 bool mode); 138 139 /* PM Context Switch Mode */ 140/*This mode says that the pms are not to be context switched. */ 141#define NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW (0x00000000) 142/* This mode says that the pms in Mode-B are to be context switched */ 143#define NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW (0x00000001) 144/* This mode says that the pms in Mode-E (stream out) are to be context switched. */ 145#define NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW (0x00000002) 146 147#endif /* DBG_GPU_GK20A_H */
diff --git a/include/gk20a/fecs_trace_gk20a.c b/include/gk20a/fecs_trace_gk20a.c
deleted file mode 100644
index 5c1c5e0..0000000
--- a/include/gk20a/fecs_trace_gk20a.c
+++ /dev/null
@@ -1,744 +0,0 @@ 1/* 2 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/kmem.h> 24#include <nvgpu/dma.h> 25#include <nvgpu/enabled.h> 26#include <nvgpu/bug.h> 27#include <nvgpu/hashtable.h> 28#include <nvgpu/circ_buf.h> 29#include <nvgpu/thread.h> 30#include <nvgpu/barrier.h> 31#include <nvgpu/mm.h> 32#include <nvgpu/enabled.h> 33#include <nvgpu/ctxsw_trace.h> 34#include <nvgpu/io.h> 35#include <nvgpu/utils.h> 36#include <nvgpu/timers.h> 37#include <nvgpu/channel.h> 38 39#include "fecs_trace_gk20a.h" 40#include "gk20a.h" 41#include "gr_gk20a.h" 42 43#include <nvgpu/log.h> 44#include <nvgpu/fecs_trace.h> 45 46#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h> 47#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 48 49struct gk20a_fecs_trace_hash_ent { 50 u32 context_ptr; 51 pid_t pid; 52 struct hlist_node node; 53}; 54 55struct gk20a_fecs_trace { 56 57 DECLARE_HASHTABLE(pid_hash_table, GK20A_FECS_TRACE_HASH_BITS); 58 struct nvgpu_mutex hash_lock; 59 struct nvgpu_mutex poll_lock; 60 struct nvgpu_thread poll_task; 61 bool init; 62 struct nvgpu_mutex enable_lock; 63 u32 enable_count; 64}; 65 66#ifdef CONFIG_GK20A_CTXSW_TRACE 67u32 gk20a_fecs_trace_record_ts_tag_invalid_ts_v(void) 68{ 69 return ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_v(); 70} 71 72u32 gk20a_fecs_trace_record_ts_tag_v(u64 ts) 73{ 74 return ctxsw_prog_record_timestamp_timestamp_hi_tag_v((u32) (ts >> 32)); 75} 76 77u64 gk20a_fecs_trace_record_ts_timestamp_v(u64 ts) 78{ 79 return ts & ~(((u64)ctxsw_prog_record_timestamp_timestamp_hi_tag_m()) << 32); 80} 81 82static u32 gk20a_fecs_trace_fecs_context_ptr(struct gk20a *g, struct channel_gk20a *ch) 83{ 84 return (u32) (nvgpu_inst_block_addr(g, &ch->inst_block) >> 12LL); 85} 86 87int gk20a_fecs_trace_num_ts(void) 88{ 89 return (ctxsw_prog_record_timestamp_record_size_in_bytes_v() 90 - sizeof(struct gk20a_fecs_trace_record)) / sizeof(u64); 91} 92 93struct gk20a_fecs_trace_record *gk20a_fecs_trace_get_record( 94 struct gk20a *g, int idx) 95{ 96 struct nvgpu_mem *mem = 
&g->gr.global_ctx_buffer[FECS_TRACE_BUFFER].mem; 97 98 return (struct gk20a_fecs_trace_record *) 99 ((u8 *) mem->cpu_va 100 + (idx * ctxsw_prog_record_timestamp_record_size_in_bytes_v())); 101} 102 103bool gk20a_fecs_trace_is_valid_record(struct gk20a_fecs_trace_record *r) 104{ 105 /* 106 * testing magic_hi should suffice. magic_lo is sometimes used 107 * as a sequence number in experimental ucode. 108 */ 109 return (r->magic_hi 110 == ctxsw_prog_record_timestamp_magic_value_hi_v_value_v()); 111} 112 113int gk20a_fecs_trace_get_read_index(struct gk20a *g) 114{ 115 return gr_gk20a_elpg_protected_call(g, 116 gk20a_readl(g, gr_fecs_mailbox1_r())); 117} 118 119int gk20a_fecs_trace_get_write_index(struct gk20a *g) 120{ 121 return gr_gk20a_elpg_protected_call(g, 122 gk20a_readl(g, gr_fecs_mailbox0_r())); 123} 124 125static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index) 126{ 127 nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index); 128 return gr_gk20a_elpg_protected_call(g, 129 (gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0)); 130} 131 132void gk20a_fecs_trace_hash_dump(struct gk20a *g) 133{ 134 u32 bkt; 135 struct gk20a_fecs_trace_hash_ent *ent; 136 struct gk20a_fecs_trace *trace = g->fecs_trace; 137 138 nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table"); 139 140 nvgpu_mutex_acquire(&trace->hash_lock); 141 hash_for_each(trace->pid_hash_table, bkt, ent, node) 142 { 143 nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", 144 ent, bkt, ent->context_ptr, ent->pid); 145 146 } 147 nvgpu_mutex_release(&trace->hash_lock); 148} 149 150static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid) 151{ 152 struct gk20a_fecs_trace_hash_ent *he; 153 struct gk20a_fecs_trace *trace = g->fecs_trace; 154 155 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, 156 "adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid); 157 158 he = nvgpu_kzalloc(g, sizeof(*he)); 159 if (unlikely(!he)) { 160 nvgpu_warn(g, 161 "can't alloc new hash entry for 
context_ptr=%x pid=%d", 162 context_ptr, pid); 163 return -ENOMEM; 164 } 165 166 he->context_ptr = context_ptr; 167 he->pid = pid; 168 nvgpu_mutex_acquire(&trace->hash_lock); 169 hash_add(trace->pid_hash_table, &he->node, context_ptr); 170 nvgpu_mutex_release(&trace->hash_lock); 171 return 0; 172} 173 174static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) 175{ 176 struct hlist_node *tmp; 177 struct gk20a_fecs_trace_hash_ent *ent; 178 struct gk20a_fecs_trace *trace = g->fecs_trace; 179 180 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, 181 "freeing hash entry context_ptr=%x", context_ptr); 182 183 nvgpu_mutex_acquire(&trace->hash_lock); 184 hash_for_each_possible_safe(trace->pid_hash_table, ent, tmp, node, 185 context_ptr) { 186 if (ent->context_ptr == context_ptr) { 187 hash_del(&ent->node); 188 nvgpu_log(g, gpu_dbg_ctxsw, 189 "freed hash entry=%p context_ptr=%x", ent, 190 ent->context_ptr); 191 nvgpu_kfree(g, ent); 192 break; 193 } 194 } 195 nvgpu_mutex_release(&trace->hash_lock); 196} 197 198static void gk20a_fecs_trace_free_hash_table(struct gk20a *g) 199{ 200 u32 bkt; 201 struct hlist_node *tmp; 202 struct gk20a_fecs_trace_hash_ent *ent; 203 struct gk20a_fecs_trace *trace = g->fecs_trace; 204 205 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); 206 207 nvgpu_mutex_acquire(&trace->hash_lock); 208 hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { 209 hash_del(&ent->node); 210 nvgpu_kfree(g, ent); 211 } 212 nvgpu_mutex_release(&trace->hash_lock); 213 214} 215 216static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr) 217{ 218 struct gk20a_fecs_trace_hash_ent *ent; 219 struct gk20a_fecs_trace *trace = g->fecs_trace; 220 pid_t pid = 0; 221 222 nvgpu_mutex_acquire(&trace->hash_lock); 223 hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { 224 if (ent->context_ptr == context_ptr) { 225 nvgpu_log(g, gpu_dbg_ctxsw, 226 "found context_ptr=%x -> pid=%d", 227 ent->context_ptr, ent->pid); 228 pid 
= ent->pid; 229 break; 230 } 231 } 232 nvgpu_mutex_release(&trace->hash_lock); 233 234 return pid; 235} 236 237/* 238 * Converts HW entry format to userspace-facing format and pushes it to the 239 * queue. 240 */ 241static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) 242{ 243 int i; 244 struct nvgpu_gpu_ctxsw_trace_entry entry = { }; 245 struct gk20a_fecs_trace *trace = g->fecs_trace; 246 pid_t cur_pid; 247 pid_t new_pid; 248 int count = 0; 249 250 /* for now, only one VM */ 251 const int vmid = 0; 252 253 struct gk20a_fecs_trace_record *r = 254 gk20a_fecs_trace_get_record(g, index); 255 256 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, 257 "consuming record trace=%p read=%d record=%p", trace, index, r); 258 259 if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { 260 nvgpu_warn(g, 261 "trace=%p read=%d record=%p magic_lo=%08x magic_hi=%08x (invalid)", 262 trace, index, r, r->magic_lo, r->magic_hi); 263 return -EINVAL; 264 } 265 266 /* Clear magic_hi to detect cases where CPU could read write index 267 * before FECS record is actually written to DRAM. This should not 268 * as we force FECS writes to SYSMEM by reading through PRAMIN. 
269 */ 270 r->magic_hi = 0; 271 272 cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr); 273 new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr); 274 275 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, 276 "context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)", 277 r->context_ptr, cur_pid, r->new_context_ptr, new_pid); 278 279 entry.context_id = r->context_id; 280 entry.vmid = vmid; 281 282 /* break out FECS record into trace events */ 283 for (i = 0; i < gk20a_fecs_trace_num_ts(); i++) { 284 285 entry.tag = gk20a_fecs_trace_record_ts_tag_v(r->ts[i]); 286 entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]); 287 entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT; 288 289 nvgpu_log(g, gpu_dbg_ctxsw, 290 "tag=%x timestamp=%llx context_id=%08x new_context_id=%08x", 291 entry.tag, entry.timestamp, r->context_id, 292 r->new_context_id); 293 294 switch (nvgpu_gpu_ctxsw_tags_to_common_tags(entry.tag)) { 295 case NVGPU_GPU_CTXSW_TAG_RESTORE_START: 296 case NVGPU_GPU_CTXSW_TAG_CONTEXT_START: 297 entry.context_id = r->new_context_id; 298 entry.pid = new_pid; 299 break; 300 301 case NVGPU_GPU_CTXSW_TAG_CTXSW_REQ_BY_HOST: 302 case NVGPU_GPU_CTXSW_TAG_FE_ACK: 303 case NVGPU_GPU_CTXSW_TAG_FE_ACK_WFI: 304 case NVGPU_GPU_CTXSW_TAG_FE_ACK_GFXP: 305 case NVGPU_GPU_CTXSW_TAG_FE_ACK_CTAP: 306 case NVGPU_GPU_CTXSW_TAG_FE_ACK_CILP: 307 case NVGPU_GPU_CTXSW_TAG_SAVE_END: 308 entry.context_id = r->context_id; 309 entry.pid = cur_pid; 310 break; 311 312 default: 313 /* tags are not guaranteed to start at the beginning */ 314 WARN_ON(entry.tag && (entry.tag != NVGPU_GPU_CTXSW_TAG_INVALID_TIMESTAMP)); 315 continue; 316 } 317 318 nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld", 319 entry.tag, entry.context_id, entry.pid); 320 321 if (!entry.context_id) 322 continue; 323 324 gk20a_ctxsw_trace_write(g, &entry); 325 count++; 326 } 327 328 gk20a_ctxsw_trace_wake_up(g, vmid); 329 return count; 330} 331 332int gk20a_fecs_trace_poll(struct gk20a *g) 333{ 334 struct 
gk20a_fecs_trace *trace = g->fecs_trace; 335 336 int read = 0; 337 int write = 0; 338 int cnt; 339 int err; 340 341 err = gk20a_busy(g); 342 if (unlikely(err)) 343 return err; 344 345 nvgpu_mutex_acquire(&trace->poll_lock); 346 write = gk20a_fecs_trace_get_write_index(g); 347 if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) { 348 nvgpu_err(g, 349 "failed to acquire write index, write=%d", write); 350 err = write; 351 goto done; 352 } 353 354 read = gk20a_fecs_trace_get_read_index(g); 355 356 cnt = CIRC_CNT(write, read, GK20A_FECS_TRACE_NUM_RECORDS); 357 if (!cnt) 358 goto done; 359 360 nvgpu_log(g, gpu_dbg_ctxsw, 361 "circular buffer: read=%d (mailbox=%d) write=%d cnt=%d", 362 read, gk20a_fecs_trace_get_read_index(g), write, cnt); 363 364 /* Ensure all FECS writes have made it to SYSMEM */ 365 g->ops.mm.fb_flush(g); 366 367 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL)) { 368 /* Bits 30:0 of MAILBOX1 represents actual read pointer value */ 369 read = read & (~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT))); 370 } 371 372 while (read != write) { 373 cnt = gk20a_fecs_trace_ring_read(g, read); 374 if (cnt > 0) { 375 nvgpu_log(g, gpu_dbg_ctxsw, 376 "number of trace entries added: %d", cnt); 377 } 378 379 /* Get to next record. */ 380 read = (read + 1) & (GK20A_FECS_TRACE_NUM_RECORDS - 1); 381 } 382 383 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL)) { 384 /* 385 * In the next step, read pointer is going to be updated. 386 * So, MSB of read pointer should be set back to 1. This will 387 * keep FECS trace enabled. 
388 */ 389 read = read | (BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)); 390 } 391 392 /* ensure FECS records has been updated before incrementing read index */ 393 nvgpu_wmb(); 394 gk20a_fecs_trace_set_read_index(g, read); 395 396done: 397 nvgpu_mutex_release(&trace->poll_lock); 398 gk20a_idle(g); 399 return err; 400} 401 402static int gk20a_fecs_trace_periodic_polling(void *arg) 403{ 404 struct gk20a *g = (struct gk20a *)arg; 405 struct gk20a_fecs_trace *trace = g->fecs_trace; 406 407 pr_info("%s: running\n", __func__); 408 409 while (!nvgpu_thread_should_stop(&trace->poll_task)) { 410 411 nvgpu_usleep_range(GK20A_FECS_TRACE_FRAME_PERIOD_US, 412 GK20A_FECS_TRACE_FRAME_PERIOD_US * 2); 413 414 gk20a_fecs_trace_poll(g); 415 } 416 417 return 0; 418} 419 420size_t gk20a_fecs_trace_buffer_size(struct gk20a *g) 421{ 422 return GK20A_FECS_TRACE_NUM_RECORDS 423 * ctxsw_prog_record_timestamp_record_size_in_bytes_v(); 424} 425 426int gk20a_fecs_trace_init(struct gk20a *g) 427{ 428 struct gk20a_fecs_trace *trace; 429 int err; 430 431 trace = nvgpu_kzalloc(g, sizeof(struct gk20a_fecs_trace)); 432 if (!trace) { 433 nvgpu_warn(g, "failed to allocate fecs_trace"); 434 return -ENOMEM; 435 } 436 g->fecs_trace = trace; 437 438 err = nvgpu_mutex_init(&trace->poll_lock); 439 if (err) 440 goto clean; 441 err = nvgpu_mutex_init(&trace->hash_lock); 442 if (err) 443 goto clean_poll_lock; 444 445 err = nvgpu_mutex_init(&trace->enable_lock); 446 if (err) 447 goto clean_hash_lock; 448 449 BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)); 450 hash_init(trace->pid_hash_table); 451 452 __nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true); 453 454 trace->enable_count = 0; 455 trace->init = true; 456 457 return 0; 458 459clean_hash_lock: 460 nvgpu_mutex_destroy(&trace->hash_lock); 461 462clean_poll_lock: 463 nvgpu_mutex_destroy(&trace->poll_lock); 464clean: 465 nvgpu_kfree(g, trace); 466 g->fecs_trace = NULL; 467 return err; 468} 469 470int gk20a_fecs_trace_bind_channel(struct gk20a 
*g, 471 struct channel_gk20a *ch) 472{ 473 /* 474 * map our circ_buf to the context space and store the GPU VA 475 * in the context header. 476 */ 477 478 u32 lo; 479 u32 hi; 480 u64 addr; 481 struct tsg_gk20a *tsg; 482 struct nvgpu_gr_ctx *ch_ctx; 483 struct gk20a_fecs_trace *trace = g->fecs_trace; 484 struct nvgpu_mem *mem; 485 u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch); 486 u32 aperture_mask; 487 488 tsg = tsg_gk20a_from_ch(ch); 489 if (tsg == NULL) { 490 nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid); 491 return -EINVAL; 492 } 493 494 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, 495 "chid=%d context_ptr=%x inst_block=%llx", 496 ch->chid, context_ptr, 497 nvgpu_inst_block_addr(g, &ch->inst_block)); 498 499 tsg = tsg_gk20a_from_ch(ch); 500 if (!tsg) 501 return -EINVAL; 502 503 ch_ctx = &tsg->gr_ctx; 504 mem = &ch_ctx->mem; 505 506 if (!trace) 507 return -ENOMEM; 508 509 mem = &g->gr.global_ctx_buffer[FECS_TRACE_BUFFER].mem; 510 511 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) { 512 addr = ch_ctx->global_ctx_buffer_va[FECS_TRACE_BUFFER_VA]; 513 nvgpu_log(g, gpu_dbg_ctxsw, "gpu_va=%llx", addr); 514 aperture_mask = 0; 515 } else { 516 addr = nvgpu_inst_block_addr(g, mem); 517 nvgpu_log(g, gpu_dbg_ctxsw, "pa=%llx", addr); 518 aperture_mask = nvgpu_aperture_mask(g, mem, 519 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(), 520 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_coherent_f(), 521 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f()); 522 } 523 if (!addr) 524 return -ENOMEM; 525 526 lo = u64_lo32(addr); 527 hi = u64_hi32(addr); 528 529 mem = &ch_ctx->mem; 530 531 nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi, 532 lo, GK20A_FECS_TRACE_NUM_RECORDS); 533 534 nvgpu_mem_wr(g, mem, 535 ctxsw_prog_main_image_context_timestamp_buffer_control_o(), 536 ctxsw_prog_main_image_context_timestamp_buffer_control_num_records_f( 537 
GK20A_FECS_TRACE_NUM_RECORDS)); 538 539 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) 540 mem = &ch->ctx_header; 541 542 nvgpu_mem_wr(g, mem, 543 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(), 544 lo); 545 nvgpu_mem_wr(g, mem, 546 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(), 547 ctxsw_prog_main_image_context_timestamp_buffer_ptr_v_f(hi) | 548 aperture_mask); 549 550 /* pid (process identifier) in user space, corresponds to tgid (thread 551 * group id) in kernel space. 552 */ 553 gk20a_fecs_trace_hash_add(g, context_ptr, tsg->tgid); 554 555 return 0; 556} 557 558int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch) 559{ 560 u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch); 561 562 if (g->fecs_trace) { 563 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, 564 "ch=%p context_ptr=%x", ch, context_ptr); 565 566 if (g->ops.fecs_trace.is_enabled(g)) { 567 if (g->ops.fecs_trace.flush) 568 g->ops.fecs_trace.flush(g); 569 gk20a_fecs_trace_poll(g); 570 } 571 gk20a_fecs_trace_hash_del(g, context_ptr); 572 } 573 return 0; 574} 575 576int gk20a_fecs_trace_reset(struct gk20a *g) 577{ 578 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); 579 580 if (!g->ops.fecs_trace.is_enabled(g)) 581 return 0; 582 583 gk20a_fecs_trace_poll(g); 584 return gk20a_fecs_trace_set_read_index(g, 0); 585} 586 587int gk20a_fecs_trace_deinit(struct gk20a *g) 588{ 589 struct gk20a_fecs_trace *trace = g->fecs_trace; 590 591 if (!trace->init) 592 return 0; 593 594 /* 595 * Check if tracer was enabled before attempting to stop the 596 * tracer thread. 
597 */ 598 if (trace->enable_count > 0) { 599 nvgpu_thread_stop(&trace->poll_task); 600 } 601 gk20a_fecs_trace_free_hash_table(g); 602 603 nvgpu_mutex_destroy(&g->fecs_trace->hash_lock); 604 nvgpu_mutex_destroy(&g->fecs_trace->poll_lock); 605 nvgpu_mutex_destroy(&g->fecs_trace->enable_lock); 606 607 nvgpu_kfree(g, g->fecs_trace); 608 g->fecs_trace = NULL; 609 return 0; 610} 611 612int gk20a_gr_max_entries(struct gk20a *g, 613 struct nvgpu_gpu_ctxsw_trace_filter *filter) 614{ 615 int n; 616 int tag; 617 618 /* Compute number of entries per record, with given filter */ 619 for (n = 0, tag = 0; tag < gk20a_fecs_trace_num_ts(); tag++) 620 n += (NVGPU_GPU_CTXSW_FILTER_ISSET(tag, filter) != 0); 621 622 /* Return max number of entries generated for the whole ring */ 623 return n * GK20A_FECS_TRACE_NUM_RECORDS; 624} 625 626int gk20a_fecs_trace_enable(struct gk20a *g) 627{ 628 struct gk20a_fecs_trace *trace = g->fecs_trace; 629 int write; 630 int err = 0; 631 632 if (!trace) 633 return -EINVAL; 634 635 nvgpu_mutex_acquire(&trace->enable_lock); 636 trace->enable_count++; 637 638 if (trace->enable_count == 1U) { 639 /* drop data in hw buffer */ 640 if (g->ops.fecs_trace.flush) 641 g->ops.fecs_trace.flush(g); 642 643 write = gk20a_fecs_trace_get_write_index(g); 644 645 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL)) { 646 /* 647 * For enabling FECS trace support, MAILBOX1's MSB 648 * (Bit 31:31) should be set to 1. Bits 30:0 represents 649 * actual pointer value. 650 */ 651 write = write | 652 (BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)); 653 } 654 gk20a_fecs_trace_set_read_index(g, write); 655 656 /* 657 * FECS ucode does a priv holdoff around the assertion of 658 * context reset. So, pri transactions (e.g. mailbox1 register 659 * write) might fail due to this. Hence, do write with ack 660 * i.e. write and read it back to make sure write happened for 661 * mailbox1. 
662 */ 663 while (gk20a_fecs_trace_get_read_index(g) != write) { 664 nvgpu_log(g, gpu_dbg_ctxsw, "mailbox1 update failed"); 665 gk20a_fecs_trace_set_read_index(g, write); 666 } 667 668 err = nvgpu_thread_create(&trace->poll_task, g, 669 gk20a_fecs_trace_periodic_polling, __func__); 670 if (err) { 671 nvgpu_warn(g, 672 "failed to create FECS polling task"); 673 goto done; 674 } 675 } 676 677done: 678 nvgpu_mutex_release(&trace->enable_lock); 679 return err; 680} 681 682int gk20a_fecs_trace_disable(struct gk20a *g) 683{ 684 struct gk20a_fecs_trace *trace = g->fecs_trace; 685 int read = 0; 686 687 if (trace == NULL) { 688 return -EINVAL; 689 } 690 691 nvgpu_mutex_acquire(&trace->enable_lock); 692 if (trace->enable_count <= 0U) { 693 nvgpu_mutex_release(&trace->enable_lock); 694 return 0; 695 } 696 trace->enable_count--; 697 if (trace->enable_count == 0U) { 698 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL)) { 699 /* 700 * For disabling FECS trace support, MAILBOX1's MSB 701 * (Bit 31:31) should be set to 0. 702 */ 703 read = gk20a_fecs_trace_get_read_index(g) & 704 (~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT))); 705 706 gk20a_fecs_trace_set_read_index(g, read); 707 708 /* 709 * FECS ucode does a priv holdoff around the assertion 710 * of context reset. So, pri transactions (e.g. 711 * mailbox1 register write) might fail due to this. 712 * Hence, do write with ack i.e. write and read it back 713 * to make sure write happened for mailbox1. 
714 */ 715 while (gk20a_fecs_trace_get_read_index(g) != read) { 716 nvgpu_log(g, gpu_dbg_ctxsw, 717 "mailbox1 update failed"); 718 gk20a_fecs_trace_set_read_index(g, read); 719 } 720 } 721 722 nvgpu_thread_stop(&trace->poll_task); 723 724 } 725 nvgpu_mutex_release(&trace->enable_lock); 726 727 return -EPERM; 728} 729 730bool gk20a_fecs_trace_is_enabled(struct gk20a *g) 731{ 732 struct gk20a_fecs_trace *trace = g->fecs_trace; 733 734 return (trace && nvgpu_thread_is_running(&trace->poll_task)); 735} 736 737void gk20a_fecs_trace_reset_buffer(struct gk20a *g) 738{ 739 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); 740 741 gk20a_fecs_trace_set_read_index(g, 742 gk20a_fecs_trace_get_write_index(g)); 743} 744#endif /* CONFIG_GK20A_CTXSW_TRACE */
diff --git a/include/gk20a/fecs_trace_gk20a.h b/include/gk20a/fecs_trace_gk20a.h
deleted file mode 100644
index d33e619..0000000
--- a/include/gk20a/fecs_trace_gk20a.h
+++ /dev/null
@@ -1,45 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_GK20A_FECS_TRACE_GK20A_H 24#define NVGPU_GK20A_FECS_TRACE_GK20A_H 25 26struct gk20a; 27struct channel_gk20a; 28struct nvgpu_gpu_ctxsw_trace_filter; 29 30int gk20a_fecs_trace_poll(struct gk20a *g); 31int gk20a_fecs_trace_init(struct gk20a *g); 32int gk20a_fecs_trace_bind_channel(struct gk20a *g, 33 struct channel_gk20a *ch); 34int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch); 35int gk20a_fecs_trace_reset(struct gk20a *g); 36int gk20a_fecs_trace_deinit(struct gk20a *g); 37int gk20a_gr_max_entries(struct gk20a *g, 38 struct nvgpu_gpu_ctxsw_trace_filter *filter); 39int gk20a_fecs_trace_enable(struct gk20a *g); 40int gk20a_fecs_trace_disable(struct gk20a *g); 41bool gk20a_fecs_trace_is_enabled(struct gk20a *g); 42size_t gk20a_fecs_trace_buffer_size(struct gk20a *g); 43void gk20a_fecs_trace_reset_buffer(struct gk20a *g); 44 45#endif /* NVGPU_GK20A_FECS_TRACE_GK20A_H */
diff --git a/include/gk20a/fence_gk20a.c b/include/gk20a/fence_gk20a.c
deleted file mode 100644
index af42130..0000000
--- a/include/gk20a/fence_gk20a.c
+++ /dev/null
@@ -1,319 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include "fence_gk20a.h" 24 25#include <nvgpu/semaphore.h> 26#include <nvgpu/kmem.h> 27#include <nvgpu/soc.h> 28#include <nvgpu/nvhost.h> 29#include <nvgpu/barrier.h> 30#include <nvgpu/os_fence.h> 31#include <nvgpu/channel.h> 32 33#include "gk20a.h" 34 35struct gk20a_fence_ops { 36 int (*wait)(struct gk20a_fence *, long timeout); 37 bool (*is_expired)(struct gk20a_fence *); 38 void *(*free)(struct nvgpu_ref *); 39}; 40 41static void gk20a_fence_free(struct nvgpu_ref *ref) 42{ 43 struct gk20a_fence *f = 44 container_of(ref, struct gk20a_fence, ref); 45 struct gk20a *g = f->g; 46 47 if (nvgpu_os_fence_is_initialized(&f->os_fence)) { 48 f->os_fence.ops->drop_ref(&f->os_fence); 49 } 50 51 if (f->semaphore) { 52 nvgpu_semaphore_put(f->semaphore); 53 } 54 55 if (f->allocator) { 56 if (nvgpu_alloc_initialized(f->allocator)) { 57 nvgpu_free(f->allocator, (u64)(uintptr_t)f); 58 } 59 } else { 60 nvgpu_kfree(g, f); 61 } 62} 63 64void gk20a_fence_put(struct gk20a_fence *f) 65{ 66 if (f) { 67 nvgpu_ref_put(&f->ref, gk20a_fence_free); 68 } 69} 70 71struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f) 72{ 73 if (f) { 74 nvgpu_ref_get(&f->ref); 75 } 76 return f; 77} 78 79inline bool gk20a_fence_is_valid(struct gk20a_fence *f) 80{ 81 bool valid = f->valid; 82 83 nvgpu_smp_rmb(); 84 return valid; 85} 86 87int gk20a_fence_install_fd(struct gk20a_fence *f, int fd) 88{ 89 if (!f || !gk20a_fence_is_valid(f) || 90 !nvgpu_os_fence_is_initialized(&f->os_fence)) { 91 return -EINVAL; 92 } 93 94 f->os_fence.ops->install_fence(&f->os_fence, fd); 95 96 return 0; 97} 98 99int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f, 100 unsigned long timeout) 101{ 102 if (f && gk20a_fence_is_valid(f)) { 103 if (!nvgpu_platform_is_silicon(g)) { 104 timeout = MAX_SCHEDULE_TIMEOUT; 105 } 106 return f->ops->wait(f, timeout); 107 } 108 return 0; 109} 110 111bool gk20a_fence_is_expired(struct gk20a_fence *f) 112{ 113 if (f && gk20a_fence_is_valid(f) && f->ops) { 114 return 
f->ops->is_expired(f); 115 } else { 116 return true; 117 } 118} 119 120int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count) 121{ 122 int err; 123 size_t size; 124 struct gk20a_fence *fence_pool = NULL; 125 126 size = sizeof(struct gk20a_fence); 127 if (count <= UINT_MAX / size) { 128 size = count * size; 129 fence_pool = nvgpu_vzalloc(c->g, size); 130 } 131 132 if (!fence_pool) { 133 return -ENOMEM; 134 } 135 136 err = nvgpu_lockless_allocator_init(c->g, &c->fence_allocator, 137 "fence_pool", (size_t)fence_pool, size, 138 sizeof(struct gk20a_fence), 0); 139 if (err) { 140 goto fail; 141 } 142 143 return 0; 144 145fail: 146 nvgpu_vfree(c->g, fence_pool); 147 return err; 148} 149 150void gk20a_free_fence_pool(struct channel_gk20a *c) 151{ 152 if (nvgpu_alloc_initialized(&c->fence_allocator)) { 153 struct gk20a_fence *fence_pool; 154 fence_pool = (struct gk20a_fence *)(uintptr_t) 155 nvgpu_alloc_base(&c->fence_allocator); 156 nvgpu_alloc_destroy(&c->fence_allocator); 157 nvgpu_vfree(c->g, fence_pool); 158 } 159} 160 161struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c) 162{ 163 struct gk20a_fence *fence = NULL; 164 165 if (channel_gk20a_is_prealloc_enabled(c)) { 166 if (nvgpu_alloc_initialized(&c->fence_allocator)) { 167 fence = (struct gk20a_fence *)(uintptr_t) 168 nvgpu_alloc(&c->fence_allocator, 169 sizeof(struct gk20a_fence)); 170 171 /* clear the node and reset the allocator pointer */ 172 if (fence) { 173 memset(fence, 0, sizeof(*fence)); 174 fence->allocator = &c->fence_allocator; 175 } 176 } 177 } else { 178 fence = nvgpu_kzalloc(c->g, sizeof(struct gk20a_fence)); 179 } 180 181 if (fence) { 182 nvgpu_ref_init(&fence->ref); 183 fence->g = c->g; 184 } 185 186 return fence; 187} 188 189void gk20a_init_fence(struct gk20a_fence *f, 190 const struct gk20a_fence_ops *ops, 191 struct nvgpu_os_fence os_fence) 192{ 193 if (!f) { 194 return; 195 } 196 f->ops = ops; 197 f->syncpt_id = -1; 198 f->semaphore = NULL; 199 f->os_fence = os_fence; 
200} 201 202/* Fences that are backed by GPU semaphores: */ 203 204static int nvgpu_semaphore_fence_wait(struct gk20a_fence *f, long timeout) 205{ 206 if (!nvgpu_semaphore_is_acquired(f->semaphore)) { 207 return 0; 208 } 209 210 return NVGPU_COND_WAIT_INTERRUPTIBLE( 211 f->semaphore_wq, 212 !nvgpu_semaphore_is_acquired(f->semaphore), 213 timeout); 214} 215 216static bool nvgpu_semaphore_fence_is_expired(struct gk20a_fence *f) 217{ 218 return !nvgpu_semaphore_is_acquired(f->semaphore); 219} 220 221static const struct gk20a_fence_ops nvgpu_semaphore_fence_ops = { 222 .wait = &nvgpu_semaphore_fence_wait, 223 .is_expired = &nvgpu_semaphore_fence_is_expired, 224}; 225 226/* This function takes ownership of the semaphore as well as the os_fence */ 227int gk20a_fence_from_semaphore( 228 struct gk20a_fence *fence_out, 229 struct nvgpu_semaphore *semaphore, 230 struct nvgpu_cond *semaphore_wq, 231 struct nvgpu_os_fence os_fence) 232{ 233 struct gk20a_fence *f = fence_out; 234 235 gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, os_fence); 236 if (!f) { 237 return -EINVAL; 238 } 239 240 241 f->semaphore = semaphore; 242 f->semaphore_wq = semaphore_wq; 243 244 /* commit previous writes before setting the valid flag */ 245 nvgpu_smp_wmb(); 246 f->valid = true; 247 248 return 0; 249} 250 251#ifdef CONFIG_TEGRA_GK20A_NVHOST 252/* Fences that are backed by host1x syncpoints: */ 253 254static int gk20a_syncpt_fence_wait(struct gk20a_fence *f, long timeout) 255{ 256 return nvgpu_nvhost_syncpt_wait_timeout_ext( 257 f->nvhost_dev, f->syncpt_id, f->syncpt_value, 258 (u32)timeout, NULL, NULL); 259} 260 261static bool gk20a_syncpt_fence_is_expired(struct gk20a_fence *f) 262{ 263 264 /* 265 * In cases we don't register a notifier, we can't expect the 266 * syncpt value to be updated. For this case, we force a read 267 * of the value from HW, and then check for expiration. 
268 */ 269 if (!nvgpu_nvhost_syncpt_is_expired_ext(f->nvhost_dev, f->syncpt_id, 270 f->syncpt_value)) { 271 u32 val; 272 273 if (!nvgpu_nvhost_syncpt_read_ext_check(f->nvhost_dev, 274 f->syncpt_id, &val)) { 275 return nvgpu_nvhost_syncpt_is_expired_ext( 276 f->nvhost_dev, 277 f->syncpt_id, f->syncpt_value); 278 } 279 } 280 281 return true; 282} 283 284static const struct gk20a_fence_ops gk20a_syncpt_fence_ops = { 285 .wait = &gk20a_syncpt_fence_wait, 286 .is_expired = &gk20a_syncpt_fence_is_expired, 287}; 288 289/* This function takes the ownership of the os_fence */ 290int gk20a_fence_from_syncpt( 291 struct gk20a_fence *fence_out, 292 struct nvgpu_nvhost_dev *nvhost_dev, 293 u32 id, u32 value, struct nvgpu_os_fence os_fence) 294{ 295 struct gk20a_fence *f = fence_out; 296 297 gk20a_init_fence(f, &gk20a_syncpt_fence_ops, os_fence); 298 if (!f) 299 return -EINVAL; 300 301 f->nvhost_dev = nvhost_dev; 302 f->syncpt_id = id; 303 f->syncpt_value = value; 304 305 /* commit previous writes before setting the valid flag */ 306 nvgpu_smp_wmb(); 307 f->valid = true; 308 309 return 0; 310} 311#else 312int gk20a_fence_from_syncpt( 313 struct gk20a_fence *fence_out, 314 struct nvgpu_nvhost_dev *nvhost_dev, 315 u32 id, u32 value, struct nvgpu_os_fence os_fence) 316{ 317 return -EINVAL; 318} 319#endif
diff --git a/include/gk20a/fence_gk20a.h b/include/gk20a/fence_gk20a.h
deleted file mode 100644
index 0311279..0000000
--- a/include/gk20a/fence_gk20a.h
+++ /dev/null
@@ -1,100 +0,0 @@ 1/* 2 * drivers/video/tegra/host/gk20a/fence_gk20a.h 3 * 4 * GK20A Fences 5 * 6 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * DEALINGS IN THE SOFTWARE. 
25 */ 26#ifndef NVGPU_GK20A_FENCE_GK20A_H 27#define NVGPU_GK20A_FENCE_GK20A_H 28 29#include <nvgpu/types.h> 30#include <nvgpu/kref.h> 31#include <nvgpu/os_fence.h> 32 33struct platform_device; 34struct nvgpu_semaphore; 35struct channel_gk20a; 36struct gk20a; 37struct nvgpu_os_fence; 38 39struct gk20a_fence_ops; 40 41struct gk20a_fence { 42 struct gk20a *g; 43 44 /* Valid for all fence types: */ 45 bool valid; 46 struct nvgpu_ref ref; 47 const struct gk20a_fence_ops *ops; 48 49 struct nvgpu_os_fence os_fence; 50 51 /* Valid for fences created from semaphores: */ 52 struct nvgpu_semaphore *semaphore; 53 struct nvgpu_cond *semaphore_wq; 54 55 /* Valid for fences created from syncpoints: */ 56 struct nvgpu_nvhost_dev *nvhost_dev; 57 u32 syncpt_id; 58 u32 syncpt_value; 59 60 /* Valid for fences part of a pre-allocated fence pool */ 61 struct nvgpu_allocator *allocator; 62}; 63 64/* Fences can be created from semaphores or syncpoint (id, value) pairs */ 65int gk20a_fence_from_semaphore( 66 struct gk20a_fence *fence_out, 67 struct nvgpu_semaphore *semaphore, 68 struct nvgpu_cond *semaphore_wq, 69 struct nvgpu_os_fence os_fence); 70 71int gk20a_fence_from_syncpt( 72 struct gk20a_fence *fence_out, 73 struct nvgpu_nvhost_dev *nvhost_dev, 74 u32 id, u32 value, 75 struct nvgpu_os_fence os_fence); 76 77int gk20a_alloc_fence_pool( 78 struct channel_gk20a *c, 79 unsigned int count); 80 81void gk20a_free_fence_pool( 82 struct channel_gk20a *c); 83 84struct gk20a_fence *gk20a_alloc_fence( 85 struct channel_gk20a *c); 86 87void gk20a_init_fence(struct gk20a_fence *f, 88 const struct gk20a_fence_ops *ops, 89 struct nvgpu_os_fence os_fence); 90 91/* Fence operations */ 92void gk20a_fence_put(struct gk20a_fence *f); 93struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f); 94int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f, 95 unsigned long timeout); 96bool gk20a_fence_is_expired(struct gk20a_fence *f); 97bool gk20a_fence_is_valid(struct gk20a_fence *f); 98int 
gk20a_fence_install_fd(struct gk20a_fence *f, int fd); 99 100#endif /* NVGPU_GK20A_FENCE_GK20A_H */
diff --git a/include/gk20a/fifo_gk20a.c b/include/gk20a/fifo_gk20a.c
deleted file mode 100644
index 77babc7..0000000
--- a/include/gk20a/fifo_gk20a.c
+++ /dev/null
@@ -1,4641 +0,0 @@ 1/* 2 * GK20A Graphics FIFO (gr host) 3 * 4 * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <trace/events/gk20a.h> 26 27#include <nvgpu/mm.h> 28#include <nvgpu/dma.h> 29#include <nvgpu/timers.h> 30#include <nvgpu/semaphore.h> 31#include <nvgpu/enabled.h> 32#include <nvgpu/kmem.h> 33#include <nvgpu/log.h> 34#include <nvgpu/soc.h> 35#include <nvgpu/atomic.h> 36#include <nvgpu/bug.h> 37#include <nvgpu/log2.h> 38#include <nvgpu/debug.h> 39#include <nvgpu/nvhost.h> 40#include <nvgpu/barrier.h> 41#include <nvgpu/ctxsw_trace.h> 42#include <nvgpu/error_notifier.h> 43#include <nvgpu/ptimer.h> 44#include <nvgpu/io.h> 45#include <nvgpu/utils.h> 46#include <nvgpu/channel.h> 47#include <nvgpu/unit.h> 48#include <nvgpu/power_features/power_features.h> 49#include <nvgpu/power_features/cg.h> 50 51#include "gk20a.h" 52#include "mm_gk20a.h" 53 54#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h> 55#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h> 56#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h> 57#include <nvgpu/hw/gk20a/hw_ram_gk20a.h> 58#include <nvgpu/hw/gk20a/hw_top_gk20a.h> 59#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 60 61#define FECS_METHOD_WFI_RESTORE 0x80000 62#define FECS_MAILBOX_0_ACK_RESTORE 0x4 63 64 65static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg); 66 67static const char *const pbdma_intr_fault_type_desc[] = { 68 "MEMREQ timeout", "MEMACK_TIMEOUT", "MEMACK_EXTRA acks", 69 "MEMDAT_TIMEOUT", "MEMDAT_EXTRA acks", "MEMFLUSH noack", 70 "MEMOP noack", "LBCONNECT noack", "NONE - was LBREQ", 71 "LBACK_TIMEOUT", "LBACK_EXTRA acks", "LBDAT_TIMEOUT", 72 "LBDAT_EXTRA acks", "GPFIFO won't fit", "GPPTR invalid", 73 "GPENTRY invalid", "GPCRC mismatch", "PBPTR get>put", 74 "PBENTRY invld", "PBCRC mismatch", "NONE - was XBARC", 75 "METHOD invld", "METHODCRC mismat", "DEVICE sw method", 76 "[ENGINE]", "SEMAPHORE invlid", "ACQUIRE timeout", 77 "PRI forbidden", "ILLEGAL SYNCPT", "[NO_CTXSW_SEG]", 78 "PBSEG badsplit", "SIGNATURE bad" 79}; 80 81u32 gk20a_fifo_get_engine_ids(struct gk20a *g, 82 u32 engine_id[], u32 engine_id_sz, 83 u32 engine_enum) 84{ 85 
struct fifo_gk20a *f = NULL; 86 u32 instance_cnt = 0; 87 u32 engine_id_idx; 88 u32 active_engine_id = 0; 89 struct fifo_engine_info_gk20a *info = NULL; 90 91 if (g && engine_id_sz && (engine_enum < ENGINE_INVAL_GK20A)) { 92 f = &g->fifo; 93 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { 94 active_engine_id = f->active_engines_list[engine_id_idx]; 95 info = &f->engine_info[active_engine_id]; 96 97 if (info->engine_enum == engine_enum) { 98 if (instance_cnt < engine_id_sz) { 99 engine_id[instance_cnt] = active_engine_id; 100 ++instance_cnt; 101 } else { 102 nvgpu_log_info(g, "warning engine_id table sz is small %d", 103 engine_id_sz); 104 } 105 } 106 } 107 } 108 return instance_cnt; 109} 110 111struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 engine_id) 112{ 113 struct fifo_gk20a *f = NULL; 114 u32 engine_id_idx; 115 struct fifo_engine_info_gk20a *info = NULL; 116 117 if (!g) { 118 return info; 119 } 120 121 f = &g->fifo; 122 123 if (engine_id < f->max_engines) { 124 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { 125 if (engine_id == f->active_engines_list[engine_id_idx]) { 126 info = &f->engine_info[engine_id]; 127 break; 128 } 129 } 130 } 131 132 if (!info) { 133 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); 134 } 135 136 return info; 137} 138 139bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id) 140{ 141 struct fifo_gk20a *f = NULL; 142 u32 engine_id_idx; 143 bool valid = false; 144 145 if (!g) { 146 return valid; 147 } 148 149 f = &g->fifo; 150 151 if (engine_id < f->max_engines) { 152 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { 153 if (engine_id == f->active_engines_list[engine_id_idx]) { 154 valid = true; 155 break; 156 } 157 } 158 } 159 160 if (!valid) { 161 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); 162 } 163 164 return valid; 165} 166 167u32 gk20a_fifo_get_gr_engine_id(struct 
gk20a *g) 168{ 169 u32 gr_engine_cnt = 0; 170 u32 gr_engine_id = FIFO_INVAL_ENGINE_ID; 171 172 /* Consider 1st available GR engine */ 173 gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id, 174 1, ENGINE_GR_GK20A); 175 176 if (!gr_engine_cnt) { 177 nvgpu_err(g, "No GR engine available on this device!"); 178 } 179 180 return gr_engine_id; 181} 182 183u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g) 184{ 185 u32 reset_mask = 0; 186 u32 engine_enum = ENGINE_INVAL_GK20A; 187 struct fifo_gk20a *f = NULL; 188 u32 engine_id_idx; 189 struct fifo_engine_info_gk20a *engine_info; 190 u32 active_engine_id = 0; 191 192 if (!g) { 193 return reset_mask; 194 } 195 196 f = &g->fifo; 197 198 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { 199 active_engine_id = f->active_engines_list[engine_id_idx]; 200 engine_info = &f->engine_info[active_engine_id]; 201 engine_enum = engine_info->engine_enum; 202 203 if ((engine_enum == ENGINE_GRCE_GK20A) || 204 (engine_enum == ENGINE_ASYNC_CE_GK20A)) { 205 reset_mask |= engine_info->reset_mask; 206 } 207 } 208 209 return reset_mask; 210} 211 212u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g) 213{ 214 u32 ce_runlist_id = gk20a_fifo_get_gr_runlist_id(g); 215 u32 engine_enum = ENGINE_INVAL_GK20A; 216 struct fifo_gk20a *f = NULL; 217 u32 engine_id_idx; 218 struct fifo_engine_info_gk20a *engine_info; 219 u32 active_engine_id = 0; 220 221 if (!g) { 222 return ce_runlist_id; 223 } 224 225 f = &g->fifo; 226 227 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { 228 active_engine_id = f->active_engines_list[engine_id_idx]; 229 engine_info = &f->engine_info[active_engine_id]; 230 engine_enum = engine_info->engine_enum; 231 232 /* selecet last available ASYNC_CE if available */ 233 if (engine_enum == ENGINE_ASYNC_CE_GK20A) { 234 ce_runlist_id = engine_info->runlist_id; 235 } 236 } 237 238 return ce_runlist_id; 239} 240 241u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g) 242{ 243 
u32 gr_engine_cnt = 0; 244 u32 gr_engine_id = FIFO_INVAL_ENGINE_ID; 245 struct fifo_engine_info_gk20a *engine_info; 246 u32 gr_runlist_id = ~0; 247 248 /* Consider 1st available GR engine */ 249 gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id, 250 1, ENGINE_GR_GK20A); 251 252 if (!gr_engine_cnt) { 253 nvgpu_err(g, 254 "No GR engine available on this device!"); 255 goto end; 256 } 257 258 engine_info = gk20a_fifo_get_engine_info(g, gr_engine_id); 259 260 if (engine_info) { 261 gr_runlist_id = engine_info->runlist_id; 262 } else { 263 nvgpu_err(g, 264 "gr_engine_id is not in active list/invalid %d", gr_engine_id); 265 } 266 267end: 268 return gr_runlist_id; 269} 270 271bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id) 272{ 273 struct fifo_gk20a *f = NULL; 274 u32 engine_id_idx; 275 u32 active_engine_id; 276 struct fifo_engine_info_gk20a *engine_info; 277 278 if (!g) { 279 return false; 280 } 281 282 f = &g->fifo; 283 284 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { 285 active_engine_id = f->active_engines_list[engine_id_idx]; 286 engine_info = gk20a_fifo_get_engine_info(g, active_engine_id); 287 if (engine_info && (engine_info->runlist_id == runlist_id)) { 288 return true; 289 } 290 } 291 292 return false; 293} 294 295/* 296 * Link engine IDs to MMU IDs and vice versa. 
297 */ 298 299static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) 300{ 301 u32 fault_id = FIFO_INVAL_ENGINE_ID; 302 struct fifo_engine_info_gk20a *engine_info; 303 304 engine_info = gk20a_fifo_get_engine_info(g, engine_id); 305 306 if (engine_info) { 307 fault_id = engine_info->fault_id; 308 } else { 309 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); 310 } 311 return fault_id; 312} 313 314static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id) 315{ 316 u32 engine_id; 317 u32 active_engine_id; 318 struct fifo_engine_info_gk20a *engine_info; 319 struct fifo_gk20a *f = &g->fifo; 320 321 for (engine_id = 0; engine_id < f->num_engines; engine_id++) { 322 active_engine_id = f->active_engines_list[engine_id]; 323 engine_info = &g->fifo.engine_info[active_engine_id]; 324 325 if (engine_info->fault_id == fault_id) { 326 break; 327 } 328 active_engine_id = FIFO_INVAL_ENGINE_ID; 329 } 330 return active_engine_id; 331} 332 333int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, 334 u32 *inst_id) 335{ 336 int ret = ENGINE_INVAL_GK20A; 337 338 nvgpu_log_info(g, "engine type %d", engine_type); 339 if (engine_type == top_device_info_type_enum_graphics_v()) { 340 ret = ENGINE_GR_GK20A; 341 } else if ((engine_type >= top_device_info_type_enum_copy0_v()) && 342 (engine_type <= top_device_info_type_enum_copy2_v())) { 343 /* Lets consider all the CE engine have separate runlist at this point 344 * We can identify the ENGINE_GRCE_GK20A type CE using runlist_id 345 * comparsion logic with GR runlist_id in init_engine_info() */ 346 ret = ENGINE_ASYNC_CE_GK20A; 347 /* inst_id starts from CE0 to CE2 */ 348 if (inst_id) { 349 *inst_id = (engine_type - top_device_info_type_enum_copy0_v()); 350 } 351 } 352 353 return ret; 354} 355 356int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) 357{ 358 struct gk20a *g = f->g; 359 u32 i; 360 u32 max_info_entries = top_device_info__size_1_v(); 361 u32 engine_enum = 
/*
 * Parse the top_device_info table from HW and populate f->engine_info[]
 * and f->active_engines_list[]. Entries may be chained: attribute rows
 * (enum / engine_type / data) accumulate into locals until a row with the
 * chain bit clear commits one engine record.
 *
 * Returns 0 on success, -EINVAL when a runlist has no PBDMA mapped.
 */
int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
{
	struct gk20a *g = f->g;
	u32 i;
	u32 max_info_entries = top_device_info__size_1_v();
	u32 engine_enum = ENGINE_INVAL_GK20A;
	u32 engine_id = FIFO_INVAL_ENGINE_ID;
	u32 runlist_id = ~0;
	u32 pbdma_id = ~0;
	u32 intr_id = ~0;
	u32 reset_id = ~0;
	u32 inst_id = 0;
	u32 pri_base = 0;
	u32 fault_id = 0;
	u32 gr_runlist_id = ~0;
	bool found_pbdma_for_runlist = false;

	nvgpu_log_fn(g, " ");

	f->num_engines = 0;

	for (i = 0; i < max_info_entries; i++) {
		u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
		u32 entry = top_device_info_entry_v(table_entry);
		u32 runlist_bit;

		if (entry == top_device_info_entry_enum_v()) {
			/* "enum" rows carry engine/runlist/intr/reset ids,
			 * each guarded by its own valid bit. */
			if (top_device_info_engine_v(table_entry)) {
				engine_id =
					top_device_info_engine_enum_v(table_entry);
				nvgpu_log_info(g, "info: engine_id %d",
					top_device_info_engine_enum_v(table_entry));
			}


			if (top_device_info_runlist_v(table_entry)) {
				runlist_id =
					top_device_info_runlist_enum_v(table_entry);
				nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id);

				runlist_bit = BIT(runlist_id);

				/* pick the first PBDMA serving this runlist
				 * (pbdma_map was read before this call) */
				found_pbdma_for_runlist = false;
				for (pbdma_id = 0; pbdma_id < f->num_pbdma;
						pbdma_id++) {
					if (f->pbdma_map[pbdma_id] &
							runlist_bit) {
						nvgpu_log_info(g,
							"gr info: pbdma_map[%d]=%d",
							pbdma_id,
							f->pbdma_map[pbdma_id]);
						found_pbdma_for_runlist = true;
						break;
					}
				}

				if (!found_pbdma_for_runlist) {
					nvgpu_err(g, "busted pbdma map");
					return -EINVAL;
				}
			}

			if (top_device_info_intr_v(table_entry)) {
				intr_id =
					top_device_info_intr_enum_v(table_entry);
				nvgpu_log_info(g, "gr info: intr_id %d", intr_id);
			}

			if (top_device_info_reset_v(table_entry)) {
				reset_id =
					top_device_info_reset_enum_v(table_entry);
				nvgpu_log_info(g, "gr info: reset_id %d",
					reset_id);
			}
		} else if (entry == top_device_info_entry_engine_type_v()) {
			u32 engine_type =
				top_device_info_type_enum_v(table_entry);
			engine_enum =
				g->ops.fifo.engine_enum_from_type(g,
					engine_type, &inst_id);
		} else if (entry == top_device_info_entry_data_v()) {
			/* gk20a doesn't support device_info_data packet parsing */
			if (g->ops.fifo.device_info_data_parse) {
				g->ops.fifo.device_info_data_parse(g,
					table_entry, &inst_id, &pri_base,
					&fault_id);
			}
		}

		/* chain bit clear => this row ends one engine description:
		 * commit the accumulated attributes. */
		if (!top_device_info_chain_v(table_entry)) {
			if (engine_enum < ENGINE_INVAL_GK20A) {
				struct fifo_engine_info_gk20a *info =
					&g->fifo.engine_info[engine_id];

				info->intr_mask |= BIT(intr_id);
				info->reset_mask |= BIT(reset_id);
				info->runlist_id = runlist_id;
				info->pbdma_id = pbdma_id;
				info->inst_id = inst_id;
				info->pri_base = pri_base;

				if (engine_enum == ENGINE_GR_GK20A) {
					gr_runlist_id = runlist_id;
				}

				/* GR and GR_COPY shares same runlist_id */
				if ((engine_enum == ENGINE_ASYNC_CE_GK20A) &&
					(gr_runlist_id == runlist_id)) {
					engine_enum = ENGINE_GRCE_GK20A;
				}

				info->engine_enum = engine_enum;

				/* NOTE(review): 0x1b looks like the HW default
				 * GRCE fault id when the data packet supplied
				 * none — confirm against HW manuals. */
				if (!fault_id && (engine_enum == ENGINE_GRCE_GK20A)) {
					fault_id = 0x1b;
				}
				info->fault_id = fault_id;

				/* engine_id starts from 0 to NV_HOST_NUM_ENGINES */
				f->active_engines_list[f->num_engines] = engine_id;

				++f->num_engines;

				engine_enum = ENGINE_INVAL_GK20A;
			}
		}
	}

	return 0;
}

/*
 * Interrupt mask of one active engine, or 0 when @act_eng_id is not in
 * the active list.
 */
u32 gk20a_fifo_act_eng_interrupt_mask(struct gk20a *g, u32 act_eng_id)
{
	struct fifo_engine_info_gk20a *engine_info = NULL;

	engine_info = gk20a_fifo_get_engine_info(g, act_eng_id);
	if (engine_info) {
		return engine_info->intr_mask;
	}

	return 0;
}
g->fifo.engine_info[active_engine_id].intr_mask; 510 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; 511 if (((engine_enum == ENGINE_GRCE_GK20A) || 512 (engine_enum == ENGINE_ASYNC_CE_GK20A)) && 513 (!g->ops.ce2.isr_stall || !g->ops.ce2.isr_nonstall)) { 514 continue; 515 } 516 517 eng_intr_mask |= intr_mask; 518 } 519 520 return eng_intr_mask; 521} 522 523void gk20a_fifo_delete_runlist(struct fifo_gk20a *f) 524{ 525 u32 i; 526 u32 runlist_id; 527 struct fifo_runlist_info_gk20a *runlist; 528 struct gk20a *g = NULL; 529 530 if (!f || !f->runlist_info) { 531 return; 532 } 533 534 g = f->g; 535 536 for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) { 537 runlist = &f->runlist_info[runlist_id]; 538 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { 539 nvgpu_dma_free(g, &runlist->mem[i]); 540 } 541 542 nvgpu_kfree(g, runlist->active_channels); 543 runlist->active_channels = NULL; 544 545 nvgpu_kfree(g, runlist->active_tsgs); 546 runlist->active_tsgs = NULL; 547 548 nvgpu_mutex_destroy(&runlist->runlist_lock); 549 550 } 551 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) * 552 f->max_runlists)); 553 554 nvgpu_kfree(g, f->runlist_info); 555 f->runlist_info = NULL; 556 f->max_runlists = 0; 557} 558 559static void gk20a_remove_fifo_support(struct fifo_gk20a *f) 560{ 561 struct gk20a *g = f->g; 562 unsigned int i = 0; 563 564 nvgpu_log_fn(g, " "); 565 566 nvgpu_channel_worker_deinit(g); 567 /* 568 * Make sure all channels are closed before deleting them. 569 */ 570 for (; i < f->num_channels; i++) { 571 struct channel_gk20a *c = f->channel + i; 572 struct tsg_gk20a *tsg = f->tsg + i; 573 574 /* 575 * Could race but worst that happens is we get an error message 576 * from gk20a_free_channel() complaining about multiple closes. 
/*
 * Tear down all fifo SW state: kill still-referenced channels, destroy
 * per-channel/TSG locks, free channel/tsg/userd/runlist/engine tables.
 * Registered as f->remove_support; inverse of the setup_sw path.
 */
static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
{
	struct gk20a *g = f->g;
	unsigned int i = 0;

	nvgpu_log_fn(g, " ");

	nvgpu_channel_worker_deinit(g);
	/*
	 * Make sure all channels are closed before deleting them.
	 */
	for (; i < f->num_channels; i++) {
		struct channel_gk20a *c = f->channel + i;
		struct tsg_gk20a *tsg = f->tsg + i;

		/*
		 * Could race but worst that happens is we get an error message
		 * from gk20a_free_channel() complaining about multiple closes.
		 */
		if (c->referenceable) {
			__gk20a_channel_kill(c);
		}

		nvgpu_mutex_destroy(&tsg->event_id_list_lock);

		nvgpu_mutex_destroy(&c->ioctl_lock);
		nvgpu_mutex_destroy(&c->joblist.cleanup_lock);
		nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock);
		nvgpu_mutex_destroy(&c->sync_lock);
#if defined(CONFIG_GK20A_CYCLE_STATS)
		nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
		nvgpu_mutex_destroy(&c->cs_client_mutex);
#endif
		nvgpu_mutex_destroy(&c->dbg_s_lock);

	}

	nvgpu_vfree(g, f->channel);
	nvgpu_vfree(g, f->tsg);
	/* userd was mapped through bar1 only when bar1 is supported */
	if (g->ops.mm.is_bar1_supported(g)) {
		nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
	} else {
		nvgpu_dma_free(g, &f->userd);
	}

	gk20a_fifo_delete_runlist(f);

	nvgpu_kfree(g, f->pbdma_map);
	f->pbdma_map = NULL;
	nvgpu_kfree(g, f->engine_info);
	f->engine_info = NULL;
	nvgpu_kfree(g, f->active_engines_list);
	f->active_engines_list = NULL;
}
/* reads info from hardware and fills in pbmda exception info record */
static inline void get_exception_pbdma_info(
	struct gk20a *g,
	struct fifo_engine_info_gk20a *eng_info)
{
	struct fifo_pbdma_exception_info_gk20a *e =
		&eng_info->pbdma_exception_info;

	/* snapshot the raw status register, then decode its fields */
	u32 pbdma_status_r = e->status_r = gk20a_readl(g,
		fifo_pbdma_status_r(eng_info->pbdma_id));
	e->id = fifo_pbdma_status_id_v(pbdma_status_r); /* vs. id_hw_v()? */
	/* id/next_id are either a channel id or a TSG id, per the type bit */
	e->id_is_chid = fifo_pbdma_status_id_type_v(pbdma_status_r) ==
		fifo_pbdma_status_id_type_chid_v();
	e->chan_status_v = fifo_pbdma_status_chan_status_v(pbdma_status_r);
	e->next_id_is_chid =
		fifo_pbdma_status_next_id_type_v(pbdma_status_r) ==
		fifo_pbdma_status_next_id_type_chid_v();
	e->next_id = fifo_pbdma_status_next_id_v(pbdma_status_r);
	e->chsw_in_progress =
		fifo_pbdma_status_chsw_v(pbdma_status_r) ==
		fifo_pbdma_status_chsw_in_progress_v();
}

/*
 * Refresh the PBDMA exception snapshot for @eng_info and log the decoded
 * fields at function-trace level.
 */
static void fifo_pbdma_exception_status(struct gk20a *g,
	struct fifo_engine_info_gk20a *eng_info)
{
	struct fifo_pbdma_exception_info_gk20a *e;
	get_exception_pbdma_info(g, eng_info);
	e = &eng_info->pbdma_exception_info;

	nvgpu_log_fn(g, "pbdma_id %d, "
		"id_type %s, id %d, chan_status %d, "
		"next_id_type %s, next_id %d, "
		"chsw_in_progress %d",
		eng_info->pbdma_id,
		e->id_is_chid ? "chid" : "tsgid", e->id, e->chan_status_v,
		e->next_id_is_chid ? "chid" : "tsgid", e->next_id,
		e->chsw_in_progress);
}
*/ 664 e->id_is_chid = fifo_engine_status_id_type_v(engine_status_r) == 665 fifo_engine_status_id_type_chid_v(); 666 e->ctx_status_v = fifo_engine_status_ctx_status_v(engine_status_r); 667 e->faulted = 668 fifo_engine_status_faulted_v(engine_status_r) == 669 fifo_engine_status_faulted_true_v(); 670 e->idle = 671 fifo_engine_status_engine_v(engine_status_r) == 672 fifo_engine_status_engine_idle_v(); 673 e->ctxsw_in_progress = 674 fifo_engine_status_ctxsw_v(engine_status_r) == 675 fifo_engine_status_ctxsw_in_progress_v(); 676} 677 678static void fifo_engine_exception_status(struct gk20a *g, 679 struct fifo_engine_info_gk20a *eng_info) 680{ 681 struct fifo_engine_exception_info_gk20a *e; 682 get_exception_engine_info(g, eng_info); 683 e = &eng_info->engine_exception_info; 684 685 nvgpu_log_fn(g, "engine_id %d, id_type %s, id %d, ctx_status %d, " 686 "faulted %d, idle %d, ctxsw_in_progress %d, ", 687 eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid", 688 e->id, e->ctx_status_v, 689 e->faulted, e->idle, e->ctxsw_in_progress); 690} 691 692static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) 693{ 694 struct fifo_runlist_info_gk20a *runlist; 695 struct fifo_engine_info_gk20a *engine_info; 696 unsigned int runlist_id; 697 u32 i; 698 size_t runlist_size; 699 u32 active_engine_id, pbdma_id, engine_id; 700 int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ? 
701 NVGPU_DMA_FORCE_CONTIGUOUS : 0; 702 int err = 0; 703 704 nvgpu_log_fn(g, " "); 705 706 f->max_runlists = g->ops.fifo.eng_runlist_base_size(); 707 f->runlist_info = nvgpu_kzalloc(g, 708 sizeof(struct fifo_runlist_info_gk20a) * 709 f->max_runlists); 710 if (!f->runlist_info) { 711 goto clean_up_runlist; 712 } 713 714 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) * 715 f->max_runlists)); 716 717 for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) { 718 runlist = &f->runlist_info[runlist_id]; 719 720 runlist->active_channels = 721 nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels, 722 BITS_PER_BYTE)); 723 if (!runlist->active_channels) { 724 goto clean_up_runlist; 725 } 726 727 runlist->active_tsgs = 728 nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels, 729 BITS_PER_BYTE)); 730 if (!runlist->active_tsgs) { 731 goto clean_up_runlist; 732 } 733 734 runlist_size = f->runlist_entry_size * f->num_runlist_entries; 735 nvgpu_log(g, gpu_dbg_info, 736 "runlist_entries %d runlist size %zu", 737 f->num_runlist_entries, runlist_size); 738 739 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { 740 err = nvgpu_dma_alloc_flags_sys(g, flags, 741 runlist_size, 742 &runlist->mem[i]); 743 if (err) { 744 nvgpu_err(g, "memory allocation failed"); 745 goto clean_up_runlist; 746 } 747 } 748 749 err = nvgpu_mutex_init(&runlist->runlist_lock); 750 if (err != 0) { 751 nvgpu_err(g, 752 "Error in runlist_lock mutex initialization"); 753 goto clean_up_runlist; 754 } 755 756 /* None of buffers is pinned if this value doesn't change. 757 Otherwise, one of them (cur_buffer) must have been pinned. 
*/ 758 runlist->cur_buffer = MAX_RUNLIST_BUFFERS; 759 760 for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) { 761 if (f->pbdma_map[pbdma_id] & BIT(runlist_id)) { 762 runlist->pbdma_bitmask |= BIT(pbdma_id); 763 } 764 } 765 nvgpu_log(g, gpu_dbg_info, "runlist %d : pbdma bitmask 0x%x", 766 runlist_id, runlist->pbdma_bitmask); 767 768 for (engine_id = 0; engine_id < f->num_engines; ++engine_id) { 769 active_engine_id = f->active_engines_list[engine_id]; 770 engine_info = &f->engine_info[active_engine_id]; 771 772 if (engine_info && engine_info->runlist_id == runlist_id) { 773 runlist->eng_bitmask |= BIT(active_engine_id); 774 } 775 } 776 nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x", 777 runlist_id, runlist->eng_bitmask); 778 } 779 780 nvgpu_log_fn(g, "done"); 781 return 0; 782 783clean_up_runlist: 784 gk20a_fifo_delete_runlist(f); 785 nvgpu_log_fn(g, "fail"); 786 return err; 787} 788 789u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g) 790{ 791 u32 intr_0_error_mask = 792 fifo_intr_0_bind_error_pending_f() | 793 fifo_intr_0_sched_error_pending_f() | 794 fifo_intr_0_chsw_error_pending_f() | 795 fifo_intr_0_fb_flush_timeout_pending_f() | 796 fifo_intr_0_dropped_mmu_fault_pending_f() | 797 fifo_intr_0_mmu_fault_pending_f() | 798 fifo_intr_0_lb_error_pending_f() | 799 fifo_intr_0_pio_error_pending_f(); 800 801 return intr_0_error_mask; 802} 803 804static u32 gk20a_fifo_intr_0_en_mask(struct gk20a *g) 805{ 806 u32 intr_0_en_mask; 807 808 intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g); 809 810 intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() | 811 fifo_intr_0_pbdma_intr_pending_f(); 812 813 return intr_0_en_mask; 814} 815 816int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) 817{ 818 u32 intr_stall; 819 u32 mask; 820 u32 timeout; 821 unsigned int i; 822 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); 823 824 nvgpu_log_fn(g, " "); 825 826 /* enable pmc pfifo */ 827 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, 
/*
 * Reset the FIFO unit and program its HW defaults: clock gating, FB and
 * PBDMA timeouts, ctxsw timeout, then clear and enable PBDMA and PFIFO
 * interrupts. Always returns 0.
 */
int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
{
	u32 intr_stall;
	u32 mask;
	u32 timeout;
	unsigned int i;
	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);

	nvgpu_log_fn(g, " ");

	/* enable pmc pfifo */
	g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_FIFO));

	nvgpu_cg_slcg_fifo_load_enable(g);

	nvgpu_cg_blcg_fifo_load_enable(g);

	/* max out the FB timeout period */
	timeout = gk20a_readl(g, fifo_fb_timeout_r());
	timeout = set_field(timeout, fifo_fb_timeout_period_m(),
			fifo_fb_timeout_period_max_f());
	nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
	gk20a_writel(g, fifo_fb_timeout_r(), timeout);

	/* write pbdma timeout value */
	for (i = 0; i < host_num_pbdma; i++) {
		timeout = gk20a_readl(g, pbdma_timeout_r(i));
		timeout = set_field(timeout, pbdma_timeout_period_m(),
				pbdma_timeout_period_max_f());
		nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", timeout);
		gk20a_writel(g, pbdma_timeout_r(i), timeout);
	}
	if (g->ops.fifo.apply_pb_timeout) {
		g->ops.fifo.apply_pb_timeout(g);
	}

	/* chip-specific ctxsw timeout hook, else program the gk20a default */
	if (g->ops.fifo.apply_ctxsw_timeout_intr) {
		g->ops.fifo.apply_ctxsw_timeout_intr(g);
	} else {
		timeout = g->fifo_eng_timeout_us;
		timeout = scale_ptimer(timeout,
			ptimer_scalingfactor10x(g->ptimer_src_freq));
		timeout |= fifo_eng_timeout_detection_enabled_f();
		gk20a_writel(g, fifo_eng_timeout_r(), timeout);
	}

	/* clear and enable pbdma interrupt */
	for (i = 0; i < host_num_pbdma; i++) {
		gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);

		/* LBREQ is not used; keep it masked */
		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
		intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
		gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
		intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
		/*
		 * For bug 2082123
		 * Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
		 */
		intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
	}

	/* reset runlist interrupts */
	gk20a_writel(g, fifo_intr_runlist_r(), ~0);

	/* clear and enable pfifo interrupt */
	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
	mask = gk20a_fifo_intr_0_en_mask(g);
	nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
	gk20a_writel(g, fifo_intr_en_0_r(), mask);
	/* NOTE(review): bit 31 of intr_en_1 — presumably the channel
	 * (nonstall) interrupt enable; confirm against HW headers. */
	nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);

	nvgpu_log_fn(g, "done");

	return 0;
}
f->max_engines * sizeof(u32)); 935 936 if (!(f->channel && f->tsg && f->pbdma_map && f->engine_info && 937 f->active_engines_list)) { 938 err = -ENOMEM; 939 goto clean_up; 940 } 941 memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32))); 942 943 /* pbdma map needs to be in place before calling engine info init */ 944 for (i = 0; i < f->num_pbdma; ++i) { 945 f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i)); 946 } 947 948 g->ops.fifo.init_engine_info(f); 949 950 err = init_runlist(g, f); 951 if (err) { 952 nvgpu_err(g, "failed to init runlist"); 953 goto clean_up; 954 } 955 956 nvgpu_init_list_node(&f->free_chs); 957 958 err = nvgpu_mutex_init(&f->free_chs_mutex); 959 if (err) { 960 nvgpu_err(g, "failed to init free_chs_mutex"); 961 goto clean_up; 962 } 963 964 for (chid = 0; chid < f->num_channels; chid++) { 965 gk20a_init_channel_support(g, chid); 966 gk20a_init_tsg_support(g, chid); 967 } 968 969 err = nvgpu_mutex_init(&f->tsg_inuse_mutex); 970 if (err) { 971 nvgpu_err(g, "failed to init tsg_inuse_mutex"); 972 goto clean_up; 973 } 974 975 f->remove_support = gk20a_remove_fifo_support; 976 977 f->deferred_reset_pending = false; 978 979 err = nvgpu_mutex_init(&f->deferred_reset_mutex); 980 if (err) { 981 nvgpu_err(g, "failed to init deferred_reset_mutex"); 982 goto clean_up; 983 } 984 985 nvgpu_log_fn(g, "done"); 986 return 0; 987 988clean_up: 989 nvgpu_err(g, "fail"); 990 991 nvgpu_vfree(g, f->channel); 992 f->channel = NULL; 993 nvgpu_vfree(g, f->tsg); 994 f->tsg = NULL; 995 nvgpu_kfree(g, f->pbdma_map); 996 f->pbdma_map = NULL; 997 nvgpu_kfree(g, f->engine_info); 998 f->engine_info = NULL; 999 nvgpu_kfree(g, f->active_engines_list); 1000 f->active_engines_list = NULL; 1001 1002 return err; 1003} 1004 1005int gk20a_init_fifo_setup_sw(struct gk20a *g) 1006{ 1007 struct fifo_gk20a *f = &g->fifo; 1008 unsigned int chid; 1009 u64 userd_base; 1010 int err = 0; 1011 1012 nvgpu_log_fn(g, " "); 1013 1014 if (f->sw_ready) { 1015 nvgpu_log_fn(g, "skip 
/*
 * SW-side fifo init entry point: runs the common setup, allocates the
 * USERD region (through bar1 when supported) and assigns each channel
 * its per-channel USERD slice, then starts the channel worker.
 * Idempotent: returns 0 immediately when already done (f->sw_ready).
 */
int gk20a_init_fifo_setup_sw(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned int chid;
	u64 userd_base;
	int err = 0;

	nvgpu_log_fn(g, " ");

	if (f->sw_ready) {
		nvgpu_log_fn(g, "skip init");
		return 0;
	}

	err = gk20a_init_fifo_setup_sw_common(g);
	if (err) {
		nvgpu_err(g, "fail: err: %d", err);
		return err;
	}

	if (g->ops.mm.is_bar1_supported(g)) {
		err = nvgpu_dma_alloc_map_sys(g->mm.bar1.vm,
				f->userd_entry_size * f->num_channels,
				&f->userd);
	} else {
		err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
				f->num_channels, &f->userd);
	}
	if (err) {
		nvgpu_err(g, "userd memory allocation failed");
		goto clean_up;
	}
	nvgpu_log(g, gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);

	/* carve the USERD block into fixed-size per-channel slices */
	userd_base = nvgpu_mem_get_addr(g, &f->userd);
	for (chid = 0; chid < f->num_channels; chid++) {
		f->channel[chid].userd_iova = userd_base +
			chid * f->userd_entry_size;
		f->channel[chid].userd_gpu_va =
			f->userd.gpu_va + chid * f->userd_entry_size;
	}

	err = nvgpu_channel_worker_init(g);
	if (err) {
		goto clean_up;
	}

	f->sw_ready = true;

	nvgpu_log_fn(g, "done");
	return 0;

clean_up:
	nvgpu_log_fn(g, "fail");
	if (nvgpu_mem_is_valid(&f->userd)) {
		if (g->ops.mm.is_bar1_supported(g)) {
			nvgpu_dma_unmap_free(g->mm.bar1.vm, &f->userd);
		} else {
			nvgpu_dma_free(g, &f->userd);
		}
	}

	return err;
}

/*
 * Ack pending runlist interrupts by writing the read value back
 * (write-1-to-clear semantics).
 */
void gk20a_fifo_handle_runlist_event(struct gk20a *g)
{
	u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());

	nvgpu_log(g, gpu_dbg_intr, "runlist event %08x",
		  runlist_event);

	gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
}
*cpu_vaddr = f->userd.cpu_va; 1094 1095 nvgpu_log_info(g, "test bar1 @ vaddr 0x%x", 1096 bar1_vaddr); 1097 1098 v = gk20a_bar1_readl(g, bar1_vaddr); 1099 1100 *cpu_vaddr = v1; 1101 nvgpu_mb(); 1102 1103 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) { 1104 nvgpu_err(g, "bar1 broken @ gk20a: CPU wrote 0x%x, \ 1105 GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr)); 1106 return -EINVAL; 1107 } 1108 1109 gk20a_bar1_writel(g, bar1_vaddr, v2); 1110 1111 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) { 1112 nvgpu_err(g, "bar1 broken @ gk20a: GPU wrote 0x%x, \ 1113 CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr); 1114 return -EINVAL; 1115 } 1116 1117 /* is it visible to the cpu? */ 1118 if (*cpu_vaddr != v2) { 1119 nvgpu_err(g, 1120 "cpu didn't see bar1 write @ %p!", 1121 cpu_vaddr); 1122 } 1123 1124 /* put it back */ 1125 gk20a_bar1_writel(g, bar1_vaddr, v); 1126 } 1127 1128 /*XXX all manner of flushes and caching worries, etc */ 1129 1130 /* set the base for the userd region now */ 1131 gk20a_writel(g, fifo_bar1_base_r(), 1132 fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) | 1133 fifo_bar1_base_valid_true_f()); 1134 1135 nvgpu_log_fn(g, "done"); 1136 1137 return 0; 1138} 1139 1140int gk20a_init_fifo_support(struct gk20a *g) 1141{ 1142 u32 err; 1143 1144 err = g->ops.fifo.setup_sw(g); 1145 if (err) { 1146 return err; 1147 } 1148 1149 if (g->ops.fifo.init_fifo_setup_hw) { 1150 err = g->ops.fifo.init_fifo_setup_hw(g); 1151 } 1152 if (err) { 1153 return err; 1154 } 1155 1156 return err; 1157} 1158 1159/* return with a reference to the channel, caller must put it back */ 1160struct channel_gk20a * 1161gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr) 1162{ 1163 struct fifo_gk20a *f = &g->fifo; 1164 unsigned int ci; 1165 if (unlikely(!f->channel)) { 1166 return NULL; 1167 } 1168 for (ci = 0; ci < f->num_channels; ci++) { 1169 struct channel_gk20a *ch; 1170 u64 ch_inst_ptr; 1171 1172 ch = gk20a_channel_from_id(g, ci); 1173 /* only alive channels are searched 
/* return with a reference to the channel, caller must put it back */
struct channel_gk20a *
gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned int ci;
	if (unlikely(!f->channel)) {
		return NULL;
	}
	for (ci = 0; ci < f->num_channels; ci++) {
		struct channel_gk20a *ch;
		u64 ch_inst_ptr;

		ch = gk20a_channel_from_id(g, ci);
		/* only alive channels are searched */
		if (!ch) {
			continue;
		}

		/* match on instance block address; on a hit the reference
		 * taken by gk20a_channel_from_id() is handed to the caller */
		ch_inst_ptr = nvgpu_inst_block_addr(g, &ch->inst_block);
		if (inst_ptr == ch_inst_ptr) {
			return ch;
		}

		gk20a_channel_put(ch);
	}
	return NULL;
}

/* fault info/descriptions.
 * tbd: move to setup
 * */
static const char * const gk20a_fault_type_descs[] = {
	 "pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
	 "pde size",
	 "pte",
	 "va limit viol",
	 "unbound inst",
	 "priv viol",
	 "ro viol",
	 "wo viol",
	 "pitch mask",
	 "work creation",
	 "bad aperture",
	 "compression failure",
	 "bad kind",
	 "region viol",
	 "dual ptes",
	 "poisoned",
};
/* engine descriptions */
static const char * const engine_subid_descs[] = {
	"gpc",
	"hub",
};

/* hub-side client names, indexed by mmu fault client id */
static const char * const gk20a_hub_client_descs[] = {
	"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
	"host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
	"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
	"scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
	"msenc", "d falcon", "sked", "a falcon", "n/a",
};

/* gpc-side client names, indexed by mmu fault client id */
static const char * const gk20a_gpc_client_descs[] = {
	"l1 0", "t1 0", "pe 0",
	"l1 1", "t1 1", "pe 1",
	"l1 2", "t1 2", "pe 2",
	"l1 3", "t1 3", "pe 3",
	"rast", "gcc", "gpccs",
	"prop 0", "prop 1", "prop 2", "prop 3",
	"l1 4", "t1 4", "pe 4",
	"l1 5", "t1 5", "pe 5",
	"l1 6", "t1 6", "pe 6",
	"l1 7", "t1 7", "pe 7",
};

/* fallback description for out-of-range ids */
static const char * const does_not_exist[] = {
	"does not exist"
};
gk20a_fault_type_descs[mmfault->fault_type]; 1249 } 1250} 1251 1252/* fill in mmu fault client description */ 1253void gk20a_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault) 1254{ 1255 if (mmfault->client_id >= ARRAY_SIZE(gk20a_hub_client_descs)) { 1256 WARN_ON(mmfault->client_id >= 1257 ARRAY_SIZE(gk20a_hub_client_descs)); 1258 } else { 1259 mmfault->client_id_desc = 1260 gk20a_hub_client_descs[mmfault->client_id]; 1261 } 1262} 1263 1264/* fill in mmu fault gpc description */ 1265void gk20a_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault) 1266{ 1267 if (mmfault->client_id >= ARRAY_SIZE(gk20a_gpc_client_descs)) { 1268 WARN_ON(mmfault->client_id >= 1269 ARRAY_SIZE(gk20a_gpc_client_descs)); 1270 } else { 1271 mmfault->client_id_desc = 1272 gk20a_gpc_client_descs[mmfault->client_id]; 1273 } 1274} 1275 1276static void get_exception_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, 1277 struct mmu_fault_info *mmfault) 1278{ 1279 g->ops.fifo.get_mmu_fault_info(g, mmu_fault_id, mmfault); 1280 1281 /* parse info */ 1282 mmfault->fault_type_desc = does_not_exist[0]; 1283 if (g->ops.fifo.get_mmu_fault_desc) { 1284 g->ops.fifo.get_mmu_fault_desc(mmfault); 1285 } 1286 1287 if (mmfault->client_type >= ARRAY_SIZE(engine_subid_descs)) { 1288 WARN_ON(mmfault->client_type >= ARRAY_SIZE(engine_subid_descs)); 1289 mmfault->client_type_desc = does_not_exist[0]; 1290 } else { 1291 mmfault->client_type_desc = 1292 engine_subid_descs[mmfault->client_type]; 1293 } 1294 1295 mmfault->client_id_desc = does_not_exist[0]; 1296 if ((mmfault->client_type == 1297 fifo_intr_mmu_fault_info_engine_subid_hub_v()) 1298 && g->ops.fifo.get_mmu_fault_client_desc) { 1299 g->ops.fifo.get_mmu_fault_client_desc(mmfault); 1300 } else if ((mmfault->client_type == 1301 fifo_intr_mmu_fault_info_engine_subid_gpc_v()) 1302 && g->ops.fifo.get_mmu_fault_gpc_desc) { 1303 g->ops.fifo.get_mmu_fault_gpc_desc(mmfault); 1304 } 1305} 1306 1307/* reads info from hardware and fills in mmu fault info 
record */ 1308void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, 1309 struct mmu_fault_info *mmfault) 1310{ 1311 u32 fault_info; 1312 u32 addr_lo, addr_hi; 1313 1314 nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); 1315 1316 memset(mmfault, 0, sizeof(*mmfault)); 1317 1318 fault_info = gk20a_readl(g, 1319 fifo_intr_mmu_fault_info_r(mmu_fault_id)); 1320 mmfault->fault_type = 1321 fifo_intr_mmu_fault_info_type_v(fault_info); 1322 mmfault->access_type = 1323 fifo_intr_mmu_fault_info_write_v(fault_info); 1324 mmfault->client_type = 1325 fifo_intr_mmu_fault_info_engine_subid_v(fault_info); 1326 mmfault->client_id = 1327 fifo_intr_mmu_fault_info_client_v(fault_info); 1328 1329 addr_lo = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(mmu_fault_id)); 1330 addr_hi = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(mmu_fault_id)); 1331 mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo); 1332 /* note:ignoring aperture on gk20a... */ 1333 mmfault->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v( 1334 gk20a_readl(g, fifo_intr_mmu_fault_inst_r(mmu_fault_id))); 1335 /* note: inst_ptr is a 40b phys addr. */ 1336 mmfault->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v(); 1337} 1338 1339void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) 1340{ 1341 u32 engine_enum = ENGINE_INVAL_GK20A; 1342 struct fifo_engine_info_gk20a *engine_info; 1343 1344 nvgpu_log_fn(g, " "); 1345 1346 if (!g) { 1347 return; 1348 } 1349 1350 engine_info = gk20a_fifo_get_engine_info(g, engine_id); 1351 1352 if (engine_info) { 1353 engine_enum = engine_info->engine_enum; 1354 } 1355 1356 if (engine_enum == ENGINE_INVAL_GK20A) { 1357 nvgpu_err(g, "unsupported engine_id %d", engine_id); 1358 } 1359 1360 if (engine_enum == ENGINE_GR_GK20A) { 1361 if (g->support_pmu) { 1362 if (nvgpu_pg_elpg_disable(g) != 0 ) { 1363 nvgpu_err(g, "failed to set disable elpg"); 1364 } 1365 } 1366 1367#ifdef CONFIG_GK20A_CTXSW_TRACE 1368 /* 1369 * Resetting engine will alter read/write index. 
Need to flush 1370 * circular buffer before re-enabling FECS. 1371 */ 1372 if (g->ops.fecs_trace.reset) 1373 g->ops.fecs_trace.reset(g); 1374#endif 1375 if (!nvgpu_platform_is_simulation(g)) { 1376 /*HALT_PIPELINE method, halt GR engine*/ 1377 if (gr_gk20a_halt_pipe(g)) { 1378 nvgpu_err(g, "failed to HALT gr pipe"); 1379 } 1380 /* 1381 * resetting engine using mc_enable_r() is not 1382 * enough, we do full init sequence 1383 */ 1384 nvgpu_log(g, gpu_dbg_info, "resetting gr engine"); 1385 gk20a_gr_reset(g); 1386 } else { 1387 nvgpu_log(g, gpu_dbg_info, 1388 "HALT gr pipe not supported and " 1389 "gr cannot be reset without halting gr pipe"); 1390 } 1391 if (g->support_pmu) { 1392 if (nvgpu_pg_elpg_enable(g) != 0 ) { 1393 nvgpu_err(g, "failed to set enable elpg"); 1394 } 1395 } 1396 } 1397 if ((engine_enum == ENGINE_GRCE_GK20A) || 1398 (engine_enum == ENGINE_ASYNC_CE_GK20A)) { 1399 g->ops.mc.reset(g, engine_info->reset_mask); 1400 } 1401} 1402 1403static void gk20a_fifo_handle_chsw_fault(struct gk20a *g) 1404{ 1405 u32 intr; 1406 1407 intr = gk20a_readl(g, fifo_intr_chsw_error_r()); 1408 nvgpu_err(g, "chsw: %08x", intr); 1409 gk20a_fecs_dump_falcon_stats(g); 1410 gk20a_gpccs_dump_falcon_stats(g); 1411 gk20a_writel(g, fifo_intr_chsw_error_r(), intr); 1412} 1413 1414static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g) 1415{ 1416 u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r()); 1417 nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id); 1418} 1419 1420bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid) 1421{ 1422 return (engine_subid == fifo_intr_mmu_fault_info_engine_subid_gpc_v()); 1423} 1424 1425bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id, 1426 u32 engine_subid, bool fake_fault) 1427{ 1428 u32 engine_enum = ENGINE_INVAL_GK20A; 1429 struct fifo_engine_info_gk20a *engine_info; 1430 1431 if (!g) { 1432 return false; 1433 } 1434 1435 engine_info = gk20a_fifo_get_engine_info(g, engine_id); 1436 1437 if 
(engine_info) { 1438 engine_enum = engine_info->engine_enum; 1439 } 1440 1441 if (engine_enum == ENGINE_INVAL_GK20A) { 1442 return false; 1443 } 1444 1445 /* channel recovery is only deferred if an sm debugger 1446 is attached and has MMU debug mode is enabled */ 1447 if (!g->ops.gr.sm_debugger_attached(g) || 1448 !g->ops.fb.is_debug_mode_enabled(g)) { 1449 return false; 1450 } 1451 1452 /* if this fault is fake (due to RC recovery), don't defer recovery */ 1453 if (fake_fault) { 1454 return false; 1455 } 1456 1457 if (engine_enum != ENGINE_GR_GK20A) { 1458 return false; 1459 } 1460 1461 return g->ops.fifo.is_fault_engine_subid_gpc(g, engine_subid); 1462} 1463 1464/* caller must hold a channel reference */ 1465static bool gk20a_fifo_ch_timeout_debug_dump_state(struct gk20a *g, 1466 struct channel_gk20a *refch) 1467{ 1468 bool verbose = false; 1469 if (!refch) { 1470 return verbose; 1471 } 1472 1473 if (nvgpu_is_error_notifier_set(refch, 1474 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT)) { 1475 verbose = refch->timeout_debug_dump; 1476 } 1477 1478 return verbose; 1479} 1480 1481/* caller must hold a channel reference */ 1482static void gk20a_fifo_set_has_timedout_and_wake_up_wqs(struct gk20a *g, 1483 struct channel_gk20a *refch) 1484{ 1485 if (refch) { 1486 /* mark channel as faulted */ 1487 gk20a_channel_set_timedout(refch); 1488 1489 /* unblock pending waits */ 1490 nvgpu_cond_broadcast_interruptible(&refch->semaphore_wq); 1491 nvgpu_cond_broadcast_interruptible(&refch->notifier_wq); 1492 } 1493} 1494 1495/* caller must hold a channel reference */ 1496bool gk20a_fifo_error_ch(struct gk20a *g, 1497 struct channel_gk20a *refch) 1498{ 1499 bool verbose; 1500 1501 verbose = gk20a_fifo_ch_timeout_debug_dump_state(g, refch); 1502 gk20a_fifo_set_has_timedout_and_wake_up_wqs(g, refch); 1503 1504 return verbose; 1505} 1506 1507bool gk20a_fifo_error_tsg(struct gk20a *g, 1508 struct tsg_gk20a *tsg) 1509{ 1510 struct channel_gk20a *ch = NULL; 1511 bool verbose = false; 1512 
1513 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1514 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { 1515 if (gk20a_channel_get(ch)) { 1516 if (gk20a_fifo_error_ch(g, ch)) { 1517 verbose = true; 1518 } 1519 gk20a_channel_put(ch); 1520 } 1521 } 1522 nvgpu_rwsem_up_read(&tsg->ch_list_lock); 1523 1524 return verbose; 1525 1526} 1527/* caller must hold a channel reference */ 1528void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g, 1529 struct channel_gk20a *refch) 1530{ 1531 nvgpu_err(g, 1532 "channel %d generated a mmu fault", refch->chid); 1533 g->ops.fifo.set_error_notifier(refch, 1534 NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT); 1535} 1536 1537void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, 1538 struct tsg_gk20a *tsg) 1539{ 1540 struct channel_gk20a *ch = NULL; 1541 1542 nvgpu_err(g, 1543 "TSG %d generated a mmu fault", tsg->tsgid); 1544 1545 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1546 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { 1547 if (gk20a_channel_get(ch)) { 1548 gk20a_fifo_set_ctx_mmu_error_ch(g, ch); 1549 gk20a_channel_put(ch); 1550 } 1551 } 1552 nvgpu_rwsem_up_read(&tsg->ch_list_lock); 1553 1554} 1555 1556void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt) 1557{ 1558 struct channel_gk20a *ch = NULL; 1559 1560 nvgpu_log_fn(g, " "); 1561 1562 g->ops.fifo.disable_tsg(tsg); 1563 1564 if (preempt) { 1565 g->ops.fifo.preempt_tsg(g, tsg); 1566 } 1567 1568 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1569 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { 1570 if (gk20a_channel_get(ch)) { 1571 gk20a_channel_set_timedout(ch); 1572 if (ch->g->ops.fifo.ch_abort_clean_up) { 1573 ch->g->ops.fifo.ch_abort_clean_up(ch); 1574 } 1575 gk20a_channel_put(ch); 1576 } 1577 } 1578 nvgpu_rwsem_up_read(&tsg->ch_list_lock); 1579} 1580 1581int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) 1582{ 1583 unsigned long engine_id, engines = 0U; 1584 struct 
tsg_gk20a *tsg; 1585 bool deferred_reset_pending; 1586 struct fifo_gk20a *f = &g->fifo; 1587 1588 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1589 1590 nvgpu_mutex_acquire(&f->deferred_reset_mutex); 1591 deferred_reset_pending = g->fifo.deferred_reset_pending; 1592 nvgpu_mutex_release(&f->deferred_reset_mutex); 1593 1594 if (!deferred_reset_pending) { 1595 nvgpu_mutex_release(&g->dbg_sessions_lock); 1596 return 0; 1597 } 1598 1599 gr_gk20a_disable_ctxsw(g); 1600 1601 tsg = tsg_gk20a_from_ch(ch); 1602 if (tsg != NULL) { 1603 engines = gk20a_fifo_engines_on_id(g, tsg->tsgid, true); 1604 } else { 1605 nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid); 1606 engines = g->fifo.deferred_fault_engines; 1607 } 1608 1609 if (engines == 0U) { 1610 goto clean_up; 1611 } 1612 1613 /* 1614 * If deferred reset is set for an engine, and channel is running 1615 * on that engine, reset it 1616 */ 1617 for_each_set_bit(engine_id, &g->fifo.deferred_fault_engines, 32) { 1618 if (BIT(engine_id) & engines) { 1619 gk20a_fifo_reset_engine(g, engine_id); 1620 } 1621 } 1622 1623 nvgpu_mutex_acquire(&f->deferred_reset_mutex); 1624 g->fifo.deferred_fault_engines = 0; 1625 g->fifo.deferred_reset_pending = false; 1626 nvgpu_mutex_release(&f->deferred_reset_mutex); 1627 1628clean_up: 1629 gr_gk20a_enable_ctxsw(g); 1630 nvgpu_mutex_release(&g->dbg_sessions_lock); 1631 1632 return 0; 1633} 1634 1635static bool gk20a_fifo_handle_mmu_fault_locked( 1636 struct gk20a *g, 1637 u32 mmu_fault_engines, /* queried from HW if 0 */ 1638 u32 hw_id, /* queried from HW if ~(u32)0 OR mmu_fault_engines == 0*/ 1639 bool id_is_tsg) 1640{ 1641 bool fake_fault; 1642 unsigned long fault_id; 1643 unsigned long engine_mmu_fault_id; 1644 bool verbose = true; 1645 u32 grfifo_ctl; 1646 1647 bool deferred_reset_pending = false; 1648 struct fifo_gk20a *f = &g->fifo; 1649 1650 nvgpu_log_fn(g, " "); 1651 1652 /* Disable power management */ 1653 if (g->support_pmu) { 1654 if (nvgpu_cg_pg_disable(g) != 0) { 1655 
nvgpu_warn(g, "fail to disable power mgmt"); 1656 } 1657 } 1658 1659 /* Disable fifo access */ 1660 grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r()); 1661 grfifo_ctl &= ~gr_gpfifo_ctl_semaphore_access_f(1); 1662 grfifo_ctl &= ~gr_gpfifo_ctl_access_f(1); 1663 1664 gk20a_writel(g, gr_gpfifo_ctl_r(), 1665 grfifo_ctl | gr_gpfifo_ctl_access_f(0) | 1666 gr_gpfifo_ctl_semaphore_access_f(0)); 1667 1668 if (mmu_fault_engines) { 1669 fault_id = mmu_fault_engines; 1670 fake_fault = true; 1671 } else { 1672 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r()); 1673 fake_fault = false; 1674 gk20a_debug_dump(g); 1675 } 1676 1677 nvgpu_mutex_acquire(&f->deferred_reset_mutex); 1678 g->fifo.deferred_reset_pending = false; 1679 nvgpu_mutex_release(&f->deferred_reset_mutex); 1680 1681 /* go through all faulted engines */ 1682 for_each_set_bit(engine_mmu_fault_id, &fault_id, 32) { 1683 /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to 1684 * engines. Convert engine_mmu_id to engine_id */ 1685 u32 engine_id = gk20a_mmu_id_to_engine_id(g, 1686 engine_mmu_fault_id); 1687 struct mmu_fault_info mmfault_info; 1688 struct channel_gk20a *ch = NULL; 1689 struct tsg_gk20a *tsg = NULL; 1690 struct channel_gk20a *refch = NULL; 1691 /* read and parse engine status */ 1692 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id)); 1693 u32 ctx_status = fifo_engine_status_ctx_status_v(status); 1694 bool ctxsw = (ctx_status == 1695 fifo_engine_status_ctx_status_ctxsw_switch_v() 1696 || ctx_status == 1697 fifo_engine_status_ctx_status_ctxsw_save_v() 1698 || ctx_status == 1699 fifo_engine_status_ctx_status_ctxsw_load_v()); 1700 1701 get_exception_mmu_fault_info(g, engine_mmu_fault_id, 1702 &mmfault_info); 1703 trace_gk20a_mmu_fault(mmfault_info.fault_addr, 1704 mmfault_info.fault_type, 1705 mmfault_info.access_type, 1706 mmfault_info.inst_ptr, 1707 engine_id, 1708 mmfault_info.client_type_desc, 1709 mmfault_info.client_id_desc, 1710 mmfault_info.fault_type_desc); 1711 nvgpu_err(g, "%s mmu 
fault on engine %d, " 1712 "engine subid %d (%s), client %d (%s), " 1713 "addr 0x%llx, type %d (%s), access_type 0x%08x," 1714 "inst_ptr 0x%llx", 1715 fake_fault ? "fake" : "", 1716 engine_id, 1717 mmfault_info.client_type, 1718 mmfault_info.client_type_desc, 1719 mmfault_info.client_id, mmfault_info.client_id_desc, 1720 mmfault_info.fault_addr, 1721 mmfault_info.fault_type, 1722 mmfault_info.fault_type_desc, 1723 mmfault_info.access_type, mmfault_info.inst_ptr); 1724 1725 if (ctxsw) { 1726 gk20a_fecs_dump_falcon_stats(g); 1727 gk20a_gpccs_dump_falcon_stats(g); 1728 nvgpu_err(g, "gr_status_r : 0x%x", 1729 gk20a_readl(g, gr_status_r())); 1730 } 1731 1732 /* get the channel/TSG */ 1733 if (fake_fault) { 1734 /* use next_id if context load is failing */ 1735 u32 id, type; 1736 1737 if (hw_id == ~(u32)0) { 1738 id = (ctx_status == 1739 fifo_engine_status_ctx_status_ctxsw_load_v()) ? 1740 fifo_engine_status_next_id_v(status) : 1741 fifo_engine_status_id_v(status); 1742 type = (ctx_status == 1743 fifo_engine_status_ctx_status_ctxsw_load_v()) ? 1744 fifo_engine_status_next_id_type_v(status) : 1745 fifo_engine_status_id_type_v(status); 1746 } else { 1747 id = hw_id; 1748 type = id_is_tsg ? 
1749 fifo_engine_status_id_type_tsgid_v() : 1750 fifo_engine_status_id_type_chid_v(); 1751 } 1752 1753 if (type == fifo_engine_status_id_type_tsgid_v()) { 1754 tsg = &g->fifo.tsg[id]; 1755 } else if (type == fifo_engine_status_id_type_chid_v()) { 1756 ch = &g->fifo.channel[id]; 1757 refch = gk20a_channel_get(ch); 1758 if (refch != NULL) { 1759 tsg = tsg_gk20a_from_ch(refch); 1760 } 1761 } 1762 } else { 1763 /* read channel based on instruction pointer */ 1764 ch = gk20a_refch_from_inst_ptr(g, 1765 mmfault_info.inst_ptr); 1766 refch = ch; 1767 if (refch != NULL) { 1768 tsg = tsg_gk20a_from_ch(refch); 1769 } 1770 } 1771 1772 /* check if engine reset should be deferred */ 1773 if (engine_id != FIFO_INVAL_ENGINE_ID) { 1774 bool defer = gk20a_fifo_should_defer_engine_reset(g, 1775 engine_id, mmfault_info.client_type, 1776 fake_fault); 1777 if ((ch || tsg) && defer) { 1778 g->fifo.deferred_fault_engines |= BIT(engine_id); 1779 1780 /* handled during channel free */ 1781 nvgpu_mutex_acquire(&f->deferred_reset_mutex); 1782 g->fifo.deferred_reset_pending = true; 1783 nvgpu_mutex_release(&f->deferred_reset_mutex); 1784 1785 deferred_reset_pending = true; 1786 1787 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 1788 "sm debugger attached," 1789 " deferring channel recovery to channel free"); 1790 } else { 1791 gk20a_fifo_reset_engine(g, engine_id); 1792 } 1793 } 1794 1795#ifdef CONFIG_GK20A_CTXSW_TRACE 1796 if (tsg) { 1797 gk20a_ctxsw_trace_tsg_reset(g, tsg); 1798 } 1799#endif 1800 /* 1801 * Disable the channel/TSG from hw and increment syncpoints. 
1802 */ 1803 if (tsg) { 1804 if (deferred_reset_pending) { 1805 gk20a_disable_tsg(tsg); 1806 } else { 1807 if (!fake_fault) { 1808 gk20a_fifo_set_ctx_mmu_error_tsg(g, 1809 tsg); 1810 } 1811 verbose = gk20a_fifo_error_tsg(g, tsg); 1812 gk20a_fifo_abort_tsg(g, tsg, false); 1813 } 1814 1815 /* put back the ref taken early above */ 1816 if (refch) { 1817 gk20a_channel_put(ch); 1818 } 1819 } else if (refch != NULL) { 1820 nvgpu_err(g, "mmu error in unbound channel %d", 1821 ch->chid); 1822 gk20a_channel_put(ch); 1823 } else if (mmfault_info.inst_ptr == 1824 nvgpu_inst_block_addr(g, &g->mm.bar1.inst_block)) { 1825 nvgpu_err(g, "mmu fault from bar1"); 1826 } else if (mmfault_info.inst_ptr == 1827 nvgpu_inst_block_addr(g, &g->mm.pmu.inst_block)) { 1828 nvgpu_err(g, "mmu fault from pmu"); 1829 } else { 1830 nvgpu_err(g, "couldn't locate channel for mmu fault"); 1831 } 1832 } 1833 1834 /* clear interrupt */ 1835 gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id); 1836 1837 /* resume scheduler */ 1838 gk20a_writel(g, fifo_error_sched_disable_r(), 1839 gk20a_readl(g, fifo_error_sched_disable_r())); 1840 1841 /* Re-enable fifo access */ 1842 gk20a_writel(g, gr_gpfifo_ctl_r(), 1843 gr_gpfifo_ctl_access_enabled_f() | 1844 gr_gpfifo_ctl_semaphore_access_enabled_f()); 1845 1846 /* It is safe to enable ELPG again. 
*/ 1847 if (g->support_pmu) { 1848 if (nvgpu_cg_pg_enable(g) != 0) { 1849 nvgpu_warn(g, "fail to enable power mgmt"); 1850 } 1851 } 1852 1853 return verbose; 1854} 1855 1856static bool gk20a_fifo_handle_mmu_fault( 1857 struct gk20a *g, 1858 u32 mmu_fault_engines, /* queried from HW if 0 */ 1859 u32 hw_id, /* queried from HW if ~(u32)0 OR mmu_fault_engines == 0*/ 1860 bool id_is_tsg) 1861{ 1862 u32 rlid; 1863 bool verbose; 1864 1865 nvgpu_log_fn(g, " "); 1866 1867 nvgpu_log_info(g, "acquire engines_reset_mutex"); 1868 nvgpu_mutex_acquire(&g->fifo.engines_reset_mutex); 1869 1870 nvgpu_log_info(g, "acquire runlist_lock for all runlists"); 1871 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { 1872 nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid].runlist_lock); 1873 } 1874 1875 verbose = gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines, 1876 hw_id, id_is_tsg); 1877 1878 nvgpu_log_info(g, "release runlist_lock for all runlists"); 1879 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { 1880 nvgpu_mutex_release(&g->fifo.runlist_info[rlid].runlist_lock); 1881 } 1882 1883 nvgpu_log_info(g, "release engines_reset_mutex"); 1884 nvgpu_mutex_release(&g->fifo.engines_reset_mutex); 1885 1886 return verbose; 1887} 1888 1889static void gk20a_fifo_get_faulty_id_type(struct gk20a *g, int engine_id, 1890 u32 *id, u32 *type) 1891{ 1892 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id)); 1893 u32 ctx_status = fifo_engine_status_ctx_status_v(status); 1894 1895 /* use next_id if context load is failing */ 1896 *id = (ctx_status == 1897 fifo_engine_status_ctx_status_ctxsw_load_v()) ? 1898 fifo_engine_status_next_id_v(status) : 1899 fifo_engine_status_id_v(status); 1900 1901 *type = (ctx_status == 1902 fifo_engine_status_ctx_status_ctxsw_load_v()) ? 
1903 fifo_engine_status_next_id_type_v(status) : 1904 fifo_engine_status_id_type_v(status); 1905} 1906 1907static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg) 1908{ 1909 unsigned int i; 1910 u32 engines = 0; 1911 1912 for (i = 0; i < g->fifo.num_engines; i++) { 1913 u32 active_engine_id = g->fifo.active_engines_list[i]; 1914 u32 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id)); 1915 u32 ctx_status = 1916 fifo_engine_status_ctx_status_v(status); 1917 u32 ctx_id = (ctx_status == 1918 fifo_engine_status_ctx_status_ctxsw_load_v()) ? 1919 fifo_engine_status_next_id_v(status) : 1920 fifo_engine_status_id_v(status); 1921 u32 type = (ctx_status == 1922 fifo_engine_status_ctx_status_ctxsw_load_v()) ? 1923 fifo_engine_status_next_id_type_v(status) : 1924 fifo_engine_status_id_type_v(status); 1925 bool busy = fifo_engine_status_engine_v(status) == 1926 fifo_engine_status_engine_busy_v(); 1927 if (busy && ctx_id == id) { 1928 if ((is_tsg && type == 1929 fifo_engine_status_id_type_tsgid_v()) || 1930 (!is_tsg && type == 1931 fifo_engine_status_id_type_chid_v())) { 1932 engines |= BIT(active_engine_id); 1933 } 1934 } 1935 } 1936 1937 return engines; 1938} 1939 1940void gk20a_fifo_recover_ch(struct gk20a *g, struct channel_gk20a *ch, 1941 bool verbose, u32 rc_type) 1942{ 1943 u32 engines; 1944 1945 /* stop context switching to prevent engine assignments from 1946 changing until channel is recovered */ 1947 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1948 gr_gk20a_disable_ctxsw(g); 1949 1950 engines = gk20a_fifo_engines_on_id(g, ch->chid, false); 1951 1952 if (engines) { 1953 gk20a_fifo_recover(g, engines, ch->chid, false, true, verbose, 1954 rc_type); 1955 } else { 1956 gk20a_channel_abort(ch, false); 1957 1958 if (gk20a_fifo_error_ch(g, ch)) { 1959 gk20a_debug_dump(g); 1960 } 1961 } 1962 1963 gr_gk20a_enable_ctxsw(g); 1964 nvgpu_mutex_release(&g->dbg_sessions_lock); 1965} 1966 1967void gk20a_fifo_recover_tsg(struct gk20a *g, struct tsg_gk20a 
*tsg, 1968 bool verbose, u32 rc_type) 1969{ 1970 u32 engines = 0U; 1971 int err; 1972 1973 /* stop context switching to prevent engine assignments from 1974 changing until TSG is recovered */ 1975 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 1976 1977 /* disable tsg so that it does not get scheduled again */ 1978 g->ops.fifo.disable_tsg(tsg); 1979 1980 /* 1981 * On hitting engine reset, h/w drops the ctxsw_status to INVALID in 1982 * fifo_engine_status register. Also while the engine is held in reset 1983 * h/w passes busy/idle straight through. fifo_engine_status registers 1984 * are correct in that there is no context switch outstanding 1985 * as the CTXSW is aborted when reset is asserted. 1986 */ 1987 nvgpu_log_info(g, "acquire engines_reset_mutex"); 1988 nvgpu_mutex_acquire(&g->fifo.engines_reset_mutex); 1989 1990 /* 1991 * stop context switching to prevent engine assignments from 1992 * changing until engine status is checked to make sure tsg 1993 * being recovered is not loaded on the engines 1994 */ 1995 err = gr_gk20a_disable_ctxsw(g); 1996 1997 if (err != 0) { 1998 /* if failed to disable ctxsw, just abort tsg */ 1999 nvgpu_err(g, "failed to disable ctxsw"); 2000 } else { 2001 /* recover engines if tsg is loaded on the engines */ 2002 engines = gk20a_fifo_engines_on_id(g, tsg->tsgid, true); 2003 2004 /* 2005 * it is ok to enable ctxsw before tsg is recovered. If engines 2006 * is 0, no engine recovery is needed and if it is non zero, 2007 * gk20a_fifo_recover will call get_engines_mask_on_id again. 2008 * By that time if tsg is not on the engine, engine need not 2009 * be reset. 
2010 */ 2011 err = gr_gk20a_enable_ctxsw(g); 2012 if (err != 0) { 2013 nvgpu_err(g, "failed to enable ctxsw"); 2014 } 2015 } 2016 2017 nvgpu_log_info(g, "release engines_reset_mutex"); 2018 nvgpu_mutex_release(&g->fifo.engines_reset_mutex); 2019 2020 if (engines) { 2021 gk20a_fifo_recover(g, engines, tsg->tsgid, true, true, verbose, 2022 rc_type); 2023 } else { 2024 if (gk20a_fifo_error_tsg(g, tsg) && verbose) { 2025 gk20a_debug_dump(g); 2026 } 2027 2028 gk20a_fifo_abort_tsg(g, tsg, false); 2029 } 2030 2031 nvgpu_mutex_release(&g->dbg_sessions_lock); 2032} 2033 2034void gk20a_fifo_teardown_mask_intr(struct gk20a *g) 2035{ 2036 u32 val; 2037 2038 val = gk20a_readl(g, fifo_intr_en_0_r()); 2039 val &= ~(fifo_intr_en_0_sched_error_m() | 2040 fifo_intr_en_0_mmu_fault_m()); 2041 gk20a_writel(g, fifo_intr_en_0_r(), val); 2042 gk20a_writel(g, fifo_intr_0_r(), fifo_intr_0_sched_error_reset_f()); 2043} 2044 2045void gk20a_fifo_teardown_unmask_intr(struct gk20a *g) 2046{ 2047 u32 val; 2048 2049 val = gk20a_readl(g, fifo_intr_en_0_r()); 2050 val |= fifo_intr_en_0_mmu_fault_f(1) | fifo_intr_en_0_sched_error_f(1); 2051 gk20a_writel(g, fifo_intr_en_0_r(), val); 2052 2053} 2054 2055void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids, 2056 u32 hw_id, unsigned int id_type, unsigned int rc_type, 2057 struct mmu_fault_info *mmfault) 2058{ 2059 unsigned long engine_id, i; 2060 unsigned long _engine_ids = __engine_ids; 2061 unsigned long engine_ids = 0; 2062 u32 mmu_fault_engines = 0; 2063 u32 ref_type; 2064 u32 ref_id; 2065 u32 ref_id_is_tsg = false; 2066 bool id_is_known = (id_type != ID_TYPE_UNKNOWN) ? true : false; 2067 bool id_is_tsg = (id_type == ID_TYPE_TSG) ? 
true : false; 2068 u32 rlid; 2069 2070 nvgpu_log_info(g, "acquire engines_reset_mutex"); 2071 nvgpu_mutex_acquire(&g->fifo.engines_reset_mutex); 2072 2073 nvgpu_log_info(g, "acquire runlist_lock for all runlists"); 2074 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { 2075 nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid].runlist_lock); 2076 } 2077 2078 if (id_is_known) { 2079 engine_ids = gk20a_fifo_engines_on_id(g, hw_id, id_is_tsg); 2080 ref_id = hw_id; 2081 ref_type = id_is_tsg ? 2082 fifo_engine_status_id_type_tsgid_v() : 2083 fifo_engine_status_id_type_chid_v(); 2084 ref_id_is_tsg = id_is_tsg; 2085 /* atleast one engine will get passed during sched err*/ 2086 engine_ids |= __engine_ids; 2087 for_each_set_bit(engine_id, &engine_ids, 32) { 2088 u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); 2089 2090 if (mmu_id != FIFO_INVAL_ENGINE_ID) { 2091 mmu_fault_engines |= BIT(mmu_id); 2092 } 2093 } 2094 } else { 2095 /* store faulted engines in advance */ 2096 for_each_set_bit(engine_id, &_engine_ids, 32) { 2097 gk20a_fifo_get_faulty_id_type(g, engine_id, &ref_id, 2098 &ref_type); 2099 if (ref_type == fifo_engine_status_id_type_tsgid_v()) { 2100 ref_id_is_tsg = true; 2101 } else { 2102 ref_id_is_tsg = false; 2103 } 2104 /* Reset *all* engines that use the 2105 * same channel as faulty engine */ 2106 for (i = 0; i < g->fifo.num_engines; i++) { 2107 u32 active_engine_id = g->fifo.active_engines_list[i]; 2108 u32 type; 2109 u32 id; 2110 2111 gk20a_fifo_get_faulty_id_type(g, active_engine_id, &id, &type); 2112 if (ref_type == type && ref_id == id) { 2113 u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id); 2114 2115 engine_ids |= BIT(active_engine_id); 2116 if (mmu_id != FIFO_INVAL_ENGINE_ID) { 2117 mmu_fault_engines |= BIT(mmu_id); 2118 } 2119 } 2120 } 2121 } 2122 } 2123 2124 if (mmu_fault_engines) { 2125 g->ops.fifo.teardown_mask_intr(g); 2126 g->ops.fifo.trigger_mmu_fault(g, engine_ids); 2127 gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines, 
ref_id, 2128 ref_id_is_tsg); 2129 2130 g->ops.fifo.teardown_unmask_intr(g); 2131 } 2132 2133 nvgpu_log_info(g, "release runlist_lock for all runlists"); 2134 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) { 2135 nvgpu_mutex_release(&g->fifo.runlist_info[rlid].runlist_lock); 2136 } 2137 2138 nvgpu_log_info(g, "release engines_reset_mutex"); 2139 nvgpu_mutex_release(&g->fifo.engines_reset_mutex); 2140} 2141 2142void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, 2143 u32 hw_id, bool id_is_tsg, 2144 bool id_is_known, bool verbose, int rc_type) 2145{ 2146 unsigned int id_type; 2147 2148 if (verbose) { 2149 gk20a_debug_dump(g); 2150 } 2151 2152 if (g->ops.ltc.flush) { 2153 g->ops.ltc.flush(g); 2154 } 2155 2156 if (id_is_known) { 2157 id_type = id_is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; 2158 } else { 2159 id_type = ID_TYPE_UNKNOWN; 2160 } 2161 2162 g->ops.fifo.teardown_ch_tsg(g, __engine_ids, hw_id, id_type, 2163 rc_type, NULL); 2164} 2165 2166/* force reset channel and tsg */ 2167int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, 2168 u32 err_code, bool verbose) 2169{ 2170 struct channel_gk20a *ch_tsg = NULL; 2171 struct gk20a *g = ch->g; 2172 2173 struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch); 2174 2175 if (tsg != NULL) { 2176 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 2177 2178 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list, 2179 channel_gk20a, ch_entry) { 2180 if (gk20a_channel_get(ch_tsg)) { 2181 g->ops.fifo.set_error_notifier(ch_tsg, 2182 err_code); 2183 gk20a_channel_put(ch_tsg); 2184 } 2185 } 2186 2187 nvgpu_rwsem_up_read(&tsg->ch_list_lock); 2188 gk20a_fifo_recover_tsg(g, tsg, verbose, 2189 RC_TYPE_FORCE_RESET); 2190 } else { 2191 nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid); 2192 } 2193 2194 return 0; 2195} 2196 2197int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch) 2198{ 2199 struct gk20a *g = ch->g; 2200 2201 if (gk20a_fifo_channel_status_is_next(g, ch->chid)) { 2202 nvgpu_log_info(g, "Channel %d to be 
removed from TSG %d has NEXT set!", 2203 ch->chid, ch->tsgid); 2204 return -EAGAIN; 2205 } 2206 2207 if (g->ops.fifo.tsg_verify_status_ctx_reload) { 2208 g->ops.fifo.tsg_verify_status_ctx_reload(ch); 2209 } 2210 2211 if (g->ops.fifo.tsg_verify_status_faulted) { 2212 g->ops.fifo.tsg_verify_status_faulted(ch); 2213 } 2214 2215 return 0; 2216} 2217 2218static bool gk20a_fifo_tsg_is_multi_channel(struct tsg_gk20a *tsg) 2219{ 2220 bool ret = false; 2221 2222 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 2223 if (nvgpu_list_first_entry(&tsg->ch_list, channel_gk20a, 2224 ch_entry) != 2225 nvgpu_list_last_entry(&tsg->ch_list, channel_gk20a, 2226 ch_entry)) { 2227 ret = true; 2228 } 2229 nvgpu_rwsem_up_read(&tsg->ch_list_lock); 2230 2231 return ret; 2232} 2233 2234int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch) 2235{ 2236 struct gk20a *g = ch->g; 2237 struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch); 2238 int err; 2239 bool tsg_timedout = false; 2240 2241 if (tsg == NULL) { 2242 nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid); 2243 return 0; 2244 } 2245 2246 /* If one channel in TSG times out, we disable all channels */ 2247 nvgpu_rwsem_down_write(&tsg->ch_list_lock); 2248 tsg_timedout = gk20a_channel_check_timedout(ch); 2249 nvgpu_rwsem_up_write(&tsg->ch_list_lock); 2250 2251 /* Disable TSG and examine status before unbinding channel */ 2252 g->ops.fifo.disable_tsg(tsg); 2253 2254 err = g->ops.fifo.preempt_tsg(g, tsg); 2255 if (err != 0) { 2256 goto fail_enable_tsg; 2257 } 2258 2259 /* 2260 * State validation is only necessary if there are multiple channels in 2261 * the TSG. 
2262 */ 2263 if (gk20a_fifo_tsg_is_multi_channel(tsg) && 2264 g->ops.fifo.tsg_verify_channel_status && !tsg_timedout) { 2265 err = g->ops.fifo.tsg_verify_channel_status(ch); 2266 if (err) { 2267 goto fail_enable_tsg; 2268 } 2269 } 2270 2271 /* Channel should be seen as TSG channel while updating runlist */ 2272 err = channel_gk20a_update_runlist(ch, false); 2273 if (err) { 2274 goto fail_enable_tsg; 2275 } 2276 2277 while (ch->mmu_debug_mode_refcnt > 0U) { 2278 err = nvgpu_tsg_set_mmu_debug_mode(ch, false); 2279 if (err != 0) { 2280 nvgpu_err(g, "disable mmu debug mode failed ch:%u", 2281 ch->chid); 2282 break; 2283 } 2284 } 2285 2286 /* Remove channel from TSG and re-enable rest of the channels */ 2287 nvgpu_rwsem_down_write(&tsg->ch_list_lock); 2288 nvgpu_list_del(&ch->ch_entry); 2289 ch->tsgid = NVGPU_INVALID_TSG_ID; 2290 2291 /* another thread could have re-enabled the channel because it was 2292 * still on the list at that time, so make sure it's truly disabled 2293 */ 2294 g->ops.fifo.disable_channel(ch); 2295 nvgpu_rwsem_up_write(&tsg->ch_list_lock); 2296 2297 /* 2298 * Don't re-enable all channels if TSG has timed out already 2299 * 2300 * Note that we can skip disabling and preempting TSG too in case of 2301 * time out, but we keep that to ensure TSG is kicked out 2302 */ 2303 if (!tsg_timedout) { 2304 g->ops.fifo.enable_tsg(tsg); 2305 } 2306 2307 if (ch->g->ops.fifo.ch_abort_clean_up) { 2308 ch->g->ops.fifo.ch_abort_clean_up(ch); 2309 } 2310 2311 return 0; 2312 2313fail_enable_tsg: 2314 if (!tsg_timedout) { 2315 g->ops.fifo.enable_tsg(tsg); 2316 } 2317 return err; 2318} 2319 2320u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g, 2321 int *__id, bool *__is_tsg) 2322{ 2323 u32 engine_id; 2324 int id = -1; 2325 bool is_tsg = false; 2326 u32 mailbox2; 2327 u32 active_engine_id = FIFO_INVAL_ENGINE_ID; 2328 2329 for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { 2330 u32 status; 2331 u32 ctx_status; 2332 bool failing_engine; 2333 2334 
		active_engine_id = g->fifo.active_engines_list[engine_id];
		status = gk20a_readl(g, fifo_engine_status_r(active_engine_id));
		ctx_status = fifo_engine_status_ctx_status_v(status);

		/* we are interested in busy engines */
		failing_engine = fifo_engine_status_engine_v(status) ==
			fifo_engine_status_engine_busy_v();

		/* ..that are doing context switch */
		failing_engine = failing_engine &&
			(ctx_status ==
				fifo_engine_status_ctx_status_ctxsw_switch_v()
			|| ctx_status ==
				fifo_engine_status_ctx_status_ctxsw_save_v()
			|| ctx_status ==
				fifo_engine_status_ctx_status_ctxsw_load_v());

		if (!failing_engine) {
			active_engine_id = FIFO_INVAL_ENGINE_ID;
			continue;
		}

		/*
		 * Pick the context id from the "next" or "current" fields of
		 * the engine status depending on the ctxsw phase. During a
		 * SWITCH, FECS mailbox 2 tells us whether the restore side
		 * (next id) or the save side (current id) is the one stuck.
		 */
		if (ctx_status ==
				fifo_engine_status_ctx_status_ctxsw_load_v()) {
			id = fifo_engine_status_next_id_v(status);
			is_tsg = fifo_engine_status_next_id_type_v(status) !=
				fifo_engine_status_next_id_type_chid_v();
		} else if (ctx_status ==
				fifo_engine_status_ctx_status_ctxsw_switch_v()) {
			mailbox2 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(2));
			if (mailbox2 & FECS_METHOD_WFI_RESTORE) {
				id = fifo_engine_status_next_id_v(status);
				is_tsg = fifo_engine_status_next_id_type_v(status) !=
					fifo_engine_status_next_id_type_chid_v();
			} else {
				id = fifo_engine_status_id_v(status);
				is_tsg = fifo_engine_status_id_type_v(status) !=
					fifo_engine_status_id_type_chid_v();
			}
		} else {
			id = fifo_engine_status_id_v(status);
			is_tsg = fifo_engine_status_id_type_v(status) !=
				fifo_engine_status_id_type_chid_v();
		}
		break;
	}

	*__id = id;
	*__is_tsg = is_tsg;

	return active_engine_id;
}

/*
 * Update a single channel's accumulated ctxsw timeout and decide whether
 * recovery is needed. On recovery, posts the idle-timeout error notifier.
 * *verbose and *ms are filled from the channel's debug-dump flag and
 * accumulated timeout. Returns true when recovery is required.
 */
bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch,
		bool *verbose, u32 *ms)
{
	bool recover = false;
	bool progress = false;
	struct gk20a *g = ch->g;

	/* Only touch the channel if we can take a reference on it. */
	if (gk20a_channel_get(ch)) {
		recover = gk20a_channel_update_and_check_timeout(ch,
				g->fifo_eng_timeout_us / 1000,
				&progress);
		*verbose = ch->timeout_debug_dump;
		*ms = ch->timeout_accumulated_ms;
		if (recover) {
			g->ops.fifo.set_error_notifier(ch,
					NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
		}

		gk20a_channel_put(ch);
	}
	return recover;
}

/*
 * TSG-wide ctxsw timeout check: walk all channels in the TSG looking for
 * either progress (gpfifo pointers moved) or a channel that exhausted its
 * timeout. Returns true when fifo recovery is needed; *verbose is set if any
 * channel requests a debug dump and *ms reports the relevant timeout value.
 */
bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
		bool *verbose, u32 *ms)
{
	struct channel_gk20a *ch;
	bool recover = false;
	bool progress = false;
	struct gk20a *g = tsg->g;

	*verbose = false;
	*ms = g->fifo_eng_timeout_us / 1000;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);

	/* check if there was some progress on any of the TSG channels.
	 * fifo recovery is needed if at least one channel reached the
	 * maximum timeout without progress (update in gpfifo pointers).
	 */
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		if (gk20a_channel_get(ch)) {
			recover = gk20a_channel_update_and_check_timeout(ch,
					*ms, &progress);
			/* on break, we still hold the reference on ch;
			 * the branches below drop it */
			if (progress || recover) {
				break;
			}
			gk20a_channel_put(ch);
		}
	}

	if (recover) {
		/*
		 * if one channel is presumed dead (no progress for too long),
		 * then fifo recovery is needed. we can't really figure out
		 * which channel caused the problem, so set timeout error
		 * notifier for all channels.
		 */
		nvgpu_log_info(g, "timeout on tsg=%d ch=%d",
				tsg->tsgid, ch->chid);
		*ms = ch->timeout_accumulated_ms;
		gk20a_channel_put(ch);
		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
				channel_gk20a, ch_entry) {
			if (gk20a_channel_get(ch)) {
				ch->g->ops.fifo.set_error_notifier(ch,
					NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
				if (ch->timeout_debug_dump) {
					*verbose = true;
				}
				gk20a_channel_put(ch);
			}
		}
	} else if (progress) {
		/*
		 * if at least one channel in the TSG made some progress, reset
		 * accumulated timeout for all channels in the TSG. In
		 * particular, this resets timeout for channels that already
		 * completed their work
		 */
		nvgpu_log_info(g, "progress on tsg=%d ch=%d",
				tsg->tsgid, ch->chid);
		gk20a_channel_put(ch);
		*ms = g->fifo_eng_timeout_us / 1000;
		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
				channel_gk20a, ch_entry) {
			if (gk20a_channel_get(ch)) {
				ch->timeout_accumulated_ms = *ms;
				gk20a_channel_put(ch);
			}
		}
	}

	/* if we could not detect progress on any of the channel, but none
	 * of them has reached the timeout, there is nothing more to do:
	 * timeout_accumulated_ms has been updated for all of them.
	 */
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	return recover;
}

/*
 * Handle a FIFO scheduler error interrupt. Identifies the engine stuck in a
 * context switch and, for ctxsw-timeout errors, runs the channel/TSG timeout
 * check and triggers recovery when needed. Returns true when a channel reset
 * was initiated (used by the caller to decide whether to dump state).
 */
bool gk20a_fifo_handle_sched_error(struct gk20a *g)
{
	u32 sched_error;
	u32 engine_id;
	int id = -1;
	bool is_tsg = false;
	bool ret = false;

	/* read the scheduler error register */
	sched_error = gk20a_readl(g, fifo_intr_sched_error_r());

	engine_id = gk20a_fifo_get_failing_engine_data(g, &id, &is_tsg);
	/*
	 * Could not find the engine
	 * Possible Causes:
	 * a)
	 * On hitting engine reset, h/w drops the ctxsw_status to INVALID in
	 * fifo_engine_status register. Also while the engine is held in reset
	 * h/w passes busy/idle straight through. fifo_engine_status registers
	 * are correct in that there is no context switch outstanding
	 * as the CTXSW is aborted when reset is asserted.
	 * This is just a side effect of how gv100 and earlier versions of
	 * ctxsw_timeout behave.
	 * With gv11b and later, h/w snaps the context at the point of error
	 * so that s/w can see the tsg_id which caused the HW timeout.
	 * b)
	 * If engines are not busy and ctxsw state is valid then intr occurred
	 * in the past and if the ctxsw state has moved on to VALID from LOAD
	 * or SAVE, it means that whatever timed out eventually finished
	 * anyways. The problem with this is that s/w cannot conclude which
	 * context caused the problem as maybe more switches occurred before
	 * intr is handled.
	 */
	if (engine_id == FIFO_INVAL_ENGINE_ID) {
		nvgpu_info(g, "fifo sched error: 0x%08x, failed to find engine "
				"that is busy doing ctxsw. "
				"May be ctxsw already happened", sched_error);
		ret = false;
		goto err;
	}

	/* could not find the engine - should never happen */
	if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
		nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine",
			sched_error);
		ret = false;
		goto err;
	}

	if (fifo_intr_sched_error_code_f(sched_error) ==
			fifo_intr_sched_error_code_ctxsw_timeout_v()) {
		struct fifo_gk20a *f = &g->fifo;
		u32 ms = 0;
		bool verbose = false;

		if (is_tsg) {
			ret = g->ops.fifo.check_tsg_ctxsw_timeout(
					&f->tsg[id], &verbose, &ms);
		} else {
			ret = g->ops.fifo.check_ch_ctxsw_timeout(
					&f->channel[id], &verbose, &ms);
		}

		if (ret) {
			nvgpu_err(g,
				"fifo sched ctxsw timeout error: "
				"engine=%u, %s=%d, ms=%u",
				engine_id, is_tsg ? "tsg" : "ch", id, ms);
			/*
			 * Cancel all channels' timeout since SCHED error might
			 * trigger multiple watchdogs at a time
			 */
			gk20a_channel_timeout_restart_all_channels(g);
			gk20a_fifo_recover(g, BIT(engine_id), id,
					is_tsg, true, verbose,
					RC_TYPE_CTXSW_TIMEOUT);
		} else {
			nvgpu_log_info(g,
				"fifo is waiting for ctx switch for %d ms, "
				"%s=%d", ms, is_tsg ? "tsg" : "ch", id);
		}
	} else {
		nvgpu_err(g,
			"fifo sched error : 0x%08x, engine=%u, %s=%d",
			sched_error, engine_id, is_tsg ? "tsg" : "ch", id);
	}

err:
	return ret;
}

/*
 * Handle the error-class FIFO interrupts (pio/bind/sched/chsw/mmu-fault).
 * Returns the mask of interrupt bits that were handled so the caller can
 * clear them.
 */
static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
{
	bool print_channel_reset_log = false;
	u32 handled = 0;

	nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);

	if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
		/* pio mode is unused. this shouldn't happen, ever. */
		/* should we clear it or just leave it pending? */
		nvgpu_err(g, "fifo pio error!");
		BUG_ON(1);
	}

	if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
		u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
		nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
		print_channel_reset_log = true;
		handled |= fifo_intr_0_bind_error_pending_f();
	}

	if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
		print_channel_reset_log = g->ops.fifo.handle_sched_error(g);
		handled |= fifo_intr_0_sched_error_pending_f();
	}

	if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) {
		gk20a_fifo_handle_chsw_fault(g);
		handled |= fifo_intr_0_chsw_error_pending_f();
	}

	if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) {
		if (gk20a_fifo_handle_mmu_fault(g, 0, ~(u32)0, false)) {
			print_channel_reset_log = true;
		}
		handled |= fifo_intr_0_mmu_fault_pending_f();
	}

	if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) {
		gk20a_fifo_handle_dropped_mmu_fault(g);
		handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
	}

	/* suppress the reset log while a deferred reset is pending */
	print_channel_reset_log = !g->fifo.deferred_reset_pending
			&& print_channel_reset_log;

	if (print_channel_reset_log) {
		unsigned int engine_id;
		nvgpu_err(g,
			"channel reset initiated from %s; intr=0x%08x",
			__func__, fifo_intr);
		for (engine_id = 0;
		     engine_id < g->fifo.num_engines;
		     engine_id++) {
			u32 active_engine_id = g->fifo.active_engines_list[engine_id];
			u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
			nvgpu_log_fn(g, "enum:%d -> engine_id:%d", engine_enum,
				active_engine_id);
			fifo_pbdma_exception_status(g,
					&g->fifo.engine_info[active_engine_id]);
			fifo_engine_exception_status(g,
					&g->fifo.engine_info[active_engine_id]);
		}
	}

	return handled;
}

/*
 * Reset the PBDMA PB header to a harmless non-incrementing method so the
 * faulting header is discarded.
 */
static inline void gk20a_fifo_reset_pbdma_header(struct gk20a *g, int pbdma_id)
{
	gk20a_writel(g, pbdma_pb_header_r(pbdma_id),
			pbdma_pb_header_first_true_f() |
			pbdma_pb_header_type_non_inc_f());
}

/*
 * Replace the method slot pbdma_method_index of the given PBDMA with a NOP
 * so the stuck method is skipped. The register stride between method slots
 * is derived from the method0/method1 register offsets.
 */
void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id,
			int pbdma_method_index)
{
	u32 pbdma_method_stride;
	u32 pbdma_method_reg;

	pbdma_method_stride = pbdma_method1_r(pbdma_id) -
				pbdma_method0_r(pbdma_id);

	pbdma_method_reg = pbdma_method0_r(pbdma_id) +
		(pbdma_method_index * pbdma_method_stride);

	gk20a_writel(g, pbdma_method_reg,
			pbdma_method0_valid_true_f() |
			pbdma_method0_first_true_f() |
			pbdma_method0_addr_f(
			     pbdma_udma_nop_r() >> 2));
}

/*
 * Return true if the method in the given slot targets a software-handled
 * subchannel (5, 6 or 7).
 */
static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id,
		int pbdma_method_index)
{
	u32 pbdma_method_stride;
	u32 pbdma_method_reg, pbdma_method_subch;

	pbdma_method_stride = pbdma_method1_r(pbdma_id) -
				pbdma_method0_r(pbdma_id);

	pbdma_method_reg = pbdma_method0_r(pbdma_id) +
			(pbdma_method_index * pbdma_method_stride);

	pbdma_method_subch = pbdma_method0_subch_v(
			gk20a_readl(g, pbdma_method_reg));

	if (pbdma_method_subch == 5 ||
	    pbdma_method_subch == 6 ||
	    pbdma_method_subch == 7) {
		return true;
	}

	return false;
}

/*
 * Decode and service the PBDMA intr_0 bits for one PBDMA unit.
 * *handled accumulates the serviced bits, *error_notifier may be set to a
 * more specific notifier code. Returns RC_TYPE_PBDMA_FAULT when recovery is
 * required, RC_TYPE_NO_RC otherwise.
 */
unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
			u32 pbdma_intr_0, u32 *handled, u32 *error_notifier)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned int rc_type = RC_TYPE_NO_RC;
	int i;
	unsigned long pbdma_intr_err;
	u32 bit;

	if ((f->intr.pbdma.device_fatal_0 |
	     f->intr.pbdma.channel_fatal_0 |
	     f->intr.pbdma.restartable_0) & pbdma_intr_0) {

		/* log every individual fault bit by name */
		pbdma_intr_err = (unsigned long)pbdma_intr_0;
		for_each_set_bit(bit, &pbdma_intr_err, 32) {
			nvgpu_err(g, "PBDMA intr %s Error",
				pbdma_intr_fault_type_desc[bit]);
		}

		nvgpu_err(g,
			"pbdma_intr_0(%d):0x%08x PBH: %08x "
			"SHADOW: %08x gp shadow0: %08x gp shadow1: %08x"
			"M0: %08x %08x %08x %08x ",
			pbdma_id, pbdma_intr_0,
			gk20a_readl(g, pbdma_pb_header_r(pbdma_id)),
			gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)),
			gk20a_readl(g, pbdma_gp_shadow_0_r(pbdma_id)),
			gk20a_readl(g, pbdma_gp_shadow_1_r(pbdma_id)),
			gk20a_readl(g, pbdma_method0_r(pbdma_id)),
			gk20a_readl(g, pbdma_method1_r(pbdma_id)),
			gk20a_readl(g, pbdma_method2_r(pbdma_id)),
			gk20a_readl(g, pbdma_method3_r(pbdma_id))
			);

		rc_type = RC_TYPE_PBDMA_FAULT;
		*handled |= ((f->intr.pbdma.device_fatal_0 |
			     f->intr.pbdma.channel_fatal_0 |
			     f->intr.pbdma.restartable_0) &
			    pbdma_intr_0);
	}

	if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) {
		u32 val = gk20a_readl(g, pbdma_acquire_r(pbdma_id));

		/* disable the acquire timeout before servicing */
		val &= ~pbdma_acquire_timeout_en_enable_f();
		gk20a_writel(g, pbdma_acquire_r(pbdma_id), val);
		if (nvgpu_is_timeouts_enabled(g)) {
			rc_type = RC_TYPE_PBDMA_FAULT;
			nvgpu_err(g,
				"semaphore acquire timeout!");
			*error_notifier = NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT;
		}
		*handled |= pbdma_intr_0_acquire_pending_f();
	}

	if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) {
		gk20a_fifo_reset_pbdma_header(g, pbdma_id);
		gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
		rc_type = RC_TYPE_PBDMA_FAULT;
	}

	if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) {
		gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
		rc_type = RC_TYPE_PBDMA_FAULT;
	}

	if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) {
		*error_notifier =
			NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH;
		rc_type = RC_TYPE_PBDMA_FAULT;
	}

	if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) {
		gk20a_fifo_reset_pbdma_header(g, pbdma_id);

		/* NOP out any stuck methods on software subchannels */
		for (i = 0; i < 4; i++) {
			if (gk20a_fifo_is_sw_method_subch(g,
					pbdma_id, i)) {
				gk20a_fifo_reset_pbdma_method(g,
						pbdma_id, i);
			}
		}
		rc_type = RC_TYPE_PBDMA_FAULT;
	}

	return rc_type;
}

/*
 * Service PBDMA intr_1 bits. All of these are "host copy engine" related and
 * unsupported, so they are unconditionally treated as channel-fatal
 * (always returns RC_TYPE_PBDMA_FAULT).
 */
unsigned int gk20a_fifo_handle_pbdma_intr_1(struct gk20a *g,
			u32 pbdma_id, u32 pbdma_intr_1,
			u32 *handled, u32 *error_notifier)
{
	unsigned int rc_type = RC_TYPE_PBDMA_FAULT;

	/*
	 * all of the interrupts in _intr_1 are "host copy engine"
	 * related, which is not supported. For now just make them
	 * channel fatal.
	 */
	nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x",
		pbdma_id, pbdma_intr_1);
	*handled |= pbdma_intr_1;

	return rc_type;
}

/*
 * Recover from a PBDMA fault: decode the channel or TSG id from the PBDMA
 * status register, post the error notifier to the affected channel(s) and
 * trigger channel/TSG recovery.
 */
static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
			struct fifo_gk20a *f, u32 pbdma_id,
			u32 error_notifier, u32 status)
{
	u32 id;

	nvgpu_log(g, gpu_dbg_info, "pbdma id %d error notifier %d",
			pbdma_id, error_notifier);
	/* Remove channel from runlist */
	id = fifo_pbdma_status_id_v(status);
	if (fifo_pbdma_status_id_type_v(status)
			== fifo_pbdma_status_id_type_chid_v()) {
		struct channel_gk20a *ch = gk20a_channel_from_id(g, id);

		if (ch != NULL) {
			g->ops.fifo.set_error_notifier(ch, error_notifier);
			gk20a_fifo_recover_ch(g, ch, true, RC_TYPE_PBDMA_FAULT);
			gk20a_channel_put(ch);
		}
	} else if (fifo_pbdma_status_id_type_v(status)
			== fifo_pbdma_status_id_type_tsgid_v()) {
		struct tsg_gk20a *tsg = &f->tsg[id];
		struct channel_gk20a *ch = NULL;

		/* notify every channel in the TSG, then recover the TSG */
		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
				channel_gk20a, ch_entry) {
			if (gk20a_channel_get(ch)) {
				g->ops.fifo.set_error_notifier(ch,
					error_notifier);
				gk20a_channel_put(ch);
			}
		}
		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
		gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PBDMA_FAULT);
	}
}

/*
 * Top-level PBDMA interrupt service for one PBDMA unit: reads and clears
 * intr_0/intr_1, dispatches to the per-register handlers, and (when rc ==
 * RC_YES) runs fault recovery. Returns the mask of handled bits.
 */
u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f,
			u32 pbdma_id, unsigned int rc)
{
	u32 pbdma_intr_0 = gk20a_readl(g, pbdma_intr_0_r(pbdma_id));
	u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));

	u32 handled = 0;
	u32 error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR;
	unsigned int rc_type = RC_TYPE_NO_RC;
	u32 pbdma_status_info = 0;

	if (pbdma_intr_0) {
		nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
			"pbdma id %d intr_0 0x%08x pending",
			pbdma_id, pbdma_intr_0);

		if
		   (g->ops.fifo.handle_pbdma_intr_0(g, pbdma_id, pbdma_intr_0,
			&handled, &error_notifier) != RC_TYPE_NO_RC) {
			rc_type = RC_TYPE_PBDMA_FAULT;

			/* snapshot status before the intr is cleared */
			pbdma_status_info = gk20a_readl(g,
				fifo_pbdma_status_r(pbdma_id));
		}
		gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
	}

	if (pbdma_intr_1) {
		nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
			"pbdma id %d intr_1 0x%08x pending",
			pbdma_id, pbdma_intr_1);

		if (g->ops.fifo.handle_pbdma_intr_1(g, pbdma_id, pbdma_intr_1,
			&handled, &error_notifier) != RC_TYPE_NO_RC) {
			rc_type = RC_TYPE_PBDMA_FAULT;

			pbdma_status_info = gk20a_readl(g,
				fifo_pbdma_status_r(pbdma_id));
		}
		gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
	}

	if (rc == RC_YES && rc_type == RC_TYPE_PBDMA_FAULT) {
		gk20a_fifo_pbdma_fault_rc(g, f, pbdma_id, error_notifier,
				pbdma_status_info);
	}

	return handled;
}

/*
 * Service all PBDMA units with a pending interrupt and report the FIFO
 * summary bit to clear.
 *
 * NOTE(review): clear_intr accumulates the per-PBDMA handled bits but the
 * function unconditionally returns the pbdma summary bit; the accumulated
 * value is never read — confirm whether this is intentional.
 */
static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
{
	struct fifo_gk20a *f = &g->fifo;
	u32 clear_intr = 0, i;
	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
	u32 pbdma_pending = gk20a_readl(g, fifo_intr_pbdma_id_r());

	for (i = 0; i < host_num_pbdma; i++) {
		if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
			nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i);
			clear_intr |=
				gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES);
		}
	}
	return fifo_intr_0_pbdma_intr_pending_f();
}

/*
 * Main (stalling) FIFO interrupt service routine. Dispatches runlist events,
 * PBDMA interrupts, ctxsw timeouts and error interrupts under the isr mutex,
 * then clears the handled bits in fifo_intr_0.
 */
void gk20a_fifo_isr(struct gk20a *g)
{
	u32 error_intr_mask;
	u32 clear_intr = 0;
	u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());

	error_intr_mask = g->ops.fifo.intr_0_error_mask(g);

	if (g->fifo.sw_ready) {
		/* note we're not actually in an "isr", but rather
		 * in a threaded interrupt context... */
		nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);

		nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);

		/* handle runlist update */
		if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
			gk20a_fifo_handle_runlist_event(g);
			clear_intr |= fifo_intr_0_runlist_event_pending_f();
		}
		if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) {
			clear_intr |= fifo_pbdma_isr(g, fifo_intr);
		}

		if (g->ops.fifo.handle_ctxsw_timeout) {
			g->ops.fifo.handle_ctxsw_timeout(g, fifo_intr);
		}

		if (unlikely((fifo_intr & error_intr_mask) != 0U)) {
			clear_intr |= fifo_error_isr(g, fifo_intr);
		}

		nvgpu_mutex_release(&g->fifo.intr.isr.mutex);
	}
	gk20a_writel(g, fifo_intr_0_r(), clear_intr);

	return;
}

/*
 * Non-stalling FIFO interrupt: acknowledge channel interrupts and ask the
 * caller to wake semaphore waiters.
 */
u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
{
	u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
	u32 clear_intr = 0;

	nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);

	if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) {
		clear_intr = fifo_intr_0_channel_intr_pending_f();
	}

	gk20a_writel(g, fifo_intr_0_r(), clear_intr);

	return GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE;
}

/* Write the preempt request register for a channel or TSG id. */
void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg)
{
	if (is_tsg) {
		gk20a_writel(g, fifo_preempt_r(),
			fifo_preempt_id_f(id) |
			fifo_preempt_type_tsg_f());
	} else {
		gk20a_writel(g, fifo_preempt_r(),
			fifo_preempt_chid_f(id) |
			fifo_preempt_type_channel_f());
	}
}

/* Preempt-done poll timeout in milliseconds. */
static u32 gk20a_fifo_get_preempt_timeout(struct gk20a *g)
{
	/* Use fifo_eng_timeout converted to ms for preempt
	 * polling. gr_idle_timeout i.e 3000 ms is and not appropriate
	 * for polling preempt done as context switch timeout gets
	 * triggered every 100 ms and context switch recovery
	 * happens every 3000 ms */

	return g->fifo_eng_timeout_us / 1000;
}

/*
 * Poll the preempt register until the pending bit clears or the preempt
 * timeout expires, with exponential backoff between reads. Returns 0 when
 * the preempt completed, -EBUSY on timeout.
 */
int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
		unsigned int id_type, bool preempt_retries_left)
{
	struct nvgpu_timeout timeout;
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	int ret = -EBUSY;

	nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);
	do {
		if (!(gk20a_readl(g, fifo_preempt_r()) &
				fifo_preempt_pending_true_f())) {
			ret = 0;
			break;
		}

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	if (ret) {
		nvgpu_err(g, "preempt timeout: id: %u id_type: %d ",
			id, id_type);
	}
	return ret;
}

/*
 * Preempt-timeout recovery for a TSG: post the idle-timeout notifier to all
 * its channels and trigger TSG recovery.
 */
void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
{
	struct channel_gk20a *ch = NULL;

	nvgpu_err(g, "preempt TSG %d timeout", tsg->tsgid);

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	nvgpu_list_for_each_entry(ch, &tsg->ch_list,
			channel_gk20a, ch_entry) {
		if (!gk20a_channel_get(ch)) {
			continue;
		}
		g->ops.fifo.set_error_notifier(ch,
			NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
		gk20a_channel_put(ch);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT);
}

/*
 * Preempt-timeout recovery for a bare channel: post the idle-timeout
 * notifier and trigger channel recovery.
 */
void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)
{
	nvgpu_err(g, "preempt channel %d timeout", ch->chid);

	g->ops.fifo.set_error_notifier(ch,
			NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
	gk20a_fifo_recover_ch(g, ch, true,
				RC_TYPE_PREEMPT_TIMEOUT);
}

int __locked_fifo_preempt(struct
gk20a *g, u32 id, bool is_tsg, 3044 bool preempt_retries_left) 3045{ 3046 int ret; 3047 unsigned int id_type; 3048 3049 nvgpu_log_fn(g, "id: %d is_tsg: %d", id, is_tsg); 3050 3051 /* issue preempt */ 3052 gk20a_fifo_issue_preempt(g, id, is_tsg); 3053 3054 id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; 3055 3056 /* 3057 * Poll for preempt done. if stalling interrupts are pending 3058 * while preempt is in progress we poll for stalling interrupts 3059 * to finish based on return value from this function and 3060 * retry preempt again. 3061 * If HW is hung, on the last retry instance we try to identify 3062 * the engines hung and set the runlist reset_eng_bitmask 3063 * and mark preemption completion. 3064 */ 3065 ret = g->ops.fifo.is_preempt_pending(g, id, id_type, 3066 preempt_retries_left); 3067 3068 return ret; 3069} 3070 3071int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch) 3072{ 3073 struct fifo_gk20a *f = &g->fifo; 3074 u32 ret = 0; 3075 u32 token = PMU_INVALID_MUTEX_OWNER_ID; 3076 u32 mutex_ret = 0; 3077 u32 i; 3078 3079 nvgpu_log_fn(g, "chid: %d", ch->chid); 3080 3081 /* we have no idea which runlist we are using. 
lock all */ 3082 for (i = 0; i < g->fifo.max_runlists; i++) { 3083 nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock); 3084 } 3085 3086 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3087 3088 ret = __locked_fifo_preempt(g, ch->chid, false, false); 3089 3090 if (!mutex_ret) { 3091 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3092 } 3093 3094 for (i = 0; i < g->fifo.max_runlists; i++) { 3095 nvgpu_mutex_release(&f->runlist_info[i].runlist_lock); 3096 } 3097 3098 if (ret) { 3099 if (nvgpu_platform_is_silicon(g)) { 3100 nvgpu_err(g, "preempt timed out for chid: %u, " 3101 "ctxsw timeout will trigger recovery if needed", 3102 ch->chid); 3103 } else { 3104 gk20a_fifo_preempt_timeout_rc(g, ch); 3105 } 3106 } 3107 3108 return ret; 3109} 3110 3111int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg) 3112{ 3113 struct fifo_gk20a *f = &g->fifo; 3114 u32 ret = 0; 3115 u32 token = PMU_INVALID_MUTEX_OWNER_ID; 3116 u32 mutex_ret = 0; 3117 u32 i; 3118 3119 nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid); 3120 3121 /* we have no idea which runlist we are using. 
lock all */ 3122 for (i = 0; i < g->fifo.max_runlists; i++) { 3123 nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock); 3124 } 3125 3126 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3127 3128 ret = __locked_fifo_preempt(g, tsg->tsgid, true, false); 3129 3130 if (!mutex_ret) { 3131 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3132 } 3133 3134 for (i = 0; i < g->fifo.max_runlists; i++) { 3135 nvgpu_mutex_release(&f->runlist_info[i].runlist_lock); 3136 } 3137 3138 if (ret) { 3139 if (nvgpu_platform_is_silicon(g)) { 3140 nvgpu_err(g, "preempt timed out for tsgid: %u, " 3141 "ctxsw timeout will trigger recovery if needed", 3142 tsg->tsgid); 3143 } else { 3144 gk20a_fifo_preempt_timeout_rc_tsg(g, tsg); 3145 } 3146 } 3147 3148 return ret; 3149} 3150 3151int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch) 3152{ 3153 int err; 3154 struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch); 3155 3156 if (tsg != NULL) { 3157 err = g->ops.fifo.preempt_tsg(ch->g, tsg); 3158 } else { 3159 err = g->ops.fifo.preempt_channel(ch->g, ch); 3160 } 3161 3162 return err; 3163} 3164 3165static void gk20a_fifo_sched_disable_rw(struct gk20a *g, u32 runlists_mask, 3166 u32 runlist_state) 3167{ 3168 u32 reg_val; 3169 3170 reg_val = gk20a_readl(g, fifo_sched_disable_r()); 3171 3172 if (runlist_state == RUNLIST_DISABLED) { 3173 reg_val |= runlists_mask; 3174 } else { 3175 reg_val &= (~runlists_mask); 3176 } 3177 3178 gk20a_writel(g, fifo_sched_disable_r(), reg_val); 3179 3180} 3181 3182void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask, 3183 u32 runlist_state) 3184{ 3185 u32 token = PMU_INVALID_MUTEX_OWNER_ID; 3186 u32 mutex_ret; 3187 3188 nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x", 3189 runlists_mask, runlist_state); 3190 3191 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3192 3193 gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state); 3194 3195 if (!mutex_ret) { 3196 
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3197 } 3198} 3199 3200void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg) 3201{ 3202 gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( 3203 tsg->runlist_id), RUNLIST_ENABLED); 3204 3205} 3206 3207void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg) 3208{ 3209 gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( 3210 tsg->runlist_id), RUNLIST_DISABLED); 3211} 3212 3213int gk20a_fifo_enable_engine_activity(struct gk20a *g, 3214 struct fifo_engine_info_gk20a *eng_info) 3215{ 3216 nvgpu_log(g, gpu_dbg_info, "start"); 3217 3218 gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( 3219 eng_info->runlist_id), RUNLIST_ENABLED); 3220 return 0; 3221} 3222 3223int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) 3224{ 3225 unsigned int i; 3226 int err = 0, ret = 0; 3227 3228 for (i = 0; i < g->fifo.num_engines; i++) { 3229 u32 active_engine_id = g->fifo.active_engines_list[i]; 3230 err = gk20a_fifo_enable_engine_activity(g, 3231 &g->fifo.engine_info[active_engine_id]); 3232 if (err) { 3233 nvgpu_err(g, 3234 "failed to enable engine %d activity", active_engine_id); 3235 ret = err; 3236 } 3237 } 3238 3239 return ret; 3240} 3241 3242int gk20a_fifo_disable_engine_activity(struct gk20a *g, 3243 struct fifo_engine_info_gk20a *eng_info, 3244 bool wait_for_idle) 3245{ 3246 u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat; 3247 u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID; 3248 u32 engine_chid = FIFO_INVAL_CHANNEL_ID; 3249 u32 token = PMU_INVALID_MUTEX_OWNER_ID; 3250 int mutex_ret; 3251 struct channel_gk20a *ch = NULL; 3252 int err = 0; 3253 3254 nvgpu_log_fn(g, " "); 3255 3256 gr_stat = 3257 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); 3258 if (fifo_engine_status_engine_v(gr_stat) == 3259 fifo_engine_status_engine_busy_v() && !wait_for_idle) { 3260 return -EBUSY; 3261 } 3262 3263 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, 
PMU_MUTEX_ID_FIFO, &token); 3264 3265 gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( 3266 eng_info->runlist_id), RUNLIST_DISABLED); 3267 3268 /* chid from pbdma status */ 3269 pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id)); 3270 chan_stat = fifo_pbdma_status_chan_status_v(pbdma_stat); 3271 if (chan_stat == fifo_pbdma_status_chan_status_valid_v() || 3272 chan_stat == fifo_pbdma_status_chan_status_chsw_save_v()) { 3273 pbdma_chid = fifo_pbdma_status_id_v(pbdma_stat); 3274 } else if (chan_stat == fifo_pbdma_status_chan_status_chsw_load_v() || 3275 chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v()) { 3276 pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat); 3277 } 3278 3279 if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) { 3280 ch = gk20a_channel_from_id(g, pbdma_chid); 3281 if (ch != NULL) { 3282 err = g->ops.fifo.preempt_channel(g, ch); 3283 gk20a_channel_put(ch); 3284 } 3285 if (err != 0) { 3286 goto clean_up; 3287 } 3288 } 3289 3290 /* chid from engine status */ 3291 eng_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id)); 3292 ctx_stat = fifo_engine_status_ctx_status_v(eng_stat); 3293 if (ctx_stat == fifo_engine_status_ctx_status_valid_v() || 3294 ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v()) { 3295 engine_chid = fifo_engine_status_id_v(eng_stat); 3296 } else if (ctx_stat == fifo_engine_status_ctx_status_ctxsw_load_v() || 3297 ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v()) { 3298 engine_chid = fifo_engine_status_next_id_v(eng_stat); 3299 } 3300 3301 if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) { 3302 ch = gk20a_channel_from_id(g, engine_chid); 3303 if (ch != NULL) { 3304 err = g->ops.fifo.preempt_channel(g, ch); 3305 gk20a_channel_put(ch); 3306 } 3307 if (err != 0) { 3308 goto clean_up; 3309 } 3310 } 3311 3312clean_up: 3313 if (!mutex_ret) { 3314 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3315 } 3316 3317 if (err) { 3318 nvgpu_log_fn(g, 
"failed"); 3319 if (gk20a_fifo_enable_engine_activity(g, eng_info)) { 3320 nvgpu_err(g, 3321 "failed to enable gr engine activity"); 3322 } 3323 } else { 3324 nvgpu_log_fn(g, "done"); 3325 } 3326 return err; 3327} 3328 3329int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, 3330 bool wait_for_idle) 3331{ 3332 unsigned int i; 3333 int err = 0, ret = 0; 3334 u32 active_engine_id; 3335 3336 for (i = 0; i < g->fifo.num_engines; i++) { 3337 active_engine_id = g->fifo.active_engines_list[i]; 3338 err = gk20a_fifo_disable_engine_activity(g, 3339 &g->fifo.engine_info[active_engine_id], 3340 wait_for_idle); 3341 if (err) { 3342 nvgpu_err(g, "failed to disable engine %d activity", 3343 active_engine_id); 3344 ret = err; 3345 break; 3346 } 3347 } 3348 3349 if (err) { 3350 while (i-- != 0) { 3351 active_engine_id = g->fifo.active_engines_list[i]; 3352 err = gk20a_fifo_enable_engine_activity(g, 3353 &g->fifo.engine_info[active_engine_id]); 3354 if (err) { 3355 nvgpu_err(g, 3356 "failed to re-enable engine %d activity", 3357 active_engine_id); 3358 } 3359 } 3360 } 3361 3362 return ret; 3363} 3364 3365static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id) 3366{ 3367 struct fifo_gk20a *f = &g->fifo; 3368 u32 engines = 0; 3369 unsigned int i; 3370 3371 for (i = 0; i < f->num_engines; i++) { 3372 u32 active_engine_id = g->fifo.active_engines_list[i]; 3373 u32 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id)); 3374 bool engine_busy = fifo_engine_status_engine_v(status) == 3375 fifo_engine_status_engine_busy_v(); 3376 3377 if (engine_busy && 3378 (f->engine_info[active_engine_id].runlist_id == runlist_id)) { 3379 engines |= BIT(active_engine_id); 3380 } 3381 } 3382 3383 if (engines) { 3384 gk20a_fifo_recover(g, engines, ~(u32)0, false, false, true, 3385 RC_TYPE_RUNLIST_UPDATE_TIMEOUT); 3386 } 3387} 3388 3389int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id) 3390{ 3391 struct nvgpu_timeout timeout; 3392 unsigned long delay 
= GR_IDLE_CHECK_DEFAULT; 3393 int ret = -ETIMEDOUT; 3394 3395 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 3396 NVGPU_TIMER_CPU_TIMER); 3397 3398 do { 3399 if ((gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) & 3400 fifo_eng_runlist_pending_true_f()) == 0) { 3401 ret = 0; 3402 break; 3403 } 3404 3405 nvgpu_usleep_range(delay, delay * 2); 3406 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 3407 } while (!nvgpu_timeout_expired(&timeout)); 3408 3409 if (ret) { 3410 nvgpu_err(g, "runlist wait timeout: runlist id: %u", 3411 runlist_id); 3412 } 3413 3414 return ret; 3415} 3416 3417void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) 3418{ 3419 3420 u32 runlist_entry_0 = ram_rl_entry_id_f(tsg->tsgid) | 3421 ram_rl_entry_type_tsg_f() | 3422 ram_rl_entry_tsg_length_f(tsg->num_active_channels); 3423 3424 if (tsg->timeslice_timeout) { 3425 runlist_entry_0 |= 3426 ram_rl_entry_timeslice_scale_f(tsg->timeslice_scale) | 3427 ram_rl_entry_timeslice_timeout_f(tsg->timeslice_timeout); 3428 } else { 3429 runlist_entry_0 |= 3430 ram_rl_entry_timeslice_scale_f( 3431 NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE) | 3432 ram_rl_entry_timeslice_timeout_f( 3433 NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT); 3434 } 3435 3436 runlist[0] = runlist_entry_0; 3437 runlist[1] = 0; 3438 3439} 3440 3441u32 gk20a_fifo_default_timeslice_us(struct gk20a *g) 3442{ 3443 return (((u64)(NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT << 3444 NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE) * 3445 (u64)g->ptimer_src_freq) / 3446 (u64)PTIMER_REF_FREQ_HZ); 3447} 3448 3449void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist) 3450{ 3451 runlist[0] = ram_rl_entry_chid_f(ch->chid); 3452 runlist[1] = 0; 3453} 3454 3455/* recursively construct a runlist with interleaved bare channels and TSGs */ 3456u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, 3457 struct fifo_runlist_info_gk20a *runlist, 3458 u32 cur_level, 3459 u32 *runlist_entry, 3460 bool interleave_enabled, 3461 bool prev_empty, 3462 
				u32 *entries_left)
{
	bool last_level = cur_level == NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH;
	struct channel_gk20a *ch;
	bool skip_next = false;
	u32 tsgid, count = 0;
	/* number of u32 words per runlist entry (entries are 2+ words) */
	u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32);
	struct gk20a *g = f->g;

	nvgpu_log_fn(g, " ");

	/* for each TSG, T, on this level, insert all higher-level channels
	   and TSGs before inserting T. */
	for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
		struct tsg_gk20a *tsg = &f->tsg[tsgid];

		if (tsg->interleave_level != cur_level) {
			continue;
		}

		/*
		 * Recurse into the next-higher interleave level first.
		 * With interleaving disabled this is done only once per
		 * level (skip_next), so higher levels are not duplicated.
		 */
		if (!last_level && !skip_next) {
			runlist_entry = gk20a_runlist_construct_locked(f,
						runlist,
						cur_level + 1,
						runlist_entry,
						interleave_enabled,
						false,
						entries_left);
			if (!interleave_enabled) {
				skip_next = true;
			}
		}

		/* out of room in the runlist buffer */
		if (*entries_left == 0U) {
			return NULL;
		}

		/* add TSG entry */
		nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid);
		f->g->ops.fifo.get_tsg_runlist_entry(tsg, runlist_entry);
		nvgpu_log_info(g, "tsg runlist count %d runlist [0] %x [1] %x\n",
			count, runlist_entry[0], runlist_entry[1]);
		runlist_entry += runlist_entry_words;
		count++;
		(*entries_left)--;

		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
		/* add runnable channels bound to this TSG */
		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
				channel_gk20a, ch_entry) {
			if (!test_bit((int)ch->chid,
					runlist->active_channels)) {
				continue;
			}

			if (*entries_left == 0U) {
				/* drop the lock before bailing out */
				nvgpu_rwsem_up_read(&tsg->ch_list_lock);
				return NULL;
			}

			nvgpu_log_info(g, "add channel %d to runlist",
				ch->chid);
			f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
			nvgpu_log_info(g,
				"run list count %d runlist [0] %x [1] %x\n",
				count, runlist_entry[0], runlist_entry[1]);
			count++;
			runlist_entry += runlist_entry_words;
			(*entries_left)--;
		}
		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	}

	/* append entries from higher level if this level is empty */
	if (!count && !last_level) {
		runlist_entry = gk20a_runlist_construct_locked(f,
						runlist,
						cur_level + 1,
						runlist_entry,
						interleave_enabled,
						true,
						entries_left);
	}

	/*
	 * if previous and this level have entries, append
	 * entries from higher level.
	 *
	 * ex. dropping from MEDIUM to LOW, need to insert HIGH
	 */
	if (interleave_enabled && count && !prev_empty && !last_level) {
		runlist_entry = gk20a_runlist_construct_locked(f,
						runlist,
						cur_level + 1,
						runlist_entry,
						interleave_enabled,
						false,
						entries_left);
	}
	/* returns the next free entry slot, or NULL if the buffer filled */
	return runlist_entry;
}

/*
 * Set the interleave level for TSG @id. @runlist_id is unused here; the
 * new level takes effect on the next runlist reconstruction.
 */
int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
				u32 id,
				u32 runlist_id,
				u32 new_level)
{
	nvgpu_log_fn(g, " ");

	g->fifo.tsg[id].interleave_level = new_level;

	return 0;
}

/*
 * Validate and apply a new timeslice (in us) for @tsg, then rebuild the
 * TSG's runlist so the hardware picks up the new timeout/scale encoding.
 */
int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
{
	struct gk20a *g = tsg->g;

	if (timeslice < g->min_timeslice_us ||
		timeslice > g->max_timeslice_us) {
		return -EINVAL;
	}

	gk20a_channel_get_timescale_from_timeslice(g, timeslice,
			&tsg->timeslice_timeout, &tsg->timeslice_scale);

	tsg->timeslice_us = timeslice;

	return g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
}

/*
 * Submit runlist buffer @buffer_index with @count entries to the hardware.
 * A count of 0 skips the base-pointer write (runlist is being emptied).
 */
void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
	u32 count, u32 buffer_index)
{
	struct fifo_runlist_info_gk20a *runlist = NULL;
	u64 runlist_iova;

	runlist = &g->fifo.runlist_info[runlist_id];
	runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);

	if (count != 0) {
		gk20a_writel(g, fifo_runlist_base_r(),
			fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >>
12)) | 3605 nvgpu_aperture_mask(g, &runlist->mem[buffer_index], 3606 fifo_runlist_base_target_sys_mem_ncoh_f(), 3607 fifo_runlist_base_target_sys_mem_coh_f(), 3608 fifo_runlist_base_target_vid_mem_f())); 3609 } 3610 3611 gk20a_writel(g, fifo_runlist_r(), 3612 fifo_runlist_engine_f(runlist_id) | 3613 fifo_eng_runlist_length_f(count)); 3614} 3615 3616int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, 3617 u32 chid, bool add, 3618 bool wait_for_finish) 3619{ 3620 int ret = 0; 3621 struct fifo_gk20a *f = &g->fifo; 3622 struct fifo_runlist_info_gk20a *runlist = NULL; 3623 u32 *runlist_entry_base = NULL; 3624 u64 runlist_iova; 3625 u32 new_buf; 3626 struct channel_gk20a *ch = NULL; 3627 struct tsg_gk20a *tsg = NULL; 3628 u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32); 3629 3630 runlist = &f->runlist_info[runlist_id]; 3631 3632 /* valid channel, add/remove it from active list. 3633 Otherwise, keep active list untouched for suspend/resume. */ 3634 if (chid != FIFO_INVAL_CHANNEL_ID) { 3635 ch = &f->channel[chid]; 3636 tsg = tsg_gk20a_from_ch(ch); 3637 3638 if (add) { 3639 if (test_and_set_bit(chid, 3640 runlist->active_channels) == 1) { 3641 return 0; 3642 } 3643 if (tsg && ++tsg->num_active_channels) { 3644 set_bit((int)f->channel[chid].tsgid, 3645 runlist->active_tsgs); 3646 } 3647 } else { 3648 if (test_and_clear_bit(chid, 3649 runlist->active_channels) == 0) { 3650 return 0; 3651 } 3652 if (tsg && --tsg->num_active_channels == 0) { 3653 clear_bit((int)f->channel[chid].tsgid, 3654 runlist->active_tsgs); 3655 } 3656 } 3657 } 3658 3659 new_buf = !runlist->cur_buffer; 3660 3661 runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[new_buf]); 3662 3663 nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx", 3664 runlist_id, (u64)runlist_iova); 3665 3666 if (!runlist_iova) { 3667 ret = -EINVAL; 3668 goto clean_up; 3669 } 3670 3671 runlist_entry_base = runlist->mem[new_buf].cpu_va; 3672 if (!runlist_entry_base) { 3673 ret = -ENOMEM; 
3674 goto clean_up; 3675 } 3676 3677 if (chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */ 3678 add /* resume to add all channels back */) { 3679 u32 max_entries = f->num_runlist_entries; 3680 u32 *runlist_end; 3681 3682 runlist_end = gk20a_runlist_construct_locked(f, 3683 runlist, 3684 0, 3685 runlist_entry_base, 3686 g->runlist_interleave, 3687 true, 3688 &max_entries); 3689 if (!runlist_end) { 3690 ret = -E2BIG; 3691 goto clean_up; 3692 } 3693 runlist->count = (runlist_end - runlist_entry_base) / 3694 runlist_entry_words; 3695 WARN_ON(runlist->count > f->num_runlist_entries); 3696 } else { 3697 /* suspend to remove all channels */ 3698 runlist->count = 0; 3699 } 3700 3701 g->ops.fifo.runlist_hw_submit(g, runlist_id, runlist->count, new_buf); 3702 3703 if (wait_for_finish) { 3704 ret = g->ops.fifo.runlist_wait_pending(g, runlist_id); 3705 3706 if (ret == -ETIMEDOUT) { 3707 nvgpu_err(g, "runlist %d update timeout", runlist_id); 3708 /* trigger runlist update timeout recovery */ 3709 return ret; 3710 3711 } else if (ret == -EINTR) { 3712 nvgpu_err(g, "runlist update interrupted"); 3713 } 3714 } 3715 3716 runlist->cur_buffer = new_buf; 3717 3718clean_up: 3719 return ret; 3720} 3721 3722int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid, 3723 bool add, bool wait_for_finish) 3724{ 3725 u32 ret = -EINVAL; 3726 u32 runlist_id = 0; 3727 u32 errcode; 3728 unsigned long ulong_runlist_ids = (unsigned long)runlist_ids; 3729 3730 if (!g) { 3731 goto end; 3732 } 3733 3734 ret = 0; 3735 for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) { 3736 /* Capture the last failure error code */ 3737 errcode = g->ops.fifo.update_runlist(g, runlist_id, chid, add, wait_for_finish); 3738 if (errcode) { 3739 nvgpu_err(g, 3740 "failed to update_runlist %d %d", runlist_id, errcode); 3741 ret = errcode; 3742 } 3743 } 3744end: 3745 return ret; 3746} 3747 3748/* trigger host preempt of GR pending load ctx if that ctx is not for ch */ 3749static int 
__locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch, 3750 bool wait_preempt) 3751{ 3752 struct gk20a *g = ch->g; 3753 struct fifo_runlist_info_gk20a *runlist = 3754 &g->fifo.runlist_info[ch->runlist_id]; 3755 int ret = 0; 3756 u32 gr_eng_id = 0; 3757 u32 engstat = 0, ctxstat = 0, fecsstat0 = 0, fecsstat1 = 0; 3758 u32 preempt_id; 3759 u32 preempt_type = 0; 3760 3761 if (1 != gk20a_fifo_get_engine_ids( 3762 g, &gr_eng_id, 1, ENGINE_GR_GK20A)) { 3763 return ret; 3764 } 3765 if (!(runlist->eng_bitmask & (1 << gr_eng_id))) { 3766 return ret; 3767 } 3768 3769 if (wait_preempt && gk20a_readl(g, fifo_preempt_r()) & 3770 fifo_preempt_pending_true_f()) { 3771 return ret; 3772 } 3773 3774 fecsstat0 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); 3775 engstat = gk20a_readl(g, fifo_engine_status_r(gr_eng_id)); 3776 ctxstat = fifo_engine_status_ctx_status_v(engstat); 3777 if (ctxstat == fifo_engine_status_ctx_status_ctxsw_switch_v()) { 3778 /* host switching to next context, preempt that if needed */ 3779 preempt_id = fifo_engine_status_next_id_v(engstat); 3780 preempt_type = fifo_engine_status_next_id_type_v(engstat); 3781 } else { 3782 return ret; 3783 } 3784 if (preempt_id == ch->tsgid && preempt_type) { 3785 return ret; 3786 } 3787 fecsstat1 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); 3788 if (fecsstat0 != FECS_MAILBOX_0_ACK_RESTORE || 3789 fecsstat1 != FECS_MAILBOX_0_ACK_RESTORE) { 3790 /* preempt useless if FECS acked save and started restore */ 3791 return ret; 3792 } 3793 3794 gk20a_fifo_issue_preempt(g, preempt_id, preempt_type); 3795#ifdef TRACEPOINTS_ENABLED 3796 trace_gk20a_reschedule_preempt_next(ch->chid, fecsstat0, engstat, 3797 fecsstat1, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)), 3798 gk20a_readl(g, fifo_preempt_r())); 3799#endif 3800 if (wait_preempt) { 3801 g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type, false); 3802 } 3803#ifdef TRACEPOINTS_ENABLED 3804 trace_gk20a_reschedule_preempted_next(ch->chid); 3805#endif 3806 return ret; 3807} 

/* Reschedule with preempt-wait enabled; thin wrapper for callers. */
int gk20a_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next)
{
	return nvgpu_fifo_reschedule_runlist(ch, preempt_next, true);
}

/* trigger host to expire current timeslice and reschedule runlist from front */
int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
		bool wait_preempt)
{
	struct gk20a *g = ch->g;
	struct fifo_runlist_info_gk20a *runlist;
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret;
	int ret = 0;

	runlist = &g->fifo.runlist_info[ch->runlist_id];
	/* non-blocking: bail out rather than wait on a busy runlist */
	if (!nvgpu_mutex_tryacquire(&runlist->runlist_lock)) {
		return -EBUSY;
	}

	/* PMU mutex arbitrates runlist access with the PMU firmware */
	mutex_ret = nvgpu_pmu_mutex_acquire(
		&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	/* resubmit the current buffer; hardware restarts from the front */
	g->ops.fifo.runlist_hw_submit(
		g, ch->runlist_id, runlist->count, runlist->cur_buffer);

	if (preempt_next) {
		__locked_fifo_reschedule_preempt_next(ch, wait_preempt);
	}

	gk20a_fifo_runlist_wait_pending(g, ch->runlist_id);

	if (!mutex_ret) {
		nvgpu_pmu_mutex_release(
			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
	}
	nvgpu_mutex_release(&runlist->runlist_lock);

	return ret;
}

/* add/remove a channel from runlist
   special cases below: runlist->active_channels will NOT be changed.
   (chid == ~0 && !add) means remove all active channels from runlist.
   (chid == ~0 && add) means restore all active channels on runlist. */
int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
			      bool add, bool wait_for_finish)
{
	struct fifo_runlist_info_gk20a *runlist = NULL;
	struct fifo_gk20a *f = &g->fifo;
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret;
	int ret = 0;

	nvgpu_log_fn(g, " ");

	runlist = &f->runlist_info[runlist_id];

	/* lock ordering: runlist lock first, then the PMU FIFO mutex */
	nvgpu_mutex_acquire(&runlist->runlist_lock);

	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
					       wait_for_finish);

	if (!mutex_ret) {
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
	}

	nvgpu_mutex_release(&runlist->runlist_lock);

	/* recovery must run after both locks are dropped */
	if (ret == -ETIMEDOUT) {
		gk20a_fifo_runlist_reset_engines(g, runlist_id);
	}

	return ret;
}

/* Quiesce FIFO for suspend: stop BAR1 snooping and mask FIFO interrupts. */
int gk20a_fifo_suspend(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	/* stop bar1 snooping */
	if (g->ops.mm.is_bar1_supported(g)) {
		gk20a_writel(g, fifo_bar1_base_r(),
			fifo_bar1_base_valid_false_f());
	}

	/* disable fifo intr */
	gk20a_writel(g, fifo_intr_en_0_r(), 0);
	gk20a_writel(g, fifo_intr_en_1_r(), 0);

	nvgpu_log_fn(g, "done");
	return 0;
}

/* True if the FIFO interrupt register reports a pending MMU fault. */
bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
{
	if (gk20a_readl(g, fifo_intr_0_r()) &
			fifo_intr_0_mmu_fault_pending_f()) {
		return true;
	} else {
		return false;
	}
}

/* True if any host engine currently reports busy status. */
bool gk20a_fifo_is_engine_busy(struct gk20a *g)
{
	u32 i, host_num_engines;

	host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

	for (i = 0; i < host_num_engines; i++) {
		u32 status = gk20a_readl(g, fifo_engine_status_r(i));
		if (fifo_engine_status_engine_v(status) ==
			fifo_engine_status_engine_busy_v()) {
			return true;
		}
	}
	return false;
}

int
gk20a_fifo_wait_engine_idle(struct gk20a *g) 3932{ 3933 struct nvgpu_timeout timeout; 3934 unsigned long delay = GR_IDLE_CHECK_DEFAULT; 3935 int ret = -ETIMEDOUT; 3936 u32 i, host_num_engines; 3937 3938 nvgpu_log_fn(g, " "); 3939 3940 host_num_engines = 3941 nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); 3942 3943 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 3944 NVGPU_TIMER_CPU_TIMER); 3945 3946 for (i = 0; i < host_num_engines; i++) { 3947 do { 3948 u32 status = gk20a_readl(g, fifo_engine_status_r(i)); 3949 if (!fifo_engine_status_engine_v(status)) { 3950 ret = 0; 3951 break; 3952 } 3953 3954 nvgpu_usleep_range(delay, delay * 2); 3955 delay = min_t(unsigned long, 3956 delay << 1, GR_IDLE_CHECK_MAX); 3957 } while (!nvgpu_timeout_expired(&timeout)); 3958 3959 if (ret) { 3960 nvgpu_log_info(g, "cannot idle engine %u", i); 3961 break; 3962 } 3963 } 3964 3965 nvgpu_log_fn(g, "done"); 3966 3967 return ret; 3968} 3969 3970u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g) 3971{ 3972 return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f(); 3973} 3974 3975static const char * const ccsr_chan_status_str[] = { 3976 "idle", 3977 "pending", 3978 "pending_ctx_reload", 3979 "pending_acquire", 3980 "pending_acq_ctx_reload", 3981 "on_pbdma", 3982 "on_pbdma_and_eng", 3983 "on_eng", 3984 "on_eng_pending_acquire", 3985 "on_eng_pending", 3986 "on_pbdma_ctx_reload", 3987 "on_pbdma_and_eng_ctx_reload", 3988 "on_eng_ctx_reload", 3989 "on_eng_pending_ctx_reload", 3990 "on_eng_pending_acq_ctx_reload", 3991}; 3992 3993static const char * const pbdma_chan_eng_ctx_status_str[] = { 3994 "invalid", 3995 "valid", 3996 "NA", 3997 "NA", 3998 "NA", 3999 "load", 4000 "save", 4001 "switch", 4002}; 4003 4004static const char * const not_found_str[] = { 4005 "NOT FOUND" 4006}; 4007 4008const char *gk20a_decode_ccsr_chan_status(u32 index) 4009{ 4010 if (index >= ARRAY_SIZE(ccsr_chan_status_str)) { 4011 return not_found_str[0]; 4012 } else { 4013 return 
ccsr_chan_status_str[index]; 4014 } 4015} 4016 4017const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index) 4018{ 4019 if (index >= ARRAY_SIZE(pbdma_chan_eng_ctx_status_str)) { 4020 return not_found_str[0]; 4021 } else { 4022 return pbdma_chan_eng_ctx_status_str[index]; 4023 } 4024} 4025 4026bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid) 4027{ 4028 u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); 4029 4030 return ccsr_channel_next_v(channel) == ccsr_channel_next_true_v(); 4031} 4032 4033bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid) 4034{ 4035 u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); 4036 u32 status = ccsr_channel_status_v(channel); 4037 4038 return (status == ccsr_channel_status_pending_ctx_reload_v() || 4039 status == ccsr_channel_status_pending_acq_ctx_reload_v() || 4040 status == ccsr_channel_status_on_pbdma_ctx_reload_v() || 4041 status == ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v() || 4042 status == ccsr_channel_status_on_eng_ctx_reload_v() || 4043 status == ccsr_channel_status_on_eng_pending_ctx_reload_v() || 4044 status == ccsr_channel_status_on_eng_pending_acq_ctx_reload_v()); 4045} 4046 4047void gk20a_dump_channel_status_ramfc(struct gk20a *g, 4048 struct gk20a_debug_output *o, 4049 u32 chid, 4050 struct ch_state *ch_state) 4051{ 4052 u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); 4053 u32 status = ccsr_channel_status_v(channel); 4054 u32 syncpointa, syncpointb; 4055 u32 *inst_mem; 4056 struct channel_gk20a *c = g->fifo.channel + chid; 4057 struct nvgpu_semaphore_int *hw_sema = NULL; 4058 4059 if (c->hw_sema) { 4060 hw_sema = c->hw_sema; 4061 } 4062 4063 if (!ch_state) { 4064 return; 4065 } 4066 4067 inst_mem = &ch_state->inst_block[0]; 4068 4069 syncpointa = inst_mem[ram_fc_syncpointa_w()]; 4070 syncpointb = inst_mem[ram_fc_syncpointb_w()]; 4071 4072 gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", chid, 4073 g->name, 4074 ch_state->pid, 4075 ch_state->refs, 4076 
			ch_state->deterministic ? ", deterministic" : "");
	gk20a_debug_output(o, "channel status: %s in use %s %s\n",
			ccsr_channel_enable_v(channel) ? "" : "not",
			gk20a_decode_ccsr_chan_status(status),
			ccsr_channel_busy_v(channel) ? "busy" : "not busy");
	/* 64-bit pushbuffer pointers are split lo/hi in the RAMFC snapshot */
	gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx "
			"FETCH: %016llx\nHEADER: %08x COUNT: %08x\n"
			"SYNCPOINT %08x %08x SEMAPHORE %08x %08x %08x %08x\n",
		(u64)inst_mem[ram_fc_pb_top_level_get_w()] +
		((u64)inst_mem[ram_fc_pb_top_level_get_hi_w()] << 32ULL),
		(u64)inst_mem[ram_fc_pb_put_w()] +
		((u64)inst_mem[ram_fc_pb_put_hi_w()] << 32ULL),
		(u64)inst_mem[ram_fc_pb_get_w()] +
		((u64)inst_mem[ram_fc_pb_get_hi_w()] << 32ULL),
		(u64)inst_mem[ram_fc_pb_fetch_w()] +
		((u64)inst_mem[ram_fc_pb_fetch_hi_w()] << 32ULL),
		inst_mem[ram_fc_pb_header_w()],
		inst_mem[ram_fc_pb_count_w()],
		syncpointa,
		syncpointb,
		inst_mem[ram_fc_semaphorea_w()],
		inst_mem[ram_fc_semaphoreb_w()],
		inst_mem[ram_fc_semaphorec_w()],
		inst_mem[ram_fc_semaphored_w()]);
	if (hw_sema) {
		gk20a_debug_output(o, "SEMA STATE: value: 0x%08x "
				   "next_val: 0x%08x addr: 0x%010llx\n",
				  __nvgpu_semaphore_read(hw_sema),
				  nvgpu_atomic_read(&hw_sema->next_value),
				  nvgpu_hw_sema_addr(hw_sema));
	}

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	/* only meaningful when the channel is blocked on a syncpoint wait */
	if ((pbdma_syncpointb_op_v(syncpointb) == pbdma_syncpointb_op_wait_v())
		&& (pbdma_syncpointb_wait_switch_v(syncpointb) ==
			pbdma_syncpointb_wait_switch_en_v()))
		gk20a_debug_output(o, "%s on syncpt %u (%s) val %u\n",
			/* NOTE(review): 3/8 appear to be the pending_acquire
			 * statuses — confirm against ccsr status encoding */
			(status == 3 || status == 8) ? "Waiting" : "Waited",
			pbdma_syncpointb_syncpt_index_v(syncpointb),
			nvgpu_nvhost_syncpt_get_name(g->nvhost_dev,
				pbdma_syncpointb_syncpt_index_v(syncpointb)),
			pbdma_syncpointa_payload_v(syncpointa));
#endif

	gk20a_debug_output(o, "\n");
}

/*
 * Snapshot and dump RAMFC state of every referenceable channel. Snapshots
 * are captured first (holding a channel ref), then dumped, so the dump is
 * consistent even if channels close meanwhile.
 */
void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
		 struct gk20a_debug_output *o)
{
	struct fifo_gk20a *f = &g->fifo;
	u32 chid;
	struct ch_state **ch_state;

	ch_state = nvgpu_kzalloc(g, sizeof(*ch_state) * f->num_channels);
	if (!ch_state) {
		gk20a_debug_output(o, "cannot alloc memory for channels\n");
		return;
	}

	for (chid = 0; chid < f->num_channels; chid++) {
		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
		if (ch != NULL) {
			ch_state[chid] =
				nvgpu_kmalloc(g, sizeof(struct ch_state) +
					ram_in_alloc_size_v());
			/* ref taken stays to below loop with
			 * successful allocs */
			if (!ch_state[chid]) {
				gk20a_channel_put(ch);
			}
		}
	}

	for (chid = 0; chid < f->num_channels; chid++) {
		struct channel_gk20a *ch = &f->channel[chid];
		if (!ch_state[chid]) {
			continue;
		}

		ch_state[chid]->pid = ch->pid;
		ch_state[chid]->refs = nvgpu_atomic_read(&ch->ref_count);
		ch_state[chid]->deterministic = ch->deterministic;
		nvgpu_mem_rd_n(g, &ch->inst_block, 0,
			       &ch_state[chid]->inst_block[0],
			       ram_in_alloc_size_v());
		/* drop the ref taken in the first loop */
		gk20a_channel_put(ch);
	}
	for (chid = 0; chid < f->num_channels; chid++) {
		if (ch_state[chid]) {
			g->ops.fifo.dump_channel_status_ramfc(g, o, chid,
							      ch_state[chid]);
			nvgpu_kfree(g, ch_state[chid]);
		}
	}
	nvgpu_kfree(g, ch_state);
}

/* Dump id/next-id, chan status and pointer state of every PBDMA unit. */
void gk20a_dump_pbdma_status(struct gk20a *g,
				 struct gk20a_debug_output *o)
{
	u32 i, host_num_pbdma;

	host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);

	for (i = 0; i < host_num_pbdma; i++) {
		u32 status = gk20a_readl(g, fifo_pbdma_status_r(i));
		u32 chan_status = fifo_pbdma_status_chan_status_v(status);

		gk20a_debug_output(o, "%s pbdma %d: ", g->name, i);
		gk20a_debug_output(o,
				"id: %d (%s), next_id: %d (%s) chan status: %s\n",
				fifo_pbdma_status_id_v(status),
				fifo_pbdma_status_id_type_v(status) ?
					"tsg" : "channel",
				fifo_pbdma_status_next_id_v(status),
				fifo_pbdma_status_next_id_type_v(status) ?
					"tsg" : "channel",
				gk20a_decode_pbdma_chan_eng_ctx_status(chan_status));
		gk20a_debug_output(o, "PBDMA_PUT: %016llx PBDMA_GET: %016llx "
				"GP_PUT: %08x GP_GET: %08x "
				"FETCH: %08x HEADER: %08x\n"
				"HDR: %08x SHADOW0: %08x SHADOW1: %08x",
			(u64)gk20a_readl(g, pbdma_put_r(i)) +
			((u64)gk20a_readl(g, pbdma_put_hi_r(i)) << 32ULL),
			(u64)gk20a_readl(g, pbdma_get_r(i)) +
			((u64)gk20a_readl(g, pbdma_get_hi_r(i)) << 32ULL),
			gk20a_readl(g, pbdma_gp_put_r(i)),
			gk20a_readl(g, pbdma_gp_get_r(i)),
			gk20a_readl(g, pbdma_gp_fetch_r(i)),
			gk20a_readl(g, pbdma_pb_header_r(i)),
			gk20a_readl(g, pbdma_hdr_shadow_r(i)),
			gk20a_readl(g, pbdma_gp_shadow_0_r(i)),
			gk20a_readl(g, pbdma_gp_shadow_1_r(i)));
	}
	gk20a_debug_output(o, "\n");
}

/* Dump id/next-id, ctx status and busy/faulted flags of every engine. */
void gk20a_dump_eng_status(struct gk20a *g,
				 struct gk20a_debug_output *o)
{
	u32 i, host_num_engines;

	host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

	for (i = 0; i < host_num_engines; i++) {
		u32 status = gk20a_readl(g, fifo_engine_status_r(i));
		u32 ctx_status = fifo_engine_status_ctx_status_v(status);

		gk20a_debug_output(o, "%s eng %d: ", g->name, i);
		gk20a_debug_output(o,
			"id: %d (%s), next_id: %d (%s), ctx status: %s ",
			fifo_engine_status_id_v(status),
			fifo_engine_status_id_type_v(status) ?
4230 "tsg" : "channel", 4231 fifo_engine_status_next_id_v(status), 4232 fifo_engine_status_next_id_type_v(status) ? 4233 "tsg" : "channel", 4234 gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status)); 4235 4236 if (fifo_engine_status_faulted_v(status)) { 4237 gk20a_debug_output(o, "faulted "); 4238 } 4239 if (fifo_engine_status_engine_v(status)) { 4240 gk20a_debug_output(o, "busy "); 4241 } 4242 gk20a_debug_output(o, "\n"); 4243 } 4244 gk20a_debug_output(o, "\n"); 4245} 4246 4247void gk20a_fifo_enable_channel(struct channel_gk20a *ch) 4248{ 4249 gk20a_writel(ch->g, ccsr_channel_r(ch->chid), 4250 gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) | 4251 ccsr_channel_enable_set_true_f()); 4252} 4253 4254void gk20a_fifo_disable_channel(struct channel_gk20a *ch) 4255{ 4256 gk20a_writel(ch->g, ccsr_channel_r(ch->chid), 4257 gk20a_readl(ch->g, 4258 ccsr_channel_r(ch->chid)) | 4259 ccsr_channel_enable_clr_true_f()); 4260} 4261 4262void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a) 4263{ 4264 struct gk20a *g = ch_gk20a->g; 4265 4266 nvgpu_log_fn(g, " "); 4267 4268 if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) { 4269 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid), 4270 ccsr_channel_inst_ptr_f(0) | 4271 ccsr_channel_inst_bind_false_f()); 4272 } 4273} 4274 4275static int gk20a_fifo_commit_userd(struct channel_gk20a *c) 4276{ 4277 u32 addr_lo; 4278 u32 addr_hi; 4279 struct gk20a *g = c->g; 4280 4281 nvgpu_log_fn(g, " "); 4282 4283 addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); 4284 addr_hi = u64_hi32(c->userd_iova); 4285 4286 nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", 4287 c->chid, (u64)c->userd_iova); 4288 4289 nvgpu_mem_wr32(g, &c->inst_block, 4290 ram_in_ramfc_w() + ram_fc_userd_w(), 4291 nvgpu_aperture_mask(g, &g->fifo.userd, 4292 pbdma_userd_target_sys_mem_ncoh_f(), 4293 pbdma_userd_target_sys_mem_coh_f(), 4294 pbdma_userd_target_vid_mem_f()) | 4295 pbdma_userd_addr_f(addr_lo)); 4296 4297 nvgpu_mem_wr32(g, &c->inst_block, 
4298 ram_in_ramfc_w() + ram_fc_userd_hi_w(), 4299 pbdma_userd_hi_addr_f(addr_hi)); 4300 4301 return 0; 4302} 4303 4304int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, 4305 u64 gpfifo_base, u32 gpfifo_entries, 4306 unsigned long timeout, 4307 u32 flags) 4308{ 4309 struct gk20a *g = c->g; 4310 struct nvgpu_mem *mem = &c->inst_block; 4311 4312 nvgpu_log_fn(g, " "); 4313 4314 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); 4315 4316 nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(), 4317 pbdma_gp_base_offset_f( 4318 u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s()))); 4319 4320 nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(), 4321 pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) | 4322 pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries))); 4323 4324 nvgpu_mem_wr32(g, mem, ram_fc_signature_w(), 4325 c->g->ops.fifo.get_pbdma_signature(c->g)); 4326 4327 nvgpu_mem_wr32(g, mem, ram_fc_formats_w(), 4328 pbdma_formats_gp_fermi0_f() | 4329 pbdma_formats_pb_fermi1_f() | 4330 pbdma_formats_mp_fermi0_f()); 4331 4332 nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(), 4333 pbdma_pb_header_priv_user_f() | 4334 pbdma_pb_header_method_zero_f() | 4335 pbdma_pb_header_subchannel_zero_f() | 4336 pbdma_pb_header_level_main_f() | 4337 pbdma_pb_header_first_true_f() | 4338 pbdma_pb_header_type_inc_f()); 4339 4340 nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(), 4341 pbdma_subdevice_id_f(1) | 4342 pbdma_subdevice_status_active_f() | 4343 pbdma_subdevice_channel_dma_enable_f()); 4344 4345 nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f()); 4346 4347 nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(), 4348 g->ops.fifo.pbdma_acquire_val(timeout)); 4349 4350 nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(), 4351 fifo_runlist_timeslice_timeout_128_f() | 4352 fifo_runlist_timeslice_timescale_3_f() | 4353 fifo_runlist_timeslice_enable_true_f()); 4354 4355 nvgpu_mem_wr32(g, mem, ram_fc_pb_timeslice_w(), 4356 fifo_pb_timeslice_timeout_16_f() | 4357 fifo_pb_timeslice_timescale_0_f() | 4358 
fifo_pb_timeslice_enable_true_f()); 4359 4360 nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid)); 4361 4362 if (c->is_privileged_channel) { 4363 gk20a_fifo_setup_ramfc_for_privileged_channel(c); 4364 } 4365 4366 return gk20a_fifo_commit_userd(c); 4367} 4368 4369void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c) 4370{ 4371 struct gk20a *g = c->g; 4372 struct nvgpu_mem *mem = &c->inst_block; 4373 4374 nvgpu_log_info(g, "channel %d : set ramfc privileged_channel", c->chid); 4375 4376 /* Enable HCE priv mode for phys mode transfer */ 4377 nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(), 4378 pbdma_hce_ctrl_hce_priv_mode_yes_f()); 4379} 4380 4381int gk20a_fifo_setup_userd(struct channel_gk20a *c) 4382{ 4383 struct gk20a *g = c->g; 4384 struct nvgpu_mem *mem; 4385 u32 offset; 4386 4387 nvgpu_log_fn(g, " "); 4388 4389 if (nvgpu_mem_is_valid(&c->usermode_userd)) { 4390 mem = &c->usermode_userd; 4391 offset = 0; 4392 } else { 4393 mem = &g->fifo.userd; 4394 offset = c->chid * g->fifo.userd_entry_size / sizeof(u32); 4395 } 4396 4397 nvgpu_mem_wr32(g, mem, offset + ram_userd_put_w(), 0); 4398 nvgpu_mem_wr32(g, mem, offset + ram_userd_get_w(), 0); 4399 nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0); 4400 nvgpu_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0); 4401 nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0); 4402 nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0); 4403 nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0); 4404 nvgpu_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0); 4405 nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0); 4406 nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0); 4407 4408 return 0; 4409} 4410 4411int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) 4412{ 4413 int err; 4414 4415 nvgpu_log_fn(g, " "); 4416 4417 err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); 4418 if (err) { 4419 return err; 4420 } 4421 4422 
nvgpu_log_info(g, "channel %d inst block physical addr: 0x%16llx", 4423 ch->chid, nvgpu_inst_block_addr(g, &ch->inst_block)); 4424 4425 nvgpu_log_fn(g, "done"); 4426 return 0; 4427} 4428 4429void gk20a_fifo_free_inst(struct gk20a *g, struct channel_gk20a *ch) 4430{ 4431 nvgpu_free_inst_block(g, &ch->inst_block); 4432} 4433 4434u32 gk20a_fifo_userd_gp_get(struct gk20a *g, struct channel_gk20a *c) 4435{ 4436 return gk20a_bar1_readl(g, 4437 c->userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w()); 4438} 4439 4440u64 gk20a_fifo_userd_pb_get(struct gk20a *g, struct channel_gk20a *c) 4441{ 4442 u32 lo = gk20a_bar1_readl(g, 4443 c->userd_gpu_va + sizeof(u32) * ram_userd_get_w()); 4444 u32 hi = gk20a_bar1_readl(g, 4445 c->userd_gpu_va + sizeof(u32) * ram_userd_get_hi_w()); 4446 4447 return ((u64)hi << 32) | lo; 4448} 4449 4450void gk20a_fifo_userd_gp_put(struct gk20a *g, struct channel_gk20a *c) 4451{ 4452 gk20a_bar1_writel(g, 4453 c->userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w(), 4454 c->gpfifo.put); 4455} 4456 4457u32 gk20a_fifo_pbdma_acquire_val(u64 timeout) 4458{ 4459 u32 val, exp, man; 4460 unsigned int val_len; 4461 4462 val = pbdma_acquire_retry_man_2_f() | 4463 pbdma_acquire_retry_exp_2_f(); 4464 4465 if (!timeout) { 4466 return val; 4467 } 4468 4469 timeout *= 80UL; 4470 do_div(timeout, 100); /* set acquire timeout to 80% of channel wdt */ 4471 timeout *= 1000000UL; /* ms -> ns */ 4472 do_div(timeout, 1024); /* in unit of 1024ns */ 4473 val_len = fls(timeout >> 32) + 32; 4474 if (val_len == 32) { 4475 val_len = fls(timeout); 4476 } 4477 if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */ 4478 exp = pbdma_acquire_timeout_exp_max_v(); 4479 man = pbdma_acquire_timeout_man_max_v(); 4480 } else if (val_len > 16) { 4481 exp = val_len - 16; 4482 man = timeout >> exp; 4483 } else { 4484 exp = 0; 4485 man = timeout; 4486 } 4487 4488 val |= pbdma_acquire_timeout_exp_f(exp) | 4489 pbdma_acquire_timeout_man_f(man) | 4490 
pbdma_acquire_timeout_en_enable_f(); 4491 4492 return val; 4493} 4494 4495const char *gk20a_fifo_interleave_level_name(u32 interleave_level) 4496{ 4497 switch (interleave_level) { 4498 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: 4499 return "LOW"; 4500 4501 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM: 4502 return "MEDIUM"; 4503 4504 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH: 4505 return "HIGH"; 4506 4507 default: 4508 return "?"; 4509 } 4510} 4511 4512u32 gk20a_fifo_get_sema_wait_cmd_size(void) 4513{ 4514 return 8; 4515} 4516 4517u32 gk20a_fifo_get_sema_incr_cmd_size(void) 4518{ 4519 return 10; 4520} 4521 4522void gk20a_fifo_add_sema_cmd(struct gk20a *g, 4523 struct nvgpu_semaphore *s, u64 sema_va, 4524 struct priv_cmd_entry *cmd, 4525 u32 off, bool acquire, bool wfi) 4526{ 4527 nvgpu_log_fn(g, " "); 4528 4529 /* semaphore_a */ 4530 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); 4531 /* offset_upper */ 4532 nvgpu_mem_wr32(g, cmd->mem, off++, (sema_va >> 32) & 0xff); 4533 /* semaphore_b */ 4534 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005); 4535 /* offset */ 4536 nvgpu_mem_wr32(g, cmd->mem, off++, sema_va & 0xffffffff); 4537 4538 if (acquire) { 4539 /* semaphore_c */ 4540 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006); 4541 /* payload */ 4542 nvgpu_mem_wr32(g, cmd->mem, off++, 4543 nvgpu_semaphore_get_value(s)); 4544 /* semaphore_d */ 4545 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007); 4546 /* operation: acq_geq, switch_en */ 4547 nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12)); 4548 } else { 4549 /* semaphore_c */ 4550 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006); 4551 /* payload */ 4552 nvgpu_mem_wr32(g, cmd->mem, off++, 4553 nvgpu_semaphore_get_value(s)); 4554 /* semaphore_d */ 4555 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007); 4556 /* operation: release, wfi */ 4557 nvgpu_mem_wr32(g, cmd->mem, off++, 4558 0x2 | ((wfi ? 
0x0 : 0x1) << 20)); 4559 /* non_stall_int */ 4560 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010008); 4561 /* ignored */ 4562 nvgpu_mem_wr32(g, cmd->mem, off++, 0); 4563 } 4564} 4565 4566#ifdef CONFIG_TEGRA_GK20A_NVHOST 4567void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g, 4568 struct priv_cmd_entry *cmd, u32 off, 4569 u32 id, u32 thresh, u64 gpu_va) 4570{ 4571 nvgpu_log_fn(g, " "); 4572 4573 off = cmd->off + off; 4574 /* syncpoint_a */ 4575 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C); 4576 /* payload */ 4577 nvgpu_mem_wr32(g, cmd->mem, off++, thresh); 4578 /* syncpoint_b */ 4579 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D); 4580 /* syncpt_id, switch_en, wait */ 4581 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10); 4582} 4583 4584u32 gk20a_fifo_get_syncpt_wait_cmd_size(void) 4585{ 4586 return 4; 4587} 4588 4589u32 gk20a_fifo_get_syncpt_incr_per_release(void) 4590{ 4591 return 2; 4592} 4593 4594void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g, 4595 bool wfi_cmd, struct priv_cmd_entry *cmd, 4596 u32 id, u64 gpu_va) 4597{ 4598 u32 off = cmd->off; 4599 4600 nvgpu_log_fn(g, " "); 4601 if (wfi_cmd) { 4602 /* wfi */ 4603 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E); 4604 /* handle, ignored */ 4605 nvgpu_mem_wr32(g, cmd->mem, off++, 0x00000000); 4606 } 4607 /* syncpoint_a */ 4608 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C); 4609 /* payload, ignored */ 4610 nvgpu_mem_wr32(g, cmd->mem, off++, 0); 4611 /* syncpoint_b */ 4612 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D); 4613 /* syncpt_id, incr */ 4614 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1); 4615 /* syncpoint_b */ 4616 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D); 4617 /* syncpt_id, incr */ 4618 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1); 4619 4620} 4621 4622u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd) 4623{ 4624 if (wfi_cmd) 4625 return 8; 4626 else 4627 return 6; 4628} 4629 4630void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c, 4631 struct nvgpu_mem *syncpt_buf) 4632{ 
4633 4634} 4635 4636int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c, 4637 u32 syncpt_id, struct nvgpu_mem *syncpt_buf) 4638{ 4639 return 0; 4640} 4641#endif
diff --git a/include/gk20a/fifo_gk20a.h b/include/gk20a/fifo_gk20a.h
deleted file mode 100644
index 078236d..0000000
--- a/include/gk20a/fifo_gk20a.h
+++ /dev/null
@@ -1,472 +0,0 @@ 1/* 2 * GK20A graphics fifo (gr host) 3 * 4 * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef FIFO_GK20A_H 25#define FIFO_GK20A_H 26 27#include <nvgpu/kref.h> 28 29struct gk20a_debug_output; 30struct mmu_fault_info; 31struct nvgpu_semaphore; 32struct channel_gk20a; 33struct tsg_gk20a; 34 35enum { 36 NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW = 0, 37 NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM, 38 NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH, 39 NVGPU_FIFO_RUNLIST_INTERLEAVE_NUM_LEVELS, 40}; 41 42#define MAX_RUNLIST_BUFFERS 2 43 44#define FIFO_INVAL_ENGINE_ID ((u32)~0) 45#define FIFO_INVAL_CHANNEL_ID ((u32)~0) 46#define FIFO_INVAL_TSG_ID ((u32)~0) 47#define FIFO_INVAL_RUNLIST_ID ((u32)~0) 48 49#define ID_TYPE_CHANNEL 0 50#define ID_TYPE_TSG 1 51#define ID_TYPE_UNKNOWN ((u32)~0) 52 53#define RC_YES 1 54#define RC_NO 0 55 56#define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000 57 58#define RC_TYPE_NO_RC 0 59#define RC_TYPE_MMU_FAULT 1 60#define RC_TYPE_PBDMA_FAULT 2 61#define RC_TYPE_GR_FAULT 3 62#define RC_TYPE_PREEMPT_TIMEOUT 4 63#define RC_TYPE_CTXSW_TIMEOUT 5 64#define RC_TYPE_RUNLIST_UPDATE_TIMEOUT 6 65#define RC_TYPE_FORCE_RESET 7 66#define RC_TYPE_SCHED_ERR 8 67 68#define NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT 128UL 69#define NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE 3UL 70 71/* 72 * Number of entries in the kickoff latency buffer, used to calculate 73 * the profiling and histogram. 
This number is calculated to be statistically 74 * significative on a histogram on a 5% step 75 */ 76#ifdef CONFIG_DEBUG_FS 77#define FIFO_PROFILING_ENTRIES 16384 78#endif 79 80#define RUNLIST_DISABLED 0 81#define RUNLIST_ENABLED 1 82 83/* generally corresponds to the "pbdma" engine */ 84 85struct fifo_runlist_info_gk20a { 86 unsigned long *active_channels; 87 unsigned long *active_tsgs; 88 /* Each engine has its own SW and HW runlist buffer.*/ 89 struct nvgpu_mem mem[MAX_RUNLIST_BUFFERS]; 90 u32 cur_buffer; 91 u32 total_entries; 92 u32 pbdma_bitmask; /* pbdmas supported for this runlist*/ 93 u32 eng_bitmask; /* engines using this runlist */ 94 u32 reset_eng_bitmask; /* engines to be reset during recovery */ 95 u32 count; /* cached runlist_hw_submit parameter */ 96 bool stopped; 97 bool support_tsg; 98 /* protect ch/tsg/runlist preempt & runlist update */ 99 struct nvgpu_mutex runlist_lock; 100}; 101 102enum { 103 ENGINE_GR_GK20A = 0U, 104 ENGINE_GRCE_GK20A = 1U, 105 ENGINE_ASYNC_CE_GK20A = 2U, 106 ENGINE_INVAL_GK20A = 3U, 107}; 108 109struct fifo_pbdma_exception_info_gk20a { 110 u32 status_r; /* raw register value from hardware */ 111 u32 id, next_id; 112 u32 chan_status_v; /* raw value from hardware */ 113 bool id_is_chid, next_id_is_chid; 114 bool chsw_in_progress; 115}; 116 117struct fifo_engine_exception_info_gk20a { 118 u32 status_r; /* raw register value from hardware */ 119 u32 id, next_id; 120 u32 ctx_status_v; /* raw value from hardware */ 121 bool id_is_chid, next_id_is_chid; 122 bool faulted, idle, ctxsw_in_progress; 123}; 124 125struct fifo_engine_info_gk20a { 126 u32 engine_id; 127 u32 runlist_id; 128 u32 intr_mask; 129 u32 reset_mask; 130 u32 pbdma_id; 131 u32 inst_id; 132 u32 pri_base; 133 u32 fault_id; 134 u32 engine_enum; 135 struct fifo_pbdma_exception_info_gk20a pbdma_exception_info; 136 struct fifo_engine_exception_info_gk20a engine_exception_info; 137}; 138 139enum { 140 PROFILE_IOCTL_ENTRY = 0U, 141 PROFILE_ENTRY, 142 PROFILE_JOB_TRACKING, 
143 PROFILE_APPEND, 144 PROFILE_END, 145 PROFILE_IOCTL_EXIT, 146 PROFILE_MAX 147}; 148 149struct fifo_profile_gk20a { 150 u64 timestamp[PROFILE_MAX]; 151}; 152 153struct fifo_gk20a { 154 struct gk20a *g; 155 unsigned int num_channels; 156 unsigned int runlist_entry_size; 157 unsigned int num_runlist_entries; 158 159 unsigned int num_pbdma; 160 u32 *pbdma_map; 161 162 struct fifo_engine_info_gk20a *engine_info; 163 u32 max_engines; 164 u32 num_engines; 165 u32 *active_engines_list; 166 167 struct fifo_runlist_info_gk20a *runlist_info; 168 u32 max_runlists; 169#ifdef CONFIG_DEBUG_FS 170 struct { 171 struct fifo_profile_gk20a *data; 172 nvgpu_atomic_t get; 173 bool enabled; 174 u64 *sorted; 175 struct nvgpu_ref ref; 176 struct nvgpu_mutex lock; 177 } profile; 178#endif 179 struct nvgpu_mem userd; 180 u32 userd_entry_size; 181 182 unsigned int used_channels; 183 struct channel_gk20a *channel; 184 /* zero-kref'd channels here */ 185 struct nvgpu_list_node free_chs; 186 struct nvgpu_mutex free_chs_mutex; 187 struct nvgpu_mutex engines_reset_mutex; 188 189 struct tsg_gk20a *tsg; 190 struct nvgpu_mutex tsg_inuse_mutex; 191 192 void (*remove_support)(struct fifo_gk20a *); 193 bool sw_ready; 194 struct { 195 /* share info between isrs and non-isr code */ 196 struct { 197 struct nvgpu_mutex mutex; 198 } isr; 199 struct { 200 u32 device_fatal_0; 201 u32 channel_fatal_0; 202 u32 restartable_0; 203 } pbdma; 204 struct { 205 206 } engine; 207 208 209 } intr; 210 211 unsigned long deferred_fault_engines; 212 bool deferred_reset_pending; 213 struct nvgpu_mutex deferred_reset_mutex; 214 215 u32 max_subctx_count; 216 u32 channel_base; 217}; 218 219struct ch_state { 220 int pid; 221 int refs; 222 bool deterministic; 223 u32 inst_block[0]; 224}; 225 226int gk20a_init_fifo_support(struct gk20a *g); 227 228int gk20a_init_fifo_setup_hw(struct gk20a *g); 229 230void gk20a_fifo_isr(struct gk20a *g); 231u32 gk20a_fifo_nonstall_isr(struct gk20a *g); 232 233int 
gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch); 234int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg); 235int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch); 236 237int gk20a_fifo_enable_engine_activity(struct gk20a *g, 238 struct fifo_engine_info_gk20a *eng_info); 239int gk20a_fifo_enable_all_engine_activity(struct gk20a *g); 240int gk20a_fifo_disable_engine_activity(struct gk20a *g, 241 struct fifo_engine_info_gk20a *eng_info, 242 bool wait_for_idle); 243int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, 244 bool wait_for_idle); 245void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg); 246void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg); 247 248u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid); 249 250int gk20a_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next); 251int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next, 252 bool wait_preempt); 253 254int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid, 255 bool add, bool wait_for_finish); 256 257int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, 258 u32 chid, bool add, 259 bool wait_for_finish); 260int gk20a_fifo_suspend(struct gk20a *g); 261 262bool gk20a_fifo_mmu_fault_pending(struct gk20a *g); 263 264void gk20a_fifo_recover(struct gk20a *g, 265 u32 engine_ids, /* if zero, will be queried from HW */ 266 u32 hw_id, /* if ~0, will be queried from HW */ 267 bool id_is_tsg, /* ignored if hw_id == ~0 */ 268 bool id_is_known, bool verbose, int rc_type); 269void gk20a_fifo_recover_ch(struct gk20a *g, struct channel_gk20a *ch, 270 bool verbose, u32 rc_type); 271void gk20a_fifo_recover_tsg(struct gk20a *g, struct tsg_gk20a *tsg, 272 bool verbose, u32 rc_type); 273int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, 274 u32 err_code, bool verbose); 275void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id); 276int 
gk20a_init_fifo_reset_enable_hw(struct gk20a *g); 277int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch); 278 279void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g, 280 unsigned long fault_id); 281int gk20a_fifo_wait_engine_idle(struct gk20a *g); 282bool gk20a_fifo_is_engine_busy(struct gk20a *g); 283u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g); 284u32 gk20a_fifo_act_eng_interrupt_mask(struct gk20a *g, u32 act_eng_id); 285u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g); 286u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g, 287 int *__id, bool *__is_tsg); 288void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, 289 struct tsg_gk20a *tsg); 290void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt); 291void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g, 292 struct channel_gk20a *refch); 293bool gk20a_fifo_error_tsg(struct gk20a *g, struct tsg_gk20a *tsg); 294bool gk20a_fifo_error_ch(struct gk20a *g, struct channel_gk20a *refch); 295 296void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg); 297int gk20a_fifo_set_runlist_interleave(struct gk20a *g, 298 u32 id, 299 u32 runlist_id, 300 u32 new_level); 301int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice); 302 303const char *gk20a_fifo_interleave_level_name(u32 interleave_level); 304 305int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, 306 u32 *inst_id); 307 308u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[], 309 u32 engine_id_sz, u32 engine_enum); 310 311void gk20a_fifo_delete_runlist(struct fifo_gk20a *f); 312 313struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, 314 u32 engine_id); 315 316bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id); 317 318u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g); 319 320int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch); 321 322u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g); 323 324u32 
gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g); 325 326u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g); 327 328bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id); 329 330int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid, 331 bool add, bool wait_for_finish); 332 333int gk20a_fifo_init_engine_info(struct fifo_gk20a *f); 334 335void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist); 336void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist); 337void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask, 338 u32 runlist_state); 339 340u32 gk20a_fifo_userd_gp_get(struct gk20a *g, struct channel_gk20a *c); 341void gk20a_fifo_userd_gp_put(struct gk20a *g, struct channel_gk20a *c); 342u64 gk20a_fifo_userd_pb_get(struct gk20a *g, struct channel_gk20a *c); 343 344bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid); 345#ifdef CONFIG_DEBUG_FS 346struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g); 347void gk20a_fifo_profile_release(struct gk20a *g, 348 struct fifo_profile_gk20a *profile); 349void gk20a_fifo_profile_snapshot(struct fifo_profile_gk20a *profile, int idx); 350#else 351static inline struct fifo_profile_gk20a * 352gk20a_fifo_profile_acquire(struct gk20a *g) 353{ 354 return NULL; 355} 356static inline void gk20a_fifo_profile_release(struct gk20a *g, 357 struct fifo_profile_gk20a *profile) 358{ 359} 360static inline void gk20a_fifo_profile_snapshot( 361 struct fifo_profile_gk20a *profile, int idx) 362{ 363} 364#endif 365 366void gk20a_dump_channel_status_ramfc(struct gk20a *g, 367 struct gk20a_debug_output *o, 368 u32 chid, 369 struct ch_state *ch_state); 370void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g, 371 struct gk20a_debug_output *o); 372void gk20a_dump_pbdma_status(struct gk20a *g, 373 struct gk20a_debug_output *o); 374void gk20a_dump_eng_status(struct gk20a *g, 375 struct gk20a_debug_output *o); 376const char 
*gk20a_decode_ccsr_chan_status(u32 index); 377const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index); 378void gk20a_fifo_enable_channel(struct channel_gk20a *ch); 379void gk20a_fifo_disable_channel(struct channel_gk20a *ch); 380 381bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid); 382bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid); 383int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch); 384 385struct channel_gk20a *gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr); 386void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a); 387 388u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g); 389 390int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, 391 unsigned int id_type, bool preempt_retries_left); 392int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg, 393 bool preempt_retries_left); 394void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg); 395void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch); 396int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, 397 u64 gpfifo_base, u32 gpfifo_entries, 398 unsigned long timeout, u32 flags); 399void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c); 400int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch); 401void gk20a_fifo_free_inst(struct gk20a *g, struct channel_gk20a *ch); 402int gk20a_fifo_setup_userd(struct channel_gk20a *c); 403u32 gk20a_fifo_pbdma_acquire_val(u64 timeout); 404 405 406u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, 407 struct fifo_runlist_info_gk20a *runlist, 408 u32 cur_level, 409 u32 *runlist_entry, 410 bool interleave_enabled, 411 bool prev_empty, 412 u32 *entries_left); 413void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id, 414 u32 count, u32 buffer_index); 415int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id); 416int gk20a_init_fifo_setup_sw_common(struct gk20a *g); 417int 
gk20a_init_fifo_setup_sw(struct gk20a *g); 418void gk20a_fifo_handle_runlist_event(struct gk20a *g); 419bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id, 420 u32 engine_subid, bool fake_fault); 421 422void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids, 423 u32 hw_id, unsigned int id_type, unsigned int rc_type, 424 struct mmu_fault_info *mmfault); 425 426bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch, 427 bool *verbose, u32 *ms); 428bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, 429 bool *verbose, u32 *ms); 430void gk20a_fifo_teardown_mask_intr(struct gk20a *g); 431void gk20a_fifo_teardown_unmask_intr(struct gk20a *g); 432bool gk20a_fifo_handle_sched_error(struct gk20a *g); 433 434void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id, 435 int pbdma_method_index); 436unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, 437 u32 pbdma_intr_0, u32 *handled, u32 *error_notifier); 438unsigned int gk20a_fifo_handle_pbdma_intr_1(struct gk20a *g, u32 pbdma_id, 439 u32 pbdma_intr_1, u32 *handled, u32 *error_notifier); 440u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f, 441 u32 pbdma_id, unsigned int rc); 442 443u32 gk20a_fifo_default_timeslice_us(struct gk20a *g); 444 445#ifdef CONFIG_TEGRA_GK20A_NVHOST 446void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g, 447 struct priv_cmd_entry *cmd, u32 off, 448 u32 id, u32 thresh, u64 gpu_va); 449u32 gk20a_fifo_get_syncpt_wait_cmd_size(void); 450u32 gk20a_fifo_get_syncpt_incr_per_release(void); 451void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g, 452 bool wfi_cmd, struct priv_cmd_entry *cmd, 453 u32 id, u64 gpu_va); 454u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd); 455void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c, 456 struct nvgpu_mem *syncpt_buf); 457int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c, 458 u32 syncpt_id, struct nvgpu_mem *syncpt_buf); 459#endif 460 461void 
gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, 462 struct mmu_fault_info *mmfault); 463void gk20a_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault); 464void gk20a_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault); 465void gk20a_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault); 466u32 gk20a_fifo_get_sema_wait_cmd_size(void); 467u32 gk20a_fifo_get_sema_incr_cmd_size(void); 468void gk20a_fifo_add_sema_cmd(struct gk20a *g, 469 struct nvgpu_semaphore *s, u64 sema_va, 470 struct priv_cmd_entry *cmd, 471 u32 off, bool acquire, bool wfi); 472#endif /* FIFO_GK20A_H */
diff --git a/include/gk20a/flcn_gk20a.c b/include/gk20a/flcn_gk20a.c
deleted file mode 100644
index fdcaef9..0000000
--- a/include/gk20a/flcn_gk20a.c
+++ /dev/null
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * gk20a falcon HAL: register-level helpers for the on-chip Falcon
 * microcontrollers (PMU, FECS, GPCCS, ...). All accesses go through
 * gk20a_readl()/gk20a_writel() at flcn->flcn_base + <falcon register>.
 */
#include <nvgpu/falcon.h>
#include <nvgpu/pmu.h>
#include <nvgpu/io.h>

#include "gk20a/gk20a.h"
#include "gk20a/flcn_gk20a.h"

#include <nvgpu/hw/gm20b/hw_falcon_gm20b.h>

/*
 * Reset the falcon.
 *
 * If an engine-specific reset callback is registered (e.g. PMU), use it so
 * the surrounding engine is reset together with the falcon. Otherwise fall
 * back to a CPU hard reset via a read-modify-write of CPUCTL.
 * Returns 0 on success, or the callback's error code.
 */
static int gk20a_flcn_reset(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	int status = 0;

	if (flcn->flcn_engine_dep_ops.reset_eng) {
		/* falcon & engine reset */
		status = flcn->flcn_engine_dep_ops.reset_eng(g);
	} else {
		/* do falcon CPU hard reset */
		unit_status = gk20a_readl(g, base_addr +
				falcon_falcon_cpuctl_r());
		gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
			(unit_status | falcon_falcon_cpuctl_hreset_f(1)));
	}

	return status;
}

/*
 * Clear the falcon HALT interrupt and report whether it is now clear.
 *
 * Returns true when IRQSTAT no longer shows the halt bit set after the
 * write to IRQSCLR, false if the halt interrupt is still pending.
 */
static bool gk20a_flcn_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 data = 0;
	bool status = false;

	/*
	 * NOTE(review): 0x10 is presumably the HALT bit of IRQSCLR (it
	 * corresponds to the halt field tested in IRQSTAT below) — confirm
	 * against the hw_falcon_gm20b.h field definitions.
	 */
	gk20a_writel(g, base_addr + falcon_falcon_irqsclr_r(),
		gk20a_readl(g, base_addr + falcon_falcon_irqsclr_r()) |
		(0x10));
	data = gk20a_readl(g, (base_addr + falcon_falcon_irqstat_r()));

	if ((data & falcon_falcon_irqstat_halt_true_f()) !=
		falcon_falcon_irqstat_halt_true_f()) {
		/* halt irq is clear */
		status = true;
	}

	return status;
}

/*
 * Enable or disable falcon interrupts.
 *
 * When enabling, program the per-falcon mask (IRQMSET) and routing
 * (IRQDEST) from flcn->intr_mask / flcn->intr_dest. When disabling,
 * clear every interrupt source via IRQMCLR. Falcons that do not support
 * interrupts are forced to the disable path with a warning.
 */
static void gk20a_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	if (!flcn->is_interrupt_enabled) {
		nvgpu_warn(g, "Interrupt not supported on flcn 0x%x ",
			flcn->flcn_id);
		/* Keep interrupt disabled */
		enable = false;
	}

	if (enable) {
		gk20a_writel(g, base_addr + falcon_falcon_irqmset_r(),
			flcn->intr_mask);
		gk20a_writel(g, base_addr + falcon_falcon_irqdest_r(),
			flcn->intr_dest);
	} else {
		gk20a_writel(g, base_addr + falcon_falcon_irqmclr_r(),
			0xffffffff);
	}
}

/* Return true when CPUCTL reports the falcon CPU halted (halt_intr set). */
static bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
{

	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	return (gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()) &
			falcon_falcon_cpuctl_halt_intr_m() ?
			true : false);
}

/*
 * Return true when IDLESTATE reports both the falcon core and the
 * external units idle (falcon_busy == 0 and ext_busy == 0).
 */
static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	bool status = false;

	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_idlestate_r());

	if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0 &&
		falcon_falcon_idlestate_ext_busy_v(unit_status) == 0) {
		status = true;
	} else {
		status = false;
	}

	return status;
}

/*
 * Return true when the post-reset IMEM/DMEM scrub has finished,
 * i.e. neither scrubbing bit is still set in DMACTL.
 */
static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 unit_status = 0;
	bool status = false;

	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_dmactl_r());

	if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() |
		falcon_falcon_dmactl_imem_scrubbing_m())) {
		status = false;
	} else {
		status = true;
	}

	return status;
}

/*
 * Read the DMEM or IMEM size, in bytes, from the HWCFG register.
 *
 * NOTE(review): both branches shift by GK20A_PMU_DMEM_BLKSIZE2 —
 * presumably IMEM and DMEM use the same block size (256B); confirm that
 * IMEM should not use a dedicated IMEM block-size constant.
 */
static u32 gk20a_falcon_get_mem_size(struct nvgpu_falcon *flcn,
		enum flcn_mem_type mem_type)
{
	struct gk20a *g = flcn->g;
	u32 mem_size = 0;
	u32 hw_cfg_reg = gk20a_readl(g,
		flcn->flcn_base + falcon_falcon_hwcfg_r());

	if (mem_type == MEM_DMEM) {
		mem_size = falcon_falcon_hwcfg_dmem_size_v(hw_cfg_reg)
			<< GK20A_PMU_DMEM_BLKSIZE2;
	} else {
		mem_size = falcon_falcon_hwcfg_imem_size_v(hw_cfg_reg)
			<< GK20A_PMU_DMEM_BLKSIZE2;
	}

	return mem_size;
}

/*
 * Validate a DMEM/IMEM copy request: size must be non-zero, offset must
 * be 4-byte aligned, and [offset, offset + size) must fit inside the
 * memory reported by HWCFG. Returns 0 when valid, -EINVAL otherwise.
 *
 * NOTE(review): "offset + size" is u32 arithmetic and could wrap for
 * very large size values, which would defeat the range check — callers
 * appear to pass small, trusted sizes, but worth confirming.
 */
static int flcn_mem_overflow_check(struct nvgpu_falcon *flcn,
		u32 offset, u32 size, enum flcn_mem_type mem_type)
{
	struct gk20a *g = flcn->g;
	u32 mem_size = 0;

	if (size == 0) {
		nvgpu_err(g, "size is zero");
		return -EINVAL;
	}

	if (offset & 0x3) {
		nvgpu_err(g, "offset (0x%08x) not 4-byte aligned", offset);
		return -EINVAL;
	}

	mem_size = gk20a_falcon_get_mem_size(flcn, mem_type);
	if (!(offset <= mem_size && (offset + size) <= mem_size)) {
		nvgpu_err(g, "flcn-id 0x%x, copy overflow ",
			flcn->flcn_id);
		nvgpu_err(g, "total size 0x%x, offset 0x%x, copy size 0x%x",
			mem_size, offset, size);
		return -EINVAL;
	}

	return 0;
}

/*
 * Copy 'size' bytes out of falcon DMEM at offset 'src' into 'dst'.
 *
 * Programs DMEMC with auto-increment-on-read, then streams full words
 * from DMEMD; a trailing partial word (size % 4) is read once and copied
 * byte-wise. Serialized against other copies via flcn->copy_lock.
 * Returns 0 on success, -EINVAL on bad parameters.
 */
static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
	u32 src, u8 *dst, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *dst_u32 = (u32 *)dst;

	nvgpu_log_fn(g, " src dmem offset - %x, size - %x", src, size);

	if (flcn_mem_overflow_check(flcn, src, size, MEM_DMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;

	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();

	src &= addr_mask;

	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
		src | falcon_falcon_dmemc_aincr_f(1));

	for (i = 0; i < words; i++) {
		dst_u32[i] = gk20a_readl(g,
			base_addr + falcon_falcon_dmemd_r(port));
	}

	if (bytes > 0) {
		data = gk20a_readl(g, base_addr + falcon_falcon_dmemd_r(port));
		for (i = 0; i < bytes; i++) {
			dst[(words << 2) + i] = ((u8 *)&data)[i];
		}
	}

	nvgpu_mutex_release(&flcn->copy_lock);
	return 0;
}

/*
 * Copy 'size' bytes from 'src' into falcon DMEM at offset 'dst'.
 *
 * Programs DMEMC with auto-increment-on-write, streams full words into
 * DMEMD, then packs a trailing partial word (zero-padded) if needed.
 * After the copy, reads DMEMC back and warns if the hardware offset did
 * not advance by the word-aligned size (best-effort verification only;
 * still returns 0). Serialized via flcn->copy_lock.
 */
static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
		u32 dst, u8 *src, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *src_u32 = (u32 *)src;

	nvgpu_log_fn(g, "dest dmem offset - %x, size - %x", dst, size);

	if (flcn_mem_overflow_check(flcn, dst, size, MEM_DMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;

	addr_mask = falcon_falcon_dmemc_offs_m() |
		    falcon_falcon_dmemc_blk_m();

	dst &= addr_mask;

	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
		dst | falcon_falcon_dmemc_aincw_f(1));

	for (i = 0; i < words; i++) {
		gk20a_writel(g,
			base_addr + falcon_falcon_dmemd_r(port), src_u32[i]);
	}

	if (bytes > 0) {
		data = 0;
		for (i = 0; i < bytes; i++) {
			((u8 *)&data)[i] = src[(words << 2) + i];
		}
		gk20a_writel(g, base_addr + falcon_falcon_dmemd_r(port), data);
	}

	size = ALIGN(size, 4);
	data = gk20a_readl(g,
		base_addr + falcon_falcon_dmemc_r(port)) & addr_mask;
	if (data != ((dst + size) & addr_mask)) {
		nvgpu_warn(g, "copy failed. bytes written %d, expected %d",
			data - dst, size);
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

/*
 * Copy 'size' bytes out of falcon IMEM at offset 'src' into 'dst'.
 *
 * IMEM is addressed in 256-byte blocks (blk = src >> 8); IMEMC is
 * programmed with the block/offset and auto-increment, then words are
 * streamed from IMEMD, with a byte-wise tail like the DMEM path.
 *
 * NOTE(review): the auto-increment flag used here is
 * falcon_falcon_dmemc_aincr_f() (a DMEMC field) rather than an IMEMC
 * one — likely the same bit position, but confirm against the headers.
 */
static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
	u8 *dst, u32 size, u8 port)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 *dst_u32 = (u32 *)dst;
	u32 words = 0;
	u32 bytes = 0;
	u32 data = 0;
	u32 blk = 0;
	u32 i = 0;

	nvgpu_log_info(g, "download %d bytes from 0x%x", size, src);

	if (flcn_mem_overflow_check(flcn, src, size, MEM_IMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	bytes = size & 0x3;
	blk = src >> 8;

	nvgpu_log_info(g, "download %d words from 0x%x block %d",
			words, src, blk);

	gk20a_writel(g, base_addr + falcon_falcon_imemc_r(port),
		falcon_falcon_imemc_offs_f(src >> 2) |
		falcon_falcon_imemc_blk_f(blk) |
		falcon_falcon_dmemc_aincr_f(1));

	for (i = 0; i < words; i++) {
		dst_u32[i] = gk20a_readl(g,
			base_addr + falcon_falcon_imemd_r(port));
	}

	if (bytes > 0) {
		data = gk20a_readl(g, base_addr + falcon_falcon_imemd_r(port));
		for (i = 0; i < bytes; i++) {
			dst[(words << 2) + i] = ((u8 *)&data)[i];
		}
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

/*
 * Upload 'size' bytes from 'src' into falcon IMEM at offset 'dst'.
 *
 * IMEMC is programmed with block/offset, auto-increment-on-write, and
 * the secure flag. The IMEM tag register (IMEMT) is written every 64
 * words, i.e. once per 256-byte block, starting from bl 'tag' and
 * incrementing. The final block is zero-padded to its 256-byte boundary.
 *
 * NOTE(review): only full words are written (words = size >> 2); any
 * trailing size % 4 bytes are covered by the zero padding rather than
 * copied — presumably callers always pass word-aligned images; confirm.
 */
static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
		u8 *src, u32 size, u8 port, bool sec, u32 tag)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 *src_u32 = (u32 *)src;
	u32 words = 0;
	u32 blk = 0;
	u32 i = 0;

	nvgpu_log_info(g, "upload %d bytes to 0x%x", size, dst);

	if (flcn_mem_overflow_check(flcn, dst, size, MEM_IMEM)) {
		nvgpu_err(g, "incorrect parameters");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&flcn->copy_lock);

	words = size >> 2;
	blk = dst >> 8;

	nvgpu_log_info(g, "upload %d words to 0x%x block %d, tag 0x%x",
			words, dst, blk, tag);

	gk20a_writel(g, base_addr + falcon_falcon_imemc_r(port),
		falcon_falcon_imemc_offs_f(dst >> 2) |
		falcon_falcon_imemc_blk_f(blk) |
		/* Set Auto-Increment on write */
		falcon_falcon_imemc_aincw_f(1) |
		falcon_falcon_imemc_secure_f(sec ? 1U : 0U));

	for (i = 0; i < words; i++) {
		if (i % 64 == 0) {
			/* tag is always 256B aligned */
			gk20a_writel(g, base_addr + falcon_falcon_imemt_r(0),
				tag);
			tag++;
		}

		gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port),
			src_u32[i]);
	}

	/* WARNING : setting remaining bytes in block to 0x0 */
	while (i % 64) {
		gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port), 0);
		i++;
	}

	nvgpu_mutex_release(&flcn->copy_lock);

	return 0;
}

/*
 * Start the falcon CPU: clear the require-ctx DMA constraint, program
 * the boot vector, then set CPUCTL.startcpu. Always returns 0.
 */
static int gk20a_falcon_bootstrap(struct nvgpu_falcon *flcn,
	u32 boot_vector)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;

	nvgpu_log_info(g, "boot vec 0x%x", boot_vector);

	gk20a_writel(g, base_addr + falcon_falcon_dmactl_r(),
		falcon_falcon_dmactl_require_ctx_f(0));

	gk20a_writel(g, base_addr + falcon_falcon_bootvec_r(),
		falcon_falcon_bootvec_vec_f(boot_vector));

	gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
		falcon_falcon_cpuctl_startcpu_f(1));

	return 0;
}

/*
 * Read falcon mailbox 0 or 1. Indices >= FALCON_MAILBOX_COUNT are
 * rejected with an error log and return 0 (indistinguishable from a
 * mailbox that actually contains 0).
 */
static u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
		u32 mailbox_index)
{
	struct gk20a *g = flcn->g;
	u32 data = 0;

	if (mailbox_index < FALCON_MAILBOX_COUNT) {
		data = gk20a_readl(g, flcn->flcn_base + (mailbox_index ?
						falcon_falcon_mailbox1_r() :
						falcon_falcon_mailbox0_r()));
	} else {
		nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
	}

	return data;
}

/*
 * Write falcon mailbox 0 or 1; out-of-range indices are logged and
 * silently dropped.
 */
static void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
		u32 mailbox_index, u32 data)
{
	struct gk20a *g = flcn->g;

	if (mailbox_index < FALCON_MAILBOX_COUNT) {
		gk20a_writel(g, flcn->flcn_base + (mailbox_index ?
					falcon_falcon_mailbox1_r() :
					falcon_falcon_mailbox0_r()),
			data);
	} else {
		nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
	}
}

/*
 * Bootstrap the falcon through a secure bootloader:
 *  1. copy the bootloader descriptor to DMEM offset 0,
 *  2. copy the bootloader itself to the top of IMEM (sized from HWCFG),
 *  3. seed MAILBOX_0 with the 0xDEADA5A5 "not done" marker,
 *  4. start the CPU at the bootloader's virtual address (start_tag << 8).
 * Returns 0 on success or the first failing step's error code.
 */
static int gk20a_falcon_bl_bootstrap(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_bl_info *bl_info)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 virt_addr = 0;
	u32 dst = 0;
	int err = 0;

	/* copy bootloader interface structure to dmem */
	err = gk20a_flcn_copy_to_dmem(flcn, 0, (u8 *)bl_info->bl_desc,
		bl_info->bl_desc_size, (u8)0);
	if (err != 0) {
		goto exit;
	}

	/* copy bootloader to TOP of IMEM */
	dst = (falcon_falcon_hwcfg_imem_size_v(gk20a_readl(g,
		base_addr + falcon_falcon_hwcfg_r())) << 8) - bl_info->bl_size;

	err = gk20a_flcn_copy_to_imem(flcn, dst, (u8 *)(bl_info->bl_src),
		bl_info->bl_size, (u8)0, false, bl_info->bl_start_tag);
	if (err != 0) {
		goto exit;
	}

	gk20a_falcon_mailbox_write(flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);

	virt_addr = bl_info->bl_start_tag << 8;

	err = gk20a_falcon_bootstrap(flcn, virt_addr);

exit:
	if (err != 0) {
		nvgpu_err(g, "falcon id-0x%x bootstrap failed", flcn->flcn_id);
	}

	return err;
}

/*
 * Debug dump of the IMEM block (PA->VA) mapping table via the IMCTL
 * debug interface, 8 blocks per iteration, printed 4 per line.
 */
static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 i = 0, j = 0;
	u32 data[8] = {0};
	u32 block_count = 0;

	block_count = falcon_falcon_hwcfg_imem_size_v(gk20a_readl(g,
		flcn->flcn_base + falcon_falcon_hwcfg_r()));

	/* block_count must be multiple of 8 */
	block_count &= ~0x7;
	nvgpu_err(g, "FALCON IMEM BLK MAPPING (PA->VA) (%d TOTAL):",
		block_count);

	for (i = 0; i < block_count; i += 8) {
		for (j = 0; j < 8; j++) {
			/* cmd 0x2: query block status for block (i + j) */
			gk20a_writel(g, flcn->flcn_base +
			falcon_falcon_imctl_debug_r(),
			falcon_falcon_imctl_debug_cmd_f(0x2) |
			falcon_falcon_imctl_debug_addr_blk_f(i + j));

			data[j] = gk20a_readl(g, base_addr +
				falcon_falcon_imstat_r());
		}

		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i, data[0], data[1], data[2], data[3]);
		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i + 4, data[4], data[5], data[6], data[7]);
	}
}

/*
 * Debug dump of the falcon PC trace buffer (most recent branch first).
 * Skipped when SCTL bit 1 indicates the falcon runs in HS (heavy secure)
 * mode, where the trace registers are not accessible.
 */
static void gk20a_falcon_dump_pc_trace(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 trace_pc_count = 0;
	u32 pc = 0;
	u32 i = 0;

	if (gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02) {
		nvgpu_err(g, " falcon is in HS mode, PC TRACE dump not supported");
		return;
	}

	trace_pc_count = falcon_falcon_traceidx_maxidx_v(gk20a_readl(g,
		base_addr + falcon_falcon_traceidx_r()));
	nvgpu_err(g,
		"PC TRACE (TOTAL %d ENTRIES. entry 0 is the most recent branch):",
		trace_pc_count);

	for (i = 0; i < trace_pc_count; i++) {
		/* select trace entry i, then read its PC */
		gk20a_writel(g, base_addr + falcon_falcon_traceidx_r(),
			falcon_falcon_traceidx_idx_f(i));

		pc = falcon_falcon_tracepc_pc_v(gk20a_readl(g,
			base_addr + falcon_falcon_tracepc_r()));
		nvgpu_err(g, "FALCON_TRACEPC(%d) : %#010x", i, pc);
	}
}

/*
 * Full falcon debug dump: IMEM block mapping, PC trace, ICD-sampled
 * internal registers (PC/SP sampled 4 times, IMB, DMB, CSW, CTX, EXCI,
 * RSTAT), and a raw dump of the externally visible falcon registers.
 * Everything is emitted at error level; intended for failure triage.
 */
void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	unsigned int i;

	nvgpu_err(g, "<<< FALCON id-%d DEBUG INFORMATION - START >>>",
			flcn->flcn_id);

	/* imblk dump */
	gk20a_falcon_dump_imblk(flcn);
	/* PC trace dump */
	gk20a_falcon_dump_pc_trace(flcn);

	nvgpu_err(g, "FALCON ICD REGISTERS DUMP");

	/* sample PC and SP several times to see whether the core moves */
	for (i = 0; i < 4; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_PC));
		nvgpu_err(g, "FALCON_REG_PC : 0x%x",
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));

		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_SP));
		nvgpu_err(g, "FALCON_REG_SP : 0x%x",
			gk20a_readl(g, base_addr +
			falcon_falcon_icd_rdata_r()));
	}

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_IMB));
	nvgpu_err(g, "FALCON_REG_IMB : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_DMB));
	nvgpu_err(g, "FALCON_REG_DMB : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CSW));
	nvgpu_err(g, "FALCON_REG_CSW : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CTX));
	nvgpu_err(g, "FALCON_REG_CTX : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
		falcon_falcon_icd_cmd_opc_rreg_f() |
		falcon_falcon_icd_cmd_idx_f(FALCON_REG_EXCI));
	nvgpu_err(g, "FALCON_REG_EXCI : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));

	/*
	 * NOTE(review): the loop index i is not folded into the ICD
	 * command below, so all six iterations issue the identical
	 * command (idx = rstat opcode value) — verify this is the
	 * intended way to walk the RSTAT registers.
	 */
	for (i = 0; i < 6; i++) {
		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(
			falcon_falcon_icd_cmd_opc_rstat_f()));
		nvgpu_err(g, "FALCON_REG_RSTAT[%d] : 0x%x", i,
			gk20a_readl(g, base_addr +
				falcon_falcon_icd_rdata_r()));
	}

	nvgpu_err(g, " FALCON REGISTERS DUMP");
	nvgpu_err(g, "falcon_falcon_os_r : %d",
		gk20a_readl(g, base_addr + falcon_falcon_os_r()));
	nvgpu_err(g, "falcon_falcon_cpuctl_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()));
	nvgpu_err(g, "falcon_falcon_idlestate_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_idlestate_r()));
	nvgpu_err(g, "falcon_falcon_mailbox0_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_mailbox0_r()));
	nvgpu_err(g, "falcon_falcon_mailbox1_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_mailbox1_r()));
	nvgpu_err(g, "falcon_falcon_irqstat_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqstat_r()));
	nvgpu_err(g, "falcon_falcon_irqmode_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqmode_r()));
	nvgpu_err(g, "falcon_falcon_irqmask_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqmask_r()));
	nvgpu_err(g, "falcon_falcon_irqdest_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_irqdest_r()));
	nvgpu_err(g, "falcon_falcon_debug1_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_debug1_r()));
	nvgpu_err(g, "falcon_falcon_debuginfo_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_debuginfo_r()));
	nvgpu_err(g, "falcon_falcon_bootvec_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_bootvec_r()));
	nvgpu_err(g, "falcon_falcon_hwcfg_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_hwcfg_r()));
	nvgpu_err(g, "falcon_falcon_engctl_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_engctl_r()));
	nvgpu_err(g, "falcon_falcon_curctx_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_curctx_r()));
	nvgpu_err(g, "falcon_falcon_nxtctx_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_nxtctx_r()));
	nvgpu_err(g, "falcon_falcon_exterrstat_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_exterrstat_r()));
	nvgpu_err(g, "falcon_falcon_exterraddr_r : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_exterraddr_r()));
}

/*
 * Wire up engine-dependent callbacks for this falcon. Only the PMU has
 * an engine-level reset and queue accessors; everything else leaves
 * reset_eng NULL so gk20a_flcn_reset() falls back to CPU hard reset.
 */
static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
			&flcn->flcn_engine_dep_ops;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
		break;
	default:
		/* NULL assignment make sure
		 * CPU hard reset in gk20a_flcn_reset() gets execute
		 * if falcon doesn't need specific reset implementation
		 */
		flcn_eng_dep_ops->reset_eng = NULL;
		break;
	}
}

/*
 * Populate the gk20a falcon ops table with the implementations above
 * and install the engine-dependency callbacks.
 */
void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
{
	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;

	flcn_ops->reset = gk20a_flcn_reset;
	flcn_ops->set_irq = gk20a_flcn_set_irq;
	flcn_ops->clear_halt_interrupt_status =
		gk20a_flcn_clear_halt_interrupt_status;
	flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
	flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
	flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;
	flcn_ops->copy_from_dmem = gk20a_flcn_copy_from_dmem;
	flcn_ops->copy_to_dmem = gk20a_flcn_copy_to_dmem;
	flcn_ops->copy_to_imem = gk20a_flcn_copy_to_imem;
	flcn_ops->copy_from_imem = gk20a_flcn_copy_from_imem;
	flcn_ops->bootstrap = gk20a_falcon_bootstrap;
	flcn_ops->dump_falcon_stats = gk20a_falcon_dump_stats;
	flcn_ops->mailbox_read = gk20a_falcon_mailbox_read;
	flcn_ops->mailbox_write = gk20a_falcon_mailbox_write;
	flcn_ops->bl_bootstrap = gk20a_falcon_bl_bootstrap;

	gk20a_falcon_engine_dependency_ops(flcn);
}

/*
 * Per-falcon SW init: resolve the MMIO base and support/interrupt flags
 * from the falcon id, then (for supported falcons) initialize the copy
 * mutex and install the ops table. SEC2 and NVDEC are known but not
 * supported on this chip; unknown ids are marked unsupported and only
 * logged at info level. Returns 0, or the mutex-init error.
 */
int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	int err = 0;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn->flcn_base = FALCON_PWR_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = true;
		break;
	case FALCON_ID_SEC2:
		flcn->flcn_base = FALCON_SEC_BASE;
		flcn->is_falcon_supported = false;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_FECS:
		flcn->flcn_base = FALCON_FECS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_GPCCS:
		flcn->flcn_base = FALCON_GPCCS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_NVDEC:
		flcn->flcn_base = FALCON_NVDEC_BASE;
		flcn->is_falcon_supported = false;
		flcn->is_interrupt_enabled = false;
		break;
	default:
		flcn->is_falcon_supported = false;
		break;
	}

	if (flcn->is_falcon_supported) {
		err = nvgpu_mutex_init(&flcn->copy_lock);
		if (err != 0) {
			nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
		} else {
			gk20a_falcon_ops(flcn);
		}
	} else {
		nvgpu_log_info(g, "falcon 0x%x not supported on %s",
			flcn->flcn_id, g->name);
	}

	return err;
}
diff --git a/include/gk20a/flcn_gk20a.h b/include/gk20a/flcn_gk20a.h
deleted file mode 100644
index 9d27b38..0000000
--- a/include/gk20a/flcn_gk20a.h
+++ /dev/null
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef NVGPU_GK20A_FLCN_GK20A_H
#define NVGPU_GK20A_FLCN_GK20A_H

/* Public entry points of the gk20a falcon HAL (flcn_gk20a.c). */

/* Install the gk20a falcon ops table and engine-dependency callbacks. */
void gk20a_falcon_ops(struct nvgpu_falcon *flcn);
/*
 * Per-falcon SW init: resolve MMIO base and support flags from the
 * falcon id, init the copy mutex, install ops. Returns 0 or -errno.
 */
int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
/* Error-level debug dump of falcon state (trace, ICD, registers). */
void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn);

#endif /* NVGPU_GK20A_FLCN_GK20A_H */
diff --git a/include/gk20a/gk20a.c b/include/gk20a/gk20a.c
deleted file mode 100644
index 1a11716..0000000
--- a/include/gk20a/gk20a.c
+++ /dev/null
@@ -1,595 +0,0 @@ 1/* 2 * GK20A Graphics 3 * 4 * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <nvgpu/nvgpu_common.h> 26#include <nvgpu/kmem.h> 27#include <nvgpu/allocator.h> 28#include <nvgpu/timers.h> 29#include <nvgpu/soc.h> 30#include <nvgpu/enabled.h> 31#include <nvgpu/pmu.h> 32#include <nvgpu/gmmu.h> 33#include <nvgpu/ltc.h> 34#include <nvgpu/vidmem.h> 35#include <nvgpu/mm.h> 36#include <nvgpu/ctxsw_trace.h> 37#include <nvgpu/soc.h> 38#include <nvgpu/clk_arb.h> 39#include <nvgpu/therm.h> 40#include <nvgpu/mc.h> 41#include <nvgpu/channel_sync.h> 42#include <nvgpu/nvgpu_err.h> 43 44#include <trace/events/gk20a.h> 45 46#include "gk20a.h" 47 48#include "dbg_gpu_gk20a.h" 49#include "pstate/pstate.h" 50 51void __nvgpu_check_gpu_state(struct gk20a *g) 52{ 53 u32 boot_0 = 0xffffffff; 54 55 boot_0 = nvgpu_mc_boot_0(g, NULL, NULL, NULL); 56 if (boot_0 == 0xffffffff) { 57 nvgpu_err(g, "GPU has disappeared from bus!!"); 58 nvgpu_err(g, "Rebooting system!!"); 59 nvgpu_kernel_restart(NULL); 60 } 61} 62 63void __gk20a_warn_on_no_regs(void) 64{ 65 WARN_ONCE(1, "Attempted access to GPU regs after unmapping!"); 66} 67 68static void gk20a_mask_interrupts(struct gk20a *g) 69{ 70 if (g->ops.mc.intr_mask != NULL) { 71 g->ops.mc.intr_mask(g); 72 } 73 74 if (g->ops.mc.log_pending_intrs != NULL) { 75 g->ops.mc.log_pending_intrs(g); 76 } 77} 78 79int gk20a_prepare_poweroff(struct gk20a *g) 80{ 81 int ret = 0; 82 83 nvgpu_log_fn(g, " "); 84 85 if (g->ops.fifo.channel_suspend) { 86 ret = g->ops.fifo.channel_suspend(g); 87 if (ret) { 88 return ret; 89 } 90 } 91 92 /* disable elpg before gr or fifo suspend */ 93 if (g->ops.pmu.is_pmu_supported(g)) { 94 ret |= nvgpu_pmu_destroy(g); 95 } 96 97 if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) { 98 ret |= nvgpu_sec2_destroy(g); 99 } 100 101 ret |= gk20a_gr_suspend(g); 102 ret |= nvgpu_mm_suspend(g); 103 ret |= gk20a_fifo_suspend(g); 104 105 gk20a_ce_suspend(g); 106 107 /* Disable GPCPLL */ 108 if (g->ops.clk.suspend_clk_support) { 109 ret |= g->ops.clk.suspend_clk_support(g); 110 } 111 112 if (nvgpu_is_enabled(g, 
NVGPU_PMU_PSTATE)) { 113 gk20a_deinit_pstate_support(g); 114 } 115 116 gk20a_mask_interrupts(g); 117 118 g->power_on = false; 119 120 return ret; 121} 122 123int gk20a_finalize_poweron(struct gk20a *g) 124{ 125 int err = 0; 126#if defined(CONFIG_TEGRA_GK20A_NVHOST) 127 u32 nr_pages; 128#endif 129 130 u32 fuse_status; 131 132 nvgpu_log_fn(g, " "); 133 134 if (g->power_on) { 135 return 0; 136 } 137 138 g->power_on = true; 139 140 /* 141 * Before probing the GPU make sure the GPU's state is cleared. This is 142 * relevant for rebind operations. 143 */ 144 if (g->ops.xve.reset_gpu && !g->gpu_reset_done) { 145 g->ops.xve.reset_gpu(g); 146 g->gpu_reset_done = true; 147 } 148 149 if (g->ops.clock_gating.slcg_acb_load_gating_prod != NULL) { 150 g->ops.clock_gating.slcg_acb_load_gating_prod(g, true); 151 } 152 153 /* 154 * Do this early so any early VMs that get made are capable of mapping 155 * buffers. 156 */ 157 err = nvgpu_pd_cache_init(g); 158 if (err) { 159 return err; 160 } 161 162 /* init interface layer support for PMU falcon */ 163 err = nvgpu_flcn_sw_init(g, FALCON_ID_PMU); 164 if (err != 0) { 165 nvgpu_err(g, "failed to sw init FALCON_ID_PMU"); 166 goto done; 167 } 168 err = nvgpu_flcn_sw_init(g, FALCON_ID_SEC2); 169 if (err != 0) { 170 nvgpu_err(g, "failed to sw init FALCON_ID_SEC2"); 171 goto done; 172 } 173 err = nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC); 174 if (err != 0) { 175 nvgpu_err(g, "failed to sw init FALCON_ID_NVDEC"); 176 goto done; 177 } 178 err = nvgpu_flcn_sw_init(g, FALCON_ID_GSPLITE); 179 if (err != 0) { 180 nvgpu_err(g, "failed to sw init FALCON_ID_GSPLITE"); 181 goto done; 182 } 183 184 if (g->ops.acr.acr_sw_init != NULL && 185 nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { 186 g->ops.acr.acr_sw_init(g, &g->acr); 187 } 188 189 if (g->ops.bios.init) { 190 err = g->ops.bios.init(g); 191 } 192 if (err) { 193 goto done; 194 } 195 196 g->ops.bus.init_hw(g); 197 198 if (g->ops.clk.disable_slowboot) { 199 g->ops.clk.disable_slowboot(g); 200 } 201 202 
g->ops.priv_ring.enable_priv_ring(g); 203 204 /* TBD: move this after graphics init in which blcg/slcg is enabled. 205 This function removes SlowdownOnBoot which applies 32x divider 206 on gpcpll bypass path. The purpose of slowdown is to save power 207 during boot but it also significantly slows down gk20a init on 208 simulation and emulation. We should remove SOB after graphics power 209 saving features (blcg/slcg) are enabled. For now, do it here. */ 210 if (g->ops.clk.init_clk_support) { 211 err = g->ops.clk.init_clk_support(g); 212 if (err) { 213 nvgpu_err(g, "failed to init gk20a clk"); 214 goto done; 215 } 216 } 217 218 if (nvgpu_is_enabled(g, NVGPU_SUPPORT_NVLINK)) { 219 err = g->ops.nvlink.init(g); 220 if (err) { 221 nvgpu_err(g, "failed to init nvlink"); 222 goto done; 223 } 224 } 225 226 if (g->ops.fb.init_fbpa) { 227 err = g->ops.fb.init_fbpa(g); 228 if (err) { 229 nvgpu_err(g, "failed to init fbpa"); 230 goto done; 231 } 232 } 233 234 if (g->ops.fb.mem_unlock) { 235 err = g->ops.fb.mem_unlock(g); 236 if (err) { 237 nvgpu_err(g, "failed to unlock memory"); 238 goto done; 239 } 240 } 241 242 err = g->ops.fifo.reset_enable_hw(g); 243 244 if (err) { 245 nvgpu_err(g, "failed to reset gk20a fifo"); 246 goto done; 247 } 248 249 err = nvgpu_init_ltc_support(g); 250 if (err) { 251 nvgpu_err(g, "failed to init ltc"); 252 goto done; 253 } 254 255 err = nvgpu_init_mm_support(g); 256 if (err) { 257 nvgpu_err(g, "failed to init gk20a mm"); 258 goto done; 259 } 260 261 err = gk20a_init_fifo_support(g); 262 if (err) { 263 nvgpu_err(g, "failed to init gk20a fifo"); 264 goto done; 265 } 266 267 if (g->ops.therm.elcg_init_idle_filters) { 268 g->ops.therm.elcg_init_idle_filters(g); 269 } 270 271 g->ops.mc.intr_enable(g); 272 273 /* 274 * Power gate the chip as per the TPC PG mask 275 * and the fuse_status register. 276 * If TPC PG mask is invalid halt the GPU poweron. 
277 */ 278 g->can_tpc_powergate = false; 279 fuse_status = g->ops.fuse.fuse_status_opt_tpc_gpc(g, 0); 280 281 if (g->ops.tpc.tpc_powergate) { 282 err = g->ops.tpc.tpc_powergate(g, fuse_status); 283 } 284 285 if (err) { 286 nvgpu_err(g, "failed to power ON GPU"); 287 goto done; 288 } 289 290 nvgpu_mutex_acquire(&g->tpc_pg_lock); 291 292 if (g->can_tpc_powergate) { 293 if (g->ops.gr.powergate_tpc != NULL) 294 g->ops.gr.powergate_tpc(g); 295 } 296 297 err = gk20a_enable_gr_hw(g); 298 if (err) { 299 nvgpu_err(g, "failed to enable gr"); 300 nvgpu_mutex_release(&g->tpc_pg_lock); 301 goto done; 302 } 303 304 if (g->ops.pmu.is_pmu_supported(g)) { 305 if (g->ops.pmu.prepare_ucode) { 306 err = g->ops.pmu.prepare_ucode(g); 307 } 308 if (err) { 309 nvgpu_err(g, "failed to init pmu ucode"); 310 nvgpu_mutex_release(&g->tpc_pg_lock); 311 goto done; 312 } 313 } 314 315 if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { 316 err = gk20a_init_pstate_support(g); 317 if (err) { 318 nvgpu_err(g, "failed to init pstates"); 319 nvgpu_mutex_release(&g->tpc_pg_lock); 320 goto done; 321 } 322 } 323 324 if (g->acr.bootstrap_hs_acr != NULL && 325 nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { 326 err = g->acr.bootstrap_hs_acr(g, &g->acr, &g->acr.acr); 327 if (err != 0) { 328 nvgpu_err(g, "ACR bootstrap failed"); 329 nvgpu_mutex_release(&g->tpc_pg_lock); 330 goto done; 331 } 332 } 333 334 if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) { 335 err = nvgpu_init_sec2_support(g); 336 if (err != 0) { 337 nvgpu_err(g, "failed to init sec2"); 338 nvgpu_mutex_release(&g->tpc_pg_lock); 339 goto done; 340 } 341 } 342 343 if (g->ops.pmu.is_pmu_supported(g)) { 344 err = nvgpu_init_pmu_support(g); 345 if (err) { 346 nvgpu_err(g, "failed to init gk20a pmu"); 347 nvgpu_mutex_release(&g->tpc_pg_lock); 348 goto done; 349 } 350 } 351 352 err = gk20a_init_gr_support(g); 353 if (err) { 354 nvgpu_err(g, "failed to init gk20a gr"); 355 nvgpu_mutex_release(&g->tpc_pg_lock); 356 goto done; 357 } 358 359 
nvgpu_mutex_release(&g->tpc_pg_lock); 360 361 if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { 362 err = gk20a_init_pstate_pmu_support(g); 363 if (err) { 364 nvgpu_err(g, "failed to init pstates"); 365 goto done; 366 } 367 } 368 369 if (g->ops.pmu_ver.clk.clk_set_boot_clk && nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { 370 g->ops.pmu_ver.clk.clk_set_boot_clk(g); 371 } else { 372 err = nvgpu_clk_arb_init_arbiter(g); 373 if (err) { 374 nvgpu_err(g, "failed to init clk arb"); 375 goto done; 376 } 377 } 378 379 err = nvgpu_init_therm_support(g); 380 if (err) { 381 nvgpu_err(g, "failed to init gk20a therm"); 382 goto done; 383 } 384 385 err = g->ops.chip_init_gpu_characteristics(g); 386 if (err) { 387 nvgpu_err(g, "failed to init gk20a gpu characteristics"); 388 goto done; 389 } 390 391#ifdef CONFIG_GK20A_CTXSW_TRACE 392 err = gk20a_ctxsw_trace_init(g); 393 if (err) 394 nvgpu_warn(g, "could not initialize ctxsw tracing"); 395#endif 396 397 /* Restore the debug setting */ 398 g->ops.fb.set_debug_mode(g, g->mmu_debug_ctrl); 399 400 gk20a_init_ce_support(g); 401 402 if (g->ops.xve.available_speeds) { 403 u32 speed; 404 405 if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_ASPM) && g->ops.xve.disable_aspm) { 406 g->ops.xve.disable_aspm(g); 407 } 408 409 g->ops.xve.available_speeds(g, &speed); 410 411 /* Set to max speed */ 412 speed = 1 << (fls(speed) - 1); 413 err = g->ops.xve.set_speed(g, speed); 414 if (err) { 415 nvgpu_err(g, "Failed to set PCIe bus speed!"); 416 goto done; 417 } 418 } 419 420#if defined(CONFIG_TEGRA_GK20A_NVHOST) 421 if (nvgpu_has_syncpoints(g) && g->syncpt_unit_size) { 422 if (!nvgpu_mem_is_valid(&g->syncpt_mem)) { 423 nr_pages = DIV_ROUND_UP(g->syncpt_unit_size, PAGE_SIZE); 424 __nvgpu_mem_create_from_phys(g, &g->syncpt_mem, 425 g->syncpt_unit_base, nr_pages); 426 } 427 } 428#endif 429 430 if (g->ops.fifo.channel_resume) { 431 g->ops.fifo.channel_resume(g); 432 } 433 434done: 435 if (err) { 436 g->power_on = false; 437 } 438 439 return err; 440} 441 442int 
gk20a_wait_for_idle(struct gk20a *g) 443{ 444 int wait_length = 150; /* 3 second overall max wait. */ 445 int target_usage_count = 0; 446 447 if (!g) { 448 return -ENODEV; 449 } 450 451 while ((nvgpu_atomic_read(&g->usage_count) != target_usage_count) 452 && (wait_length-- >= 0)) { 453 nvgpu_msleep(20); 454 } 455 456 if (wait_length < 0) { 457 nvgpu_warn(g, "Timed out waiting for idle (%d)!\n", 458 nvgpu_atomic_read(&g->usage_count)); 459 return -ETIMEDOUT; 460 } 461 462 return 0; 463} 464 465int gk20a_init_gpu_characteristics(struct gk20a *g) 466{ 467 __nvgpu_set_enabled(g, NVGPU_SUPPORT_PARTIAL_MAPPINGS, true); 468 __nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true); 469 __nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true); 470 471 if (IS_ENABLED(CONFIG_SYNC)) { 472 __nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true); 473 } 474 475 if (g->ops.mm.support_sparse && g->ops.mm.support_sparse(g)) { 476 __nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true); 477 } 478 479 /* 480 * Fast submits are supported as long as the user doesn't request 481 * anything that depends on job tracking. (Here, fast means strictly no 482 * metadata, just the gpfifo contents are copied and gp_put updated). 483 */ 484 __nvgpu_set_enabled(g, 485 NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_NO_JOBTRACKING, 486 true); 487 488 /* 489 * Sync framework requires deferred job cleanup, wrapping syncs in FDs, 490 * and other heavy stuff, which prevents deterministic submits. This is 491 * supported otherwise, provided that the user doesn't request anything 492 * that depends on deferred cleanup. 
493 */ 494 if (!nvgpu_channel_sync_needs_os_fence_framework(g)) { 495 __nvgpu_set_enabled(g, 496 NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL, 497 true); 498 } 499 500 __nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true); 501 502 __nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true); 503 __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG, true); 504 505 if (g->ops.clk_arb.get_arbiter_clk_domains != NULL && 506 g->ops.clk.support_clk_freq_controller) { 507 __nvgpu_set_enabled(g, NVGPU_SUPPORT_CLOCK_CONTROLS, true); 508 } 509 510 g->ops.gr.detect_sm_arch(g); 511 512 if (g->ops.gr.init_cyclestats) { 513 g->ops.gr.init_cyclestats(g); 514 } 515 516 g->ops.gr.get_rop_l2_en_mask(g); 517 518 return 0; 519} 520 521/* 522 * Free the gk20a struct. 523 */ 524static void gk20a_free_cb(struct nvgpu_ref *refcount) 525{ 526 struct gk20a *g = container_of(refcount, 527 struct gk20a, refcount); 528 529#ifdef CONFIG_NVGPU_SUPPORT_LINUX_ECC_ERROR_REPORTING 530 nvgpu_deinit_ecc_reporting(g); 531#endif 532 533 nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!"); 534 535 gk20a_ce_destroy(g); 536 537 if (g->remove_support) { 538 g->remove_support(g); 539 } 540 541 if (g->free) { 542 g->free(g); 543 } 544} 545 546/** 547 * gk20a_get() - Increment ref count on driver 548 * 549 * @g The driver to increment 550 * This will fail if the driver is in the process of being released. In that 551 * case it will return NULL. Otherwise a pointer to the driver passed in will 552 * be returned. 553 */ 554struct gk20a * __must_check gk20a_get(struct gk20a *g) 555{ 556 int success; 557 558 /* 559 * Handle the possibility we are still freeing the gk20a struct while 560 * gk20a_get() is called. Unlikely but plausible race condition. Ideally 561 * the code will never be in such a situation that this race is 562 * possible. 
563 */ 564 success = nvgpu_ref_get_unless_zero(&g->refcount); 565 566 nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s", 567 nvgpu_atomic_read(&g->refcount.refcount), 568 success ? "" : "(FAILED)"); 569 570 return success ? g : NULL; 571} 572 573/** 574 * gk20a_put() - Decrement ref count on driver 575 * 576 * @g - The driver to decrement 577 * 578 * Decrement the driver ref-count. If neccesary also free the underlying driver 579 * memory 580 */ 581void gk20a_put(struct gk20a *g) 582{ 583 /* 584 * Note - this is racy, two instances of this could run before the 585 * actual kref_put(0 runs, you could see something like: 586 * 587 * ... PUT: refs currently 2 588 * ... PUT: refs currently 2 589 * ... Freeing GK20A struct! 590 */ 591 nvgpu_log(g, gpu_dbg_shutdown, "PUT: refs currently %d", 592 nvgpu_atomic_read(&g->refcount.refcount)); 593 594 nvgpu_ref_put(&g->refcount, gk20a_free_cb); 595}
diff --git a/include/gk20a/gk20a.h b/include/gk20a/gk20a.h
deleted file mode 100644
index 16a2453..0000000
--- a/include/gk20a/gk20a.h
+++ /dev/null
@@ -1,33 +0,0 @@ 1/* 2 * This file is used as a temporary redirection header for <nvgpu/gk20a.h> 3 * 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * GK20A Graphics 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * DEALINGS IN THE SOFTWARE. 25 */ 26 27#ifndef GK20A_GK20A_H 28#define GK20A_GK20A_H 29 30/* no new headers should be added here */ 31#include <nvgpu/gk20a.h> 32 33#endif
diff --git a/include/gk20a/gr_ctx_gk20a.c b/include/gk20a/gr_ctx_gk20a.c
deleted file mode 100644
index 8b9ac32..0000000
--- a/include/gk20a/gr_ctx_gk20a.c
+++ /dev/null
@@ -1,486 +0,0 @@ 1/* 2 * GK20A Graphics Context 3 * 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <nvgpu/nvgpu_common.h> 26#include <nvgpu/kmem.h> 27#include <nvgpu/log.h> 28#include <nvgpu/firmware.h> 29#include <nvgpu/enabled.h> 30#include <nvgpu/io.h> 31 32#include "gk20a.h" 33#include "gr_ctx_gk20a.h" 34 35#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 36 37static int gr_gk20a_alloc_load_netlist_u32(struct gk20a *g, u32 *src, u32 len, 38 struct u32_list_gk20a *u32_list) 39{ 40 u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32); 41 if (!alloc_u32_list_gk20a(g, u32_list)) { 42 return -ENOMEM; 43 } 44 45 memcpy(u32_list->l, src, len); 46 47 return 0; 48} 49 50static int gr_gk20a_alloc_load_netlist_av(struct gk20a *g, u32 *src, u32 len, 51 struct av_list_gk20a *av_list) 52{ 53 av_list->count = len / sizeof(struct av_gk20a); 54 if (!alloc_av_list_gk20a(g, av_list)) { 55 return -ENOMEM; 56 } 57 58 memcpy(av_list->l, src, len); 59 60 return 0; 61} 62 63static int gr_gk20a_alloc_load_netlist_av64(struct gk20a *g, u32 *src, u32 len, 64 struct av64_list_gk20a *av64_list) 65{ 66 av64_list->count = len / sizeof(struct av64_gk20a); 67 if (!alloc_av64_list_gk20a(g, av64_list)) { 68 return -ENOMEM; 69 } 70 71 memcpy(av64_list->l, src, len); 72 73 return 0; 74} 75 76static int gr_gk20a_alloc_load_netlist_aiv(struct gk20a *g, u32 *src, u32 len, 77 struct aiv_list_gk20a *aiv_list) 78{ 79 aiv_list->count = len / sizeof(struct aiv_gk20a); 80 if (!alloc_aiv_list_gk20a(g, aiv_list)) { 81 return -ENOMEM; 82 } 83 84 memcpy(aiv_list->l, src, len); 85 86 return 0; 87} 88 89static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) 90{ 91 struct nvgpu_firmware *netlist_fw; 92 struct netlist_image *netlist = NULL; 93 char name[MAX_NETLIST_NAME]; 94 u32 i, major_v = ~0, major_v_hw, netlist_num; 95 int net, max, err = -ENOENT; 96 97 nvgpu_log_fn(g, " "); 98 99 if (g->ops.gr_ctx.is_fw_defined()) { 100 net = NETLIST_FINAL; 101 max = 0; 102 major_v_hw = ~0; 103 g->gr.ctx_vars.dynamic = false; 104 } else { 105 net = NETLIST_SLOT_A; 106 max = MAX_NETLIST; 
107 major_v_hw = gk20a_readl(g, 108 gr_fecs_ctx_state_store_major_rev_id_r()); 109 g->gr.ctx_vars.dynamic = true; 110 } 111 112 for (; net < max; net++) { 113 if (g->ops.gr_ctx.get_netlist_name(g, net, name) != 0) { 114 nvgpu_warn(g, "invalid netlist index %d", net); 115 continue; 116 } 117 118 netlist_fw = nvgpu_request_firmware(g, name, 0); 119 if (!netlist_fw) { 120 nvgpu_warn(g, "failed to load netlist %s", name); 121 continue; 122 } 123 124 netlist = (struct netlist_image *)netlist_fw->data; 125 126 for (i = 0; i < netlist->header.regions; i++) { 127 u32 *src = (u32 *)((u8 *)netlist + netlist->regions[i].data_offset); 128 u32 size = netlist->regions[i].data_size; 129 130 switch (netlist->regions[i].region_id) { 131 case NETLIST_REGIONID_FECS_UCODE_DATA: 132 nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA"); 133 err = gr_gk20a_alloc_load_netlist_u32(g, 134 src, size, &g->gr.ctx_vars.ucode.fecs.data); 135 if (err) { 136 goto clean_up; 137 } 138 break; 139 case NETLIST_REGIONID_FECS_UCODE_INST: 140 nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST"); 141 err = gr_gk20a_alloc_load_netlist_u32(g, 142 src, size, &g->gr.ctx_vars.ucode.fecs.inst); 143 if (err) { 144 goto clean_up; 145 } 146 break; 147 case NETLIST_REGIONID_GPCCS_UCODE_DATA: 148 nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA"); 149 err = gr_gk20a_alloc_load_netlist_u32(g, 150 src, size, &g->gr.ctx_vars.ucode.gpccs.data); 151 if (err) { 152 goto clean_up; 153 } 154 break; 155 case NETLIST_REGIONID_GPCCS_UCODE_INST: 156 nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST"); 157 err = gr_gk20a_alloc_load_netlist_u32(g, 158 src, size, &g->gr.ctx_vars.ucode.gpccs.inst); 159 if (err) { 160 goto clean_up; 161 } 162 break; 163 case NETLIST_REGIONID_SW_BUNDLE_INIT: 164 nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT"); 165 err = gr_gk20a_alloc_load_netlist_av(g, 166 src, size, &g->gr.ctx_vars.sw_bundle_init); 167 if (err) { 168 goto clean_up; 169 } 170 break; 171 case 
NETLIST_REGIONID_SW_METHOD_INIT: 172 nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT"); 173 err = gr_gk20a_alloc_load_netlist_av(g, 174 src, size, &g->gr.ctx_vars.sw_method_init); 175 if (err) { 176 goto clean_up; 177 } 178 break; 179 case NETLIST_REGIONID_SW_CTX_LOAD: 180 nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD"); 181 err = gr_gk20a_alloc_load_netlist_aiv(g, 182 src, size, &g->gr.ctx_vars.sw_ctx_load); 183 if (err) { 184 goto clean_up; 185 } 186 break; 187 case NETLIST_REGIONID_SW_NON_CTX_LOAD: 188 nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD"); 189 err = gr_gk20a_alloc_load_netlist_av(g, 190 src, size, &g->gr.ctx_vars.sw_non_ctx_load); 191 if (err) { 192 goto clean_up; 193 } 194 break; 195 case NETLIST_REGIONID_SWVEIDBUNDLEINIT: 196 nvgpu_log_info(g, 197 "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT"); 198 err = gr_gk20a_alloc_load_netlist_av(g, 199 src, size, 200 &g->gr.ctx_vars.sw_veid_bundle_init); 201 if (err) { 202 goto clean_up; 203 } 204 break; 205 case NETLIST_REGIONID_CTXREG_SYS: 206 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS"); 207 err = gr_gk20a_alloc_load_netlist_aiv(g, 208 src, size, &g->gr.ctx_vars.ctxsw_regs.sys); 209 if (err) { 210 goto clean_up; 211 } 212 break; 213 case NETLIST_REGIONID_CTXREG_GPC: 214 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC"); 215 err = gr_gk20a_alloc_load_netlist_aiv(g, 216 src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); 217 if (err) { 218 goto clean_up; 219 } 220 break; 221 case NETLIST_REGIONID_CTXREG_TPC: 222 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC"); 223 err = gr_gk20a_alloc_load_netlist_aiv(g, 224 src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); 225 if (err) { 226 goto clean_up; 227 } 228 break; 229 case NETLIST_REGIONID_CTXREG_ZCULL_GPC: 230 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC"); 231 err = gr_gk20a_alloc_load_netlist_aiv(g, 232 src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); 233 if (err) { 234 goto clean_up; 235 } 236 break; 237 case NETLIST_REGIONID_CTXREG_PPC: 238 nvgpu_log_info(g, 
"NETLIST_REGIONID_CTXREG_PPC"); 239 err = gr_gk20a_alloc_load_netlist_aiv(g, 240 src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); 241 if (err) { 242 goto clean_up; 243 } 244 break; 245 case NETLIST_REGIONID_CTXREG_PM_SYS: 246 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS"); 247 err = gr_gk20a_alloc_load_netlist_aiv(g, 248 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); 249 if (err) { 250 goto clean_up; 251 } 252 break; 253 case NETLIST_REGIONID_CTXREG_PM_GPC: 254 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC"); 255 err = gr_gk20a_alloc_load_netlist_aiv(g, 256 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); 257 if (err) { 258 goto clean_up; 259 } 260 break; 261 case NETLIST_REGIONID_CTXREG_PM_TPC: 262 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC"); 263 err = gr_gk20a_alloc_load_netlist_aiv(g, 264 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); 265 if (err) { 266 goto clean_up; 267 } 268 break; 269 case NETLIST_REGIONID_BUFFER_SIZE: 270 g->gr.ctx_vars.buffer_size = *src; 271 nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d", 272 g->gr.ctx_vars.buffer_size); 273 break; 274 case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX: 275 g->gr.ctx_vars.regs_base_index = *src; 276 nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u", 277 g->gr.ctx_vars.regs_base_index); 278 break; 279 case NETLIST_REGIONID_MAJORV: 280 major_v = *src; 281 nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d", 282 major_v); 283 break; 284 case NETLIST_REGIONID_NETLIST_NUM: 285 netlist_num = *src; 286 nvgpu_log_info(g, "NETLIST_REGIONID_NETLIST_NUM : %d", 287 netlist_num); 288 break; 289 case NETLIST_REGIONID_CTXREG_PMPPC: 290 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC"); 291 err = gr_gk20a_alloc_load_netlist_aiv(g, 292 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); 293 if (err) { 294 goto clean_up; 295 } 296 break; 297 case NETLIST_REGIONID_NVPERF_CTXREG_SYS: 298 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS"); 299 err = gr_gk20a_alloc_load_netlist_aiv(g, 300 src, size, 
&g->gr.ctx_vars.ctxsw_regs.perf_sys); 301 if (err) { 302 goto clean_up; 303 } 304 break; 305 case NETLIST_REGIONID_NVPERF_FBP_CTXREGS: 306 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); 307 err = gr_gk20a_alloc_load_netlist_aiv(g, 308 src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); 309 if (err) { 310 goto clean_up; 311 } 312 break; 313 case NETLIST_REGIONID_NVPERF_CTXREG_GPC: 314 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC"); 315 err = gr_gk20a_alloc_load_netlist_aiv(g, 316 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); 317 if (err) { 318 goto clean_up; 319 } 320 break; 321 case NETLIST_REGIONID_NVPERF_FBP_ROUTER: 322 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER"); 323 err = gr_gk20a_alloc_load_netlist_aiv(g, 324 src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); 325 if (err) { 326 goto clean_up; 327 } 328 break; 329 case NETLIST_REGIONID_NVPERF_GPC_ROUTER: 330 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER"); 331 err = gr_gk20a_alloc_load_netlist_aiv(g, 332 src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); 333 if (err) { 334 goto clean_up; 335 } 336 break; 337 case NETLIST_REGIONID_CTXREG_PMLTC: 338 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC"); 339 err = gr_gk20a_alloc_load_netlist_aiv(g, 340 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); 341 if (err) { 342 goto clean_up; 343 } 344 break; 345 case NETLIST_REGIONID_CTXREG_PMFBPA: 346 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA"); 347 err = gr_gk20a_alloc_load_netlist_aiv(g, 348 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); 349 if (err) { 350 goto clean_up; 351 } 352 break; 353 case NETLIST_REGIONID_NVPERF_SYS_ROUTER: 354 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER"); 355 err = gr_gk20a_alloc_load_netlist_aiv(g, 356 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); 357 if (err) { 358 goto clean_up; 359 } 360 break; 361 case NETLIST_REGIONID_NVPERF_PMA: 362 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA"); 363 err = gr_gk20a_alloc_load_netlist_aiv(g, 
364 src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); 365 if (err) { 366 goto clean_up; 367 } 368 break; 369 case NETLIST_REGIONID_CTXREG_PMROP: 370 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP"); 371 err = gr_gk20a_alloc_load_netlist_aiv(g, 372 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); 373 if (err) { 374 goto clean_up; 375 } 376 break; 377 case NETLIST_REGIONID_CTXREG_PMUCGPC: 378 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC"); 379 err = gr_gk20a_alloc_load_netlist_aiv(g, 380 src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); 381 if (err) { 382 goto clean_up; 383 } 384 break; 385 case NETLIST_REGIONID_CTXREG_ETPC: 386 nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC"); 387 err = gr_gk20a_alloc_load_netlist_aiv(g, 388 src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); 389 if (err) { 390 goto clean_up; 391 } 392 break; 393 case NETLIST_REGIONID_SW_BUNDLE64_INIT: 394 nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE64_INIT"); 395 err = gr_gk20a_alloc_load_netlist_av64(g, 396 src, size, 397 &g->gr.ctx_vars.sw_bundle64_init); 398 if (err) { 399 goto clean_up; 400 } 401 break; 402 case NETLIST_REGIONID_NVPERF_PMCAU: 403 nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMCAU"); 404 err = gr_gk20a_alloc_load_netlist_aiv(g, 405 src, size, 406 &g->gr.ctx_vars.ctxsw_regs.pm_cau); 407 if (err) { 408 goto clean_up; 409 } 410 break; 411 412 default: 413 nvgpu_log_info(g, "unrecognized region %d skipped", i); 414 break; 415 } 416 } 417 418 if (net != NETLIST_FINAL && major_v != major_v_hw) { 419 nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x", 420 name, major_v, major_v_hw); 421 goto clean_up; 422 } 423 424 g->gr.ctx_vars.valid = true; 425 g->gr.netlist = net; 426 427 nvgpu_release_firmware(g, netlist_fw); 428 nvgpu_log_fn(g, "done"); 429 goto done; 430 431clean_up: 432 g->gr.ctx_vars.valid = false; 433 nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.inst.l); 434 nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.data.l); 435 nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.inst.l); 436 
nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.data.l); 437 nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle_init.l); 438 nvgpu_kfree(g, g->gr.ctx_vars.sw_method_init.l); 439 nvgpu_kfree(g, g->gr.ctx_vars.sw_ctx_load.l); 440 nvgpu_kfree(g, g->gr.ctx_vars.sw_non_ctx_load.l); 441 nvgpu_kfree(g, g->gr.ctx_vars.sw_veid_bundle_init.l); 442 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.sys.l); 443 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc.l); 444 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.tpc.l); 445 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l); 446 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.ppc.l); 447 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_sys.l); 448 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_gpc.l); 449 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_tpc.l); 450 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ppc.l); 451 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys.l); 452 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp.l); 453 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_gpc.l); 454 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp_router.l); 455 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc_router.l); 456 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ltc.l); 457 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_fbpa.l); 458 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys_router.l); 459 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_pma.l); 460 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_rop.l); 461 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l); 462 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.etpc.l); 463 nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle64_init.l); 464 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_cau.l); 465 nvgpu_release_firmware(g, netlist_fw); 466 err = -ENOENT; 467 } 468 469done: 470 if (g->gr.ctx_vars.valid) { 471 nvgpu_log_info(g, "netlist image %s loaded", name); 472 return 0; 473 } else { 474 nvgpu_err(g, "failed to load netlist image!!"); 475 return err; 476 } 477} 478 479int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr) 480{ 481 if (nvgpu_is_enabled(g, 
NVGPU_IS_FMODEL)) { 482 return gr_gk20a_init_ctx_vars_sim(g, gr); 483 } else { 484 return gr_gk20a_init_ctx_vars_fw(g, gr); 485 } 486}
diff --git a/include/gk20a/gr_ctx_gk20a.h b/include/gk20a/gr_ctx_gk20a.h
deleted file mode 100644
index e75472c..0000000
--- a/include/gk20a/gr_ctx_gk20a.h
+++ /dev/null
@@ -1,206 +0,0 @@ 1/* 2 * GK20A Graphics Context 3 * 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef NVGPU_GK20A_GR_CTX_GK20A_H 25#define NVGPU_GK20A_GR_CTX_GK20A_H 26 27#include <nvgpu/kmem.h> 28 29struct gr_gk20a; 30 31/* emulation netlists, match majorV with HW */ 32#define GK20A_NETLIST_IMAGE_A "NETA_img.bin" 33#define GK20A_NETLIST_IMAGE_B "NETB_img.bin" 34#define GK20A_NETLIST_IMAGE_C "NETC_img.bin" 35#define GK20A_NETLIST_IMAGE_D "NETD_img.bin" 36 37/* 38 * Need to support multiple ARCH in same GPU family 39 * then need to provide path like ARCH/NETIMAGE to 40 * point to correct netimage within GPU family, 41 * Example, gm20x can support gm204 or gm206,so path 42 * for netimage is gm204/NETC_img.bin, and '/' char 43 * will inserted at null terminator char of "GAxxx" 44 * to get complete path like gm204/NETC_img.bin 45 */ 46#define GPU_ARCH "GAxxx" 47 48union __max_name { 49#ifdef GK20A_NETLIST_IMAGE_A 50 char __name_a[sizeof(GK20A_NETLIST_IMAGE_A)]; 51#endif 52#ifdef GK20A_NETLIST_IMAGE_B 53 char __name_b[sizeof(GK20A_NETLIST_IMAGE_B)]; 54#endif 55#ifdef GK20A_NETLIST_IMAGE_C 56 char __name_c[sizeof(GK20A_NETLIST_IMAGE_C)]; 57#endif 58#ifdef GK20A_NETLIST_IMAGE_D 59 char __name_d[sizeof(GK20A_NETLIST_IMAGE_D)]; 60#endif 61}; 62 63#define MAX_NETLIST_NAME (sizeof(GPU_ARCH) + sizeof(union __max_name)) 64 65/* index for emulation netlists */ 66#define NETLIST_FINAL -1 67#define NETLIST_SLOT_A 0 68#define NETLIST_SLOT_B 1 69#define NETLIST_SLOT_C 2 70#define NETLIST_SLOT_D 3 71#define MAX_NETLIST 4 72 73/* netlist regions */ 74#define NETLIST_REGIONID_FECS_UCODE_DATA 0 75#define NETLIST_REGIONID_FECS_UCODE_INST 1 76#define NETLIST_REGIONID_GPCCS_UCODE_DATA 2 77#define NETLIST_REGIONID_GPCCS_UCODE_INST 3 78#define NETLIST_REGIONID_SW_BUNDLE_INIT 4 79#define NETLIST_REGIONID_SW_CTX_LOAD 5 80#define NETLIST_REGIONID_SW_NON_CTX_LOAD 6 81#define NETLIST_REGIONID_SW_METHOD_INIT 7 82#define NETLIST_REGIONID_CTXREG_SYS 8 83#define NETLIST_REGIONID_CTXREG_GPC 9 84#define NETLIST_REGIONID_CTXREG_TPC 10 85#define NETLIST_REGIONID_CTXREG_ZCULL_GPC 11 
86#define NETLIST_REGIONID_CTXREG_PM_SYS 12 87#define NETLIST_REGIONID_CTXREG_PM_GPC 13 88#define NETLIST_REGIONID_CTXREG_PM_TPC 14 89#define NETLIST_REGIONID_MAJORV 15 90#define NETLIST_REGIONID_BUFFER_SIZE 16 91#define NETLIST_REGIONID_CTXSW_REG_BASE_INDEX 17 92#define NETLIST_REGIONID_NETLIST_NUM 18 93#define NETLIST_REGIONID_CTXREG_PPC 19 94#define NETLIST_REGIONID_CTXREG_PMPPC 20 95#define NETLIST_REGIONID_NVPERF_CTXREG_SYS 21 96#define NETLIST_REGIONID_NVPERF_FBP_CTXREGS 22 97#define NETLIST_REGIONID_NVPERF_CTXREG_GPC 23 98#define NETLIST_REGIONID_NVPERF_FBP_ROUTER 24 99#define NETLIST_REGIONID_NVPERF_GPC_ROUTER 25 100#define NETLIST_REGIONID_CTXREG_PMLTC 26 101#define NETLIST_REGIONID_CTXREG_PMFBPA 27 102#define NETLIST_REGIONID_SWVEIDBUNDLEINIT 28 103#define NETLIST_REGIONID_NVPERF_SYS_ROUTER 29 104#define NETLIST_REGIONID_NVPERF_PMA 30 105#define NETLIST_REGIONID_CTXREG_PMROP 31 106#define NETLIST_REGIONID_CTXREG_PMUCGPC 32 107#define NETLIST_REGIONID_CTXREG_ETPC 33 108#define NETLIST_REGIONID_SW_BUNDLE64_INIT 34 109#define NETLIST_REGIONID_NVPERF_PMCAU 35 110 111struct netlist_region { 112 u32 region_id; 113 u32 data_size; 114 u32 data_offset; 115}; 116 117struct netlist_image_header { 118 u32 version; 119 u32 regions; 120}; 121 122struct netlist_image { 123 struct netlist_image_header header; 124 struct netlist_region regions[1]; 125}; 126 127struct av_gk20a { 128 u32 addr; 129 u32 value; 130}; 131struct av64_gk20a { 132 u32 addr; 133 u32 value_lo; 134 u32 value_hi; 135}; 136struct aiv_gk20a { 137 u32 addr; 138 u32 index; 139 u32 value; 140}; 141struct aiv_list_gk20a { 142 struct aiv_gk20a *l; 143 u32 count; 144}; 145struct av_list_gk20a { 146 struct av_gk20a *l; 147 u32 count; 148}; 149struct av64_list_gk20a { 150 struct av64_gk20a *l; 151 u32 count; 152}; 153struct u32_list_gk20a { 154 u32 *l; 155 u32 count; 156}; 157 158struct ctxsw_buf_offset_map_entry { 159 u32 addr; /* Register address */ 160 u32 offset; /* Offset in ctxt switch buffer */ 161}; 162 
163static inline 164struct av_gk20a *alloc_av_list_gk20a(struct gk20a *g, struct av_list_gk20a *avl) 165{ 166 avl->l = nvgpu_kzalloc(g, avl->count * sizeof(*avl->l)); 167 return avl->l; 168} 169 170static inline 171struct av64_gk20a *alloc_av64_list_gk20a(struct gk20a *g, struct av64_list_gk20a *avl) 172{ 173 avl->l = nvgpu_kzalloc(g, avl->count * sizeof(*avl->l)); 174 return avl->l; 175} 176 177static inline 178struct aiv_gk20a *alloc_aiv_list_gk20a(struct gk20a *g, 179 struct aiv_list_gk20a *aivl) 180{ 181 aivl->l = nvgpu_kzalloc(g, aivl->count * sizeof(*aivl->l)); 182 return aivl->l; 183} 184 185static inline 186u32 *alloc_u32_list_gk20a(struct gk20a *g, struct u32_list_gk20a *u32l) 187{ 188 u32l->l = nvgpu_kzalloc(g, u32l->count * sizeof(*u32l->l)); 189 return u32l->l; 190} 191 192struct gr_ucode_gk20a { 193 struct { 194 struct u32_list_gk20a inst; 195 struct u32_list_gk20a data; 196 } gpccs, fecs; 197}; 198 199/* main entry for grctx loading */ 200int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr); 201int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr); 202 203struct gpu_ops; 204void gk20a_init_gr_ctx(struct gpu_ops *gops); 205 206#endif /*NVGPU_GK20A_GR_CTX_GK20A_H*/
diff --git a/include/gk20a/gr_ctx_gk20a_sim.c b/include/gk20a/gr_ctx_gk20a_sim.c
deleted file mode 100644
index ce65c77..0000000
--- a/include/gk20a/gr_ctx_gk20a_sim.c
+++ /dev/null
@@ -1,356 +0,0 @@ 1/* 2 * GK20A Graphics Context for Simulation 3 * 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include "gk20a.h" 26#include <nvgpu/sim.h> 27#include "gr_ctx_gk20a.h" 28 29#include <nvgpu/log.h> 30 31int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) 32{ 33 int err = -ENOMEM; 34 u32 i, temp; 35 36 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_info, 37 "querying grctx info from chiplib"); 38 39 g->gr.ctx_vars.dynamic = true; 40 g->gr.netlist = GR_NETLIST_DYNAMIC; 41 42 if (g->sim->esc_readl == NULL) { 43 nvgpu_err(g, "Invalid pointer to query function."); 44 err = -ENOENT; 45 goto fail; 46 } 47 48 /* query sizes and counts */ 49 g->sim->esc_readl(g, "GRCTX_UCODE_INST_FECS_COUNT", 0, 50 &g->gr.ctx_vars.ucode.fecs.inst.count); 51 g->sim->esc_readl(g, "GRCTX_UCODE_DATA_FECS_COUNT", 0, 52 &g->gr.ctx_vars.ucode.fecs.data.count); 53 g->sim->esc_readl(g, "GRCTX_UCODE_INST_GPCCS_COUNT", 0, 54 &g->gr.ctx_vars.ucode.gpccs.inst.count); 55 g->sim->esc_readl(g, "GRCTX_UCODE_DATA_GPCCS_COUNT", 0, 56 &g->gr.ctx_vars.ucode.gpccs.data.count); 57 g->sim->esc_readl(g, "GRCTX_ALL_CTX_TOTAL_WORDS", 0, &temp); 58 g->gr.ctx_vars.buffer_size = temp << 2; 59 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE_INIT_SIZE", 0, 60 &g->gr.ctx_vars.sw_bundle_init.count); 61 g->sim->esc_readl(g, "GRCTX_SW_METHOD_INIT_SIZE", 0, 62 &g->gr.ctx_vars.sw_method_init.count); 63 g->sim->esc_readl(g, "GRCTX_SW_CTX_LOAD_SIZE", 0, 64 &g->gr.ctx_vars.sw_ctx_load.count); 65 g->sim->esc_readl(g, "GRCTX_SW_VEID_BUNDLE_INIT_SIZE", 0, 66 &g->gr.ctx_vars.sw_veid_bundle_init.count); 67 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE64_INIT_SIZE", 0, 68 &g->gr.ctx_vars.sw_bundle64_init.count); 69 70 g->sim->esc_readl(g, "GRCTX_NONCTXSW_REG_SIZE", 0, 71 &g->gr.ctx_vars.sw_non_ctx_load.count); 72 g->sim->esc_readl(g, "GRCTX_REG_LIST_SYS_COUNT", 0, 73 &g->gr.ctx_vars.ctxsw_regs.sys.count); 74 g->sim->esc_readl(g, "GRCTX_REG_LIST_GPC_COUNT", 0, 75 &g->gr.ctx_vars.ctxsw_regs.gpc.count); 76 g->sim->esc_readl(g, "GRCTX_REG_LIST_TPC_COUNT", 0, 77 &g->gr.ctx_vars.ctxsw_regs.tpc.count); 78 g->sim->esc_readl(g, 
"GRCTX_REG_LIST_ZCULL_GPC_COUNT", 0, 79 &g->gr.ctx_vars.ctxsw_regs.zcull_gpc.count); 80 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_SYS_COUNT", 0, 81 &g->gr.ctx_vars.ctxsw_regs.pm_sys.count); 82 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_GPC_COUNT", 0, 83 &g->gr.ctx_vars.ctxsw_regs.pm_gpc.count); 84 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_TPC_COUNT", 0, 85 &g->gr.ctx_vars.ctxsw_regs.pm_tpc.count); 86 g->sim->esc_readl(g, "GRCTX_REG_LIST_PPC_COUNT", 0, 87 &g->gr.ctx_vars.ctxsw_regs.ppc.count); 88 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC_COUNT", 0, 89 &g->gr.ctx_vars.ctxsw_regs.etpc.count); 90 g->sim->esc_readl(g, "GRCTX_REG_LIST_PPC_COUNT", 0, 91 &g->gr.ctx_vars.ctxsw_regs.ppc.count); 92 93 if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.inst) == NULL) { 94 goto fail; 95 } 96 if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.data) == NULL) { 97 goto fail; 98 } 99 if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.inst) == NULL) { 100 goto fail; 101 } 102 if (alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.data) == NULL) { 103 goto fail; 104 } 105 if (alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_bundle_init) == NULL) { 106 goto fail; 107 } 108 if (alloc_av64_list_gk20a(g, 109 &g->gr.ctx_vars.sw_bundle64_init) == NULL) { 110 goto fail; 111 } 112 if (alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_method_init) == NULL) { 113 goto fail; 114 } 115 if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.sw_ctx_load) == NULL) { 116 goto fail; 117 } 118 if (alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_non_ctx_load) == NULL) { 119 goto fail; 120 } 121 if (alloc_av_list_gk20a(g, 122 &g->gr.ctx_vars.sw_veid_bundle_init) == NULL) { 123 goto fail; 124 } 125 if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.sys) == NULL) { 126 goto fail; 127 } 128 if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.gpc) == NULL) { 129 goto fail; 130 } 131 if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.tpc) == NULL) { 132 goto fail; 133 } 134 if (alloc_aiv_list_gk20a(g, 135 
&g->gr.ctx_vars.ctxsw_regs.zcull_gpc) == NULL) { 136 goto fail; 137 } 138 if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.ppc) == NULL) { 139 goto fail; 140 } 141 if (alloc_aiv_list_gk20a(g, 142 &g->gr.ctx_vars.ctxsw_regs.pm_sys) == NULL) { 143 goto fail; 144 } 145 if (alloc_aiv_list_gk20a(g, 146 &g->gr.ctx_vars.ctxsw_regs.pm_gpc) == NULL) { 147 goto fail; 148 } 149 if (alloc_aiv_list_gk20a(g, 150 &g->gr.ctx_vars.ctxsw_regs.pm_tpc) == NULL) { 151 goto fail; 152 } 153 if (alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.etpc) == NULL) { 154 goto fail; 155 } 156 157 for (i = 0; i < g->gr.ctx_vars.ucode.fecs.inst.count; i++) { 158 g->sim->esc_readl(g, "GRCTX_UCODE_INST_FECS", 159 i, &g->gr.ctx_vars.ucode.fecs.inst.l[i]); 160 } 161 162 for (i = 0; i < g->gr.ctx_vars.ucode.fecs.data.count; i++) { 163 g->sim->esc_readl(g, "GRCTX_UCODE_DATA_FECS", 164 i, &g->gr.ctx_vars.ucode.fecs.data.l[i]); 165 } 166 167 for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.inst.count; i++) { 168 g->sim->esc_readl(g, "GRCTX_UCODE_INST_GPCCS", 169 i, &g->gr.ctx_vars.ucode.gpccs.inst.l[i]); 170 } 171 172 for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.data.count; i++) { 173 g->sim->esc_readl(g, "GRCTX_UCODE_DATA_GPCCS", 174 i, &g->gr.ctx_vars.ucode.gpccs.data.l[i]); 175 } 176 177 for (i = 0; i < g->gr.ctx_vars.sw_bundle_init.count; i++) { 178 struct av_gk20a *l = g->gr.ctx_vars.sw_bundle_init.l; 179 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE_INIT:ADDR", 180 i, &l[i].addr); 181 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE_INIT:VALUE", 182 i, &l[i].value); 183 } 184 185 for (i = 0; i < g->gr.ctx_vars.sw_method_init.count; i++) { 186 struct av_gk20a *l = g->gr.ctx_vars.sw_method_init.l; 187 g->sim->esc_readl(g, "GRCTX_SW_METHOD_INIT:ADDR", 188 i, &l[i].addr); 189 g->sim->esc_readl(g, "GRCTX_SW_METHOD_INIT:VALUE", 190 i, &l[i].value); 191 } 192 193 for (i = 0; i < g->gr.ctx_vars.sw_ctx_load.count; i++) { 194 struct aiv_gk20a *l = g->gr.ctx_vars.sw_ctx_load.l; 195 g->sim->esc_readl(g, "GRCTX_SW_CTX_LOAD:ADDR", 
196 i, &l[i].addr); 197 g->sim->esc_readl(g, "GRCTX_SW_CTX_LOAD:INDEX", 198 i, &l[i].index); 199 g->sim->esc_readl(g, "GRCTX_SW_CTX_LOAD:VALUE", 200 i, &l[i].value); 201 } 202 203 for (i = 0; i < g->gr.ctx_vars.sw_non_ctx_load.count; i++) { 204 struct av_gk20a *l = g->gr.ctx_vars.sw_non_ctx_load.l; 205 g->sim->esc_readl(g, "GRCTX_NONCTXSW_REG:REG", 206 i, &l[i].addr); 207 g->sim->esc_readl(g, "GRCTX_NONCTXSW_REG:VALUE", 208 i, &l[i].value); 209 } 210 211 for (i = 0; i < g->gr.ctx_vars.sw_veid_bundle_init.count; i++) { 212 struct av_gk20a *l = g->gr.ctx_vars.sw_veid_bundle_init.l; 213 214 g->sim->esc_readl(g, "GRCTX_SW_VEID_BUNDLE_INIT:ADDR", 215 i, &l[i].addr); 216 g->sim->esc_readl(g, "GRCTX_SW_VEID_BUNDLE_INIT:VALUE", 217 i, &l[i].value); 218 } 219 220 for (i = 0; i < g->gr.ctx_vars.sw_bundle64_init.count; i++) { 221 struct av64_gk20a *l = g->gr.ctx_vars.sw_bundle64_init.l; 222 223 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE64_INIT:ADDR", 224 i, &l[i].addr); 225 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE64_INIT:VALUE_LO", 226 i, &l[i].value_lo); 227 g->sim->esc_readl(g, "GRCTX_SW_BUNDLE64_INIT:VALUE_HI", 228 i, &l[i].value_hi); 229 } 230 231 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.sys.count; i++) { 232 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.sys.l; 233 g->sim->esc_readl(g, "GRCTX_REG_LIST_SYS:ADDR", 234 i, &l[i].addr); 235 g->sim->esc_readl(g, "GRCTX_REG_LIST_SYS:INDEX", 236 i, &l[i].index); 237 g->sim->esc_readl(g, "GRCTX_REG_LIST_SYS:VALUE", 238 i, &l[i].value); 239 } 240 241 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.gpc.count; i++) { 242 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.gpc.l; 243 g->sim->esc_readl(g, "GRCTX_REG_LIST_GPC:ADDR", 244 i, &l[i].addr); 245 g->sim->esc_readl(g, "GRCTX_REG_LIST_GPC:INDEX", 246 i, &l[i].index); 247 g->sim->esc_readl(g, "GRCTX_REG_LIST_GPC:VALUE", 248 i, &l[i].value); 249 } 250 251 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.tpc.count; i++) { 252 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.tpc.l; 253 
g->sim->esc_readl(g, "GRCTX_REG_LIST_TPC:ADDR", 254 i, &l[i].addr); 255 g->sim->esc_readl(g, "GRCTX_REG_LIST_TPC:INDEX", 256 i, &l[i].index); 257 g->sim->esc_readl(g, "GRCTX_REG_LIST_TPC:VALUE", 258 i, &l[i].value); 259 } 260 261 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.ppc.count; i++) { 262 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.ppc.l; 263 g->sim->esc_readl(g, "GRCTX_REG_LIST_PPC:ADDR", 264 i, &l[i].addr); 265 g->sim->esc_readl(g, "GRCTX_REG_LIST_PPC:INDEX", 266 i, &l[i].index); 267 g->sim->esc_readl(g, "GRCTX_REG_LIST_PPC:VALUE", 268 i, &l[i].value); 269 } 270 271 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.zcull_gpc.count; i++) { 272 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l; 273 g->sim->esc_readl(g, "GRCTX_REG_LIST_ZCULL_GPC:ADDR", 274 i, &l[i].addr); 275 g->sim->esc_readl(g, "GRCTX_REG_LIST_ZCULL_GPC:INDEX", 276 i, &l[i].index); 277 g->sim->esc_readl(g, "GRCTX_REG_LIST_ZCULL_GPC:VALUE", 278 i, &l[i].value); 279 } 280 281 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.pm_sys.count; i++) { 282 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.pm_sys.l; 283 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_SYS:ADDR", 284 i, &l[i].addr); 285 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_SYS:INDEX", 286 i, &l[i].index); 287 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_SYS:VALUE", 288 i, &l[i].value); 289 } 290 291 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.pm_gpc.count; i++) { 292 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.pm_gpc.l; 293 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_GPC:ADDR", 294 i, &l[i].addr); 295 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_GPC:INDEX", 296 i, &l[i].index); 297 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_GPC:VALUE", 298 i, &l[i].value); 299 } 300 301 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.pm_tpc.count; i++) { 302 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.pm_tpc.l; 303 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_TPC:ADDR", 304 i, &l[i].addr); 305 g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_TPC:INDEX", 306 i, &l[i].index); 307 
g->sim->esc_readl(g, "GRCTX_REG_LIST_PM_TPC:VALUE", 308 i, &l[i].value); 309 } 310 311 nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "query GRCTX_REG_LIST_ETPC"); 312 for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) { 313 struct aiv_gk20a *l = g->gr.ctx_vars.ctxsw_regs.etpc.l; 314 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:ADDR", 315 i, &l[i].addr); 316 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:INDEX", 317 i, &l[i].index); 318 g->sim->esc_readl(g, "GRCTX_REG_LIST_ETPC:VALUE", 319 i, &l[i].value); 320 nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, 321 "addr:0x%#08x index:0x%08x value:0x%08x", 322 l[i].addr, l[i].index, l[i].value); 323 } 324 325 g->gr.ctx_vars.valid = true; 326 327 g->sim->esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0, 328 &g->gr.ctx_vars.regs_base_index); 329 330 nvgpu_log(g, gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); 331 return 0; 332fail: 333 nvgpu_err(g, "failed querying grctx info from chiplib"); 334 335 nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.inst.l); 336 nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.data.l); 337 nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.inst.l); 338 nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.data.l); 339 nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle_init.l); 340 nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle64_init.l); 341 nvgpu_kfree(g, g->gr.ctx_vars.sw_method_init.l); 342 nvgpu_kfree(g, g->gr.ctx_vars.sw_ctx_load.l); 343 nvgpu_kfree(g, g->gr.ctx_vars.sw_non_ctx_load.l); 344 nvgpu_kfree(g, g->gr.ctx_vars.sw_veid_bundle_init.l); 345 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.sys.l); 346 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc.l); 347 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.tpc.l); 348 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l); 349 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.ppc.l); 350 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_sys.l); 351 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_gpc.l); 352 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_tpc.l); 353 nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.etpc.l); 
354 355 return err; 356}
diff --git a/include/gk20a/gr_gk20a.c b/include/gk20a/gr_gk20a.c
deleted file mode 100644
index 1eda853..0000000
--- a/include/gk20a/gr_gk20a.c
+++ /dev/null
@@ -1,9090 +0,0 @@ 1/* 2 * GK20A Graphics 3 * 4 * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include <nvgpu/dma.h> 26#include <nvgpu/kmem.h> 27#include <nvgpu/gmmu.h> 28#include <nvgpu/timers.h> 29#include <nvgpu/nvgpu_common.h> 30#include <nvgpu/log.h> 31#include <nvgpu/bsearch.h> 32#include <nvgpu/sort.h> 33#include <nvgpu/bug.h> 34#include <nvgpu/firmware.h> 35#include <nvgpu/enabled.h> 36#include <nvgpu/debug.h> 37#include <nvgpu/barrier.h> 38#include <nvgpu/mm.h> 39#include <nvgpu/ctxsw_trace.h> 40#include <nvgpu/error_notifier.h> 41#include <nvgpu/ecc.h> 42#include <nvgpu/io.h> 43#include <nvgpu/utils.h> 44#include <nvgpu/channel.h> 45#include <nvgpu/unit.h> 46#include <nvgpu/power_features/pg.h> 47#include <nvgpu/power_features/cg.h> 48 49#include "gk20a.h" 50#include "gr_gk20a.h" 51#include "gk20a/fecs_trace_gk20a.h" 52#include "gr_ctx_gk20a.h" 53#include "gr_pri_gk20a.h" 54#include "regops_gk20a.h" 55#include "dbg_gpu_gk20a.h" 56 57#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h> 58#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h> 59#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h> 60#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 61#include <nvgpu/hw/gk20a/hw_gmmu_gk20a.h> 62#include <nvgpu/hw/gk20a/hw_mc_gk20a.h> 63#include <nvgpu/hw/gk20a/hw_ram_gk20a.h> 64#include <nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h> 65#include <nvgpu/hw/gk20a/hw_top_gk20a.h> 66#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h> 67 68#define BLK_SIZE (256) 69#define NV_PERF_PMM_FBP_ROUTER_STRIDE 0x0200 70#define NV_PERF_PMMGPCROUTER_STRIDE 0x0200 71#define NV_PCFG_BASE 0x00088000 72#define NV_XBAR_MXBAR_PRI_GPC_GNIC_STRIDE 0x0020 73#define FE_PWR_MODE_TIMEOUT_MAX 2000 74#define FE_PWR_MODE_TIMEOUT_DEFAULT 10 75#define CTXSW_MEM_SCRUBBING_TIMEOUT_MAX 1000 76#define CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 77#define FECS_ARB_CMD_TIMEOUT_MAX 40 78#define FECS_ARB_CMD_TIMEOUT_DEFAULT 2 79 80static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g); 81 82static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g, 83 struct vm_gk20a *vm, 84 struct nvgpu_gr_ctx *gr_ctx); 85 86/* channel patch ctx buffer 
*/ 87static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, 88 struct channel_gk20a *c); 89static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g, 90 struct vm_gk20a *vm, 91 struct nvgpu_gr_ctx *gr_ctx); 92 93/* golden ctx image */ 94static int gr_gk20a_init_golden_ctx_image(struct gk20a *g, 95 struct channel_gk20a *c); 96 97int gr_gk20a_get_ctx_id(struct gk20a *g, 98 struct channel_gk20a *c, 99 u32 *ctx_id) 100{ 101 struct tsg_gk20a *tsg; 102 struct nvgpu_gr_ctx *gr_ctx = NULL; 103 struct nvgpu_mem *mem = NULL; 104 105 tsg = tsg_gk20a_from_ch(c); 106 if (tsg == NULL) { 107 return -EINVAL; 108 } 109 110 gr_ctx = &tsg->gr_ctx; 111 mem = &gr_ctx->mem; 112 113 /* Channel gr_ctx buffer is gpu cacheable. 114 Flush and invalidate before cpu update. */ 115 g->ops.mm.l2_flush(g, true); 116 117 *ctx_id = nvgpu_mem_rd(g, mem, 118 ctxsw_prog_main_image_context_id_o()); 119 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", *ctx_id); 120 121 return 0; 122} 123 124void gk20a_gpccs_dump_falcon_stats(struct gk20a *g) 125{ 126 unsigned int i; 127 128 nvgpu_err(g, "gr_gpc0_gpccs_falcon_irqstat : %d", 129 gk20a_readl(g, gr_gpc0_gpccs_falcon_irqstat_r())); 130 nvgpu_err(g, "gr_gpc0_gpccs_falcon_irqmode : %d", 131 gk20a_readl(g, gr_gpc0_gpccs_falcon_irqmode_r())); 132 nvgpu_err(g, "gr_gpc0_gpccs_falcon_irqmask : %d", 133 gk20a_readl(g, gr_gpc0_gpccs_falcon_irqmask_r())); 134 nvgpu_err(g, "gr_gpc0_gpccs_falcon_irqdest : %d", 135 gk20a_readl(g, gr_gpc0_gpccs_falcon_irqdest_r())); 136 nvgpu_err(g, "gr_gpc0_gpccs_falcon_debug1 : %d", 137 gk20a_readl(g, gr_gpc0_gpccs_falcon_debug1_r())); 138 nvgpu_err(g, "gr_gpc0_gpccs_falcon_debuginfo : %d", 139 gk20a_readl(g, gr_gpc0_gpccs_falcon_debuginfo_r())); 140 nvgpu_err(g, "gr_gpc0_gpccs_falcon_engctl : %d", 141 gk20a_readl(g, gr_gpc0_gpccs_falcon_engctl_r())); 142 nvgpu_err(g, "gr_gpc0_gpccs_falcon_curctx : %d", 143 gk20a_readl(g, gr_gpc0_gpccs_falcon_curctx_r())); 144 nvgpu_err(g, "gr_gpc0_gpccs_falcon_nxtctx : %d", 145 
gk20a_readl(g, gr_gpc0_gpccs_falcon_nxtctx_r())); 146 nvgpu_err(g, "gr_gpc0_gpccs_ctxsw_status_1 : %d", 147 gk20a_readl(g, gr_gpc0_gpccs_ctxsw_status_1_r())); 148 149 for (i = 0; i < g->ops.gr.gpc0_gpccs_ctxsw_mailbox_size(); i++) { 150 nvgpu_err(g, "gr_gpc0_gpccs_ctxsw_mailbox_r(%d) : 0x%x", 151 i, gk20a_readl(g, gr_gpc0_gpccs_ctxsw_mailbox_r(i))); 152 } 153 154 155 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 156 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 157 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); 158 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_IMB : 0x%x", 159 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 160 161 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 162 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 163 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); 164 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_DMB : 0x%x", 165 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 166 167 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 168 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 169 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); 170 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_CSW : 0x%x", 171 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 172 173 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 174 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 175 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); 176 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_CTX : 0x%x", 177 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 178 179 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 180 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 181 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); 182 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_EXCI : 0x%x", 183 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 184 185 186 for (i = 0; i < 4U; i++) { 187 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 188 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 189 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC)); 190 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_PC : 0x%x", 
191 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 192 193 gk20a_writel(g, gr_gpc0_gpccs_falcon_icd_cmd_r(), 194 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f() | 195 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP)); 196 nvgpu_err(g, "GPC0_GPCCS_FALCON_REG_SP : 0x%x", 197 gk20a_readl(g, gr_gpc_gpccs_falcon_icd_rdata_r())); 198 } 199} 200 201void gk20a_fecs_dump_falcon_stats(struct gk20a *g) 202{ 203 unsigned int i; 204 205 nvgpu_err(g, "gr_fecs_os_r : %d", 206 gk20a_readl(g, gr_fecs_os_r())); 207 nvgpu_err(g, "gr_fecs_cpuctl_r : 0x%x", 208 gk20a_readl(g, gr_fecs_cpuctl_r())); 209 nvgpu_err(g, "gr_fecs_idlestate_r : 0x%x", 210 gk20a_readl(g, gr_fecs_idlestate_r())); 211 nvgpu_err(g, "gr_fecs_mailbox0_r : 0x%x", 212 gk20a_readl(g, gr_fecs_mailbox0_r())); 213 nvgpu_err(g, "gr_fecs_mailbox1_r : 0x%x", 214 gk20a_readl(g, gr_fecs_mailbox1_r())); 215 nvgpu_err(g, "gr_fecs_irqstat_r : 0x%x", 216 gk20a_readl(g, gr_fecs_irqstat_r())); 217 nvgpu_err(g, "gr_fecs_irqmode_r : 0x%x", 218 gk20a_readl(g, gr_fecs_irqmode_r())); 219 nvgpu_err(g, "gr_fecs_irqmask_r : 0x%x", 220 gk20a_readl(g, gr_fecs_irqmask_r())); 221 nvgpu_err(g, "gr_fecs_irqdest_r : 0x%x", 222 gk20a_readl(g, gr_fecs_irqdest_r())); 223 nvgpu_err(g, "gr_fecs_debug1_r : 0x%x", 224 gk20a_readl(g, gr_fecs_debug1_r())); 225 nvgpu_err(g, "gr_fecs_debuginfo_r : 0x%x", 226 gk20a_readl(g, gr_fecs_debuginfo_r())); 227 nvgpu_err(g, "gr_fecs_ctxsw_status_1_r : 0x%x", 228 gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); 229 230 for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) { 231 nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x", 232 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i))); 233 } 234 235 nvgpu_err(g, "gr_fecs_engctl_r : 0x%x", 236 gk20a_readl(g, gr_fecs_engctl_r())); 237 nvgpu_err(g, "gr_fecs_curctx_r : 0x%x", 238 gk20a_readl(g, gr_fecs_curctx_r())); 239 nvgpu_err(g, "gr_fecs_nxtctx_r : 0x%x", 240 gk20a_readl(g, gr_fecs_nxtctx_r())); 241 242 gk20a_writel(g, gr_fecs_icd_cmd_r(), 243 gr_fecs_icd_cmd_opc_rreg_f() 
| 244 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); 245 nvgpu_err(g, "FECS_FALCON_REG_IMB : 0x%x", 246 gk20a_readl(g, gr_fecs_icd_rdata_r())); 247 248 gk20a_writel(g, gr_fecs_icd_cmd_r(), 249 gr_fecs_icd_cmd_opc_rreg_f() | 250 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); 251 nvgpu_err(g, "FECS_FALCON_REG_DMB : 0x%x", 252 gk20a_readl(g, gr_fecs_icd_rdata_r())); 253 254 gk20a_writel(g, gr_fecs_icd_cmd_r(), 255 gr_fecs_icd_cmd_opc_rreg_f() | 256 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); 257 nvgpu_err(g, "FECS_FALCON_REG_CSW : 0x%x", 258 gk20a_readl(g, gr_fecs_icd_rdata_r())); 259 260 gk20a_writel(g, gr_fecs_icd_cmd_r(), 261 gr_fecs_icd_cmd_opc_rreg_f() | 262 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); 263 nvgpu_err(g, "FECS_FALCON_REG_CTX : 0x%x", 264 gk20a_readl(g, gr_fecs_icd_rdata_r())); 265 266 gk20a_writel(g, gr_fecs_icd_cmd_r(), 267 gr_fecs_icd_cmd_opc_rreg_f() | 268 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); 269 nvgpu_err(g, "FECS_FALCON_REG_EXCI : 0x%x", 270 gk20a_readl(g, gr_fecs_icd_rdata_r())); 271 272 for (i = 0; i < 4; i++) { 273 gk20a_writel(g, gr_fecs_icd_cmd_r(), 274 gr_fecs_icd_cmd_opc_rreg_f() | 275 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC)); 276 nvgpu_err(g, "FECS_FALCON_REG_PC : 0x%x", 277 gk20a_readl(g, gr_fecs_icd_rdata_r())); 278 279 gk20a_writel(g, gr_fecs_icd_cmd_r(), 280 gr_fecs_icd_cmd_opc_rreg_f() | 281 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP)); 282 nvgpu_err(g, "FECS_FALCON_REG_SP : 0x%x", 283 gk20a_readl(g, gr_fecs_icd_rdata_r())); 284 } 285} 286 287static void gr_gk20a_load_falcon_dmem(struct gk20a *g) 288{ 289 u32 i, ucode_u32_size; 290 const u32 *ucode_u32_data; 291 u32 checksum; 292 293 nvgpu_log_fn(g, " "); 294 295 gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) | 296 gr_gpccs_dmemc_blk_f(0) | 297 gr_gpccs_dmemc_aincw_f(1))); 298 299 ucode_u32_size = g->gr.ctx_vars.ucode.gpccs.data.count; 300 ucode_u32_data = (const u32 *)g->gr.ctx_vars.ucode.gpccs.data.l; 301 302 for (i = 0, checksum = 0; i < ucode_u32_size; i++) { 
303 gk20a_writel(g, gr_gpccs_dmemd_r(0), ucode_u32_data[i]); 304 checksum += ucode_u32_data[i]; 305 } 306 307 gk20a_writel(g, gr_fecs_dmemc_r(0), (gr_fecs_dmemc_offs_f(0) | 308 gr_fecs_dmemc_blk_f(0) | 309 gr_fecs_dmemc_aincw_f(1))); 310 311 ucode_u32_size = g->gr.ctx_vars.ucode.fecs.data.count; 312 ucode_u32_data = (const u32 *)g->gr.ctx_vars.ucode.fecs.data.l; 313 314 for (i = 0, checksum = 0; i < ucode_u32_size; i++) { 315 gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); 316 checksum += ucode_u32_data[i]; 317 } 318 nvgpu_log_fn(g, "done"); 319} 320 321static void gr_gk20a_load_falcon_imem(struct gk20a *g) 322{ 323 u32 cfg, fecs_imem_size, gpccs_imem_size, ucode_u32_size; 324 const u32 *ucode_u32_data; 325 u32 tag, i, pad_start, pad_end; 326 u32 checksum; 327 328 nvgpu_log_fn(g, " "); 329 330 cfg = gk20a_readl(g, gr_fecs_cfg_r()); 331 fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); 332 333 cfg = gk20a_readl(g, gr_gpc0_cfg_r()); 334 gpccs_imem_size = gr_gpc0_cfg_imem_sz_v(cfg); 335 336 /* Use the broadcast address to access all of the GPCCS units. */ 337 gk20a_writel(g, gr_gpccs_imemc_r(0), (gr_gpccs_imemc_offs_f(0) | 338 gr_gpccs_imemc_blk_f(0) | 339 gr_gpccs_imemc_aincw_f(1))); 340 341 /* Setup the tags for the instruction memory. 
*/ 342 tag = 0; 343 gk20a_writel(g, gr_gpccs_imemt_r(0), gr_gpccs_imemt_tag_f(tag)); 344 345 ucode_u32_size = g->gr.ctx_vars.ucode.gpccs.inst.count; 346 ucode_u32_data = (const u32 *)g->gr.ctx_vars.ucode.gpccs.inst.l; 347 348 for (i = 0, checksum = 0; i < ucode_u32_size; i++) { 349 if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) { 350 tag++; 351 gk20a_writel(g, gr_gpccs_imemt_r(0), 352 gr_gpccs_imemt_tag_f(tag)); 353 } 354 gk20a_writel(g, gr_gpccs_imemd_r(0), ucode_u32_data[i]); 355 checksum += ucode_u32_data[i]; 356 } 357 358 pad_start = i * 4U; 359 pad_end = pad_start + (256U - pad_start % 256U) + 256U; 360 for (i = pad_start; 361 (i < gpccs_imem_size * 256U) && (i < pad_end); 362 i += 4U) { 363 if ((i != 0U) && ((i % 256U) == 0U)) { 364 tag++; 365 gk20a_writel(g, gr_gpccs_imemt_r(0), 366 gr_gpccs_imemt_tag_f(tag)); 367 } 368 gk20a_writel(g, gr_gpccs_imemd_r(0), 0); 369 } 370 371 gk20a_writel(g, gr_fecs_imemc_r(0), (gr_fecs_imemc_offs_f(0) | 372 gr_fecs_imemc_blk_f(0) | 373 gr_fecs_imemc_aincw_f(1))); 374 375 /* Setup the tags for the instruction memory. 
*/ 376 tag = 0; 377 gk20a_writel(g, gr_fecs_imemt_r(0), gr_fecs_imemt_tag_f(tag)); 378 379 ucode_u32_size = g->gr.ctx_vars.ucode.fecs.inst.count; 380 ucode_u32_data = (const u32 *)g->gr.ctx_vars.ucode.fecs.inst.l; 381 382 for (i = 0, checksum = 0; i < ucode_u32_size; i++) { 383 if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) { 384 tag++; 385 gk20a_writel(g, gr_fecs_imemt_r(0), 386 gr_fecs_imemt_tag_f(tag)); 387 } 388 gk20a_writel(g, gr_fecs_imemd_r(0), ucode_u32_data[i]); 389 checksum += ucode_u32_data[i]; 390 } 391 392 pad_start = i * 4U; 393 pad_end = pad_start + (256U - pad_start % 256U) + 256U; 394 for (i = pad_start; 395 (i < fecs_imem_size * 256U) && i < pad_end; 396 i += 4U) { 397 if ((i != 0U) && ((i % 256U) == 0U)) { 398 tag++; 399 gk20a_writel(g, gr_fecs_imemt_r(0), 400 gr_fecs_imemt_tag_f(tag)); 401 } 402 gk20a_writel(g, gr_fecs_imemd_r(0), 0); 403 } 404} 405 406int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, 407 u32 expect_delay) 408{ 409 u32 delay = expect_delay; 410 bool ctxsw_active; 411 bool gr_busy; 412 u32 gr_engine_id; 413 u32 engine_status; 414 bool ctx_status_invalid; 415 struct nvgpu_timeout timeout; 416 417 nvgpu_log_fn(g, " "); 418 419 gr_engine_id = gk20a_fifo_get_gr_engine_id(g); 420 421 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 422 423 do { 424 /* fmodel: host gets fifo_engine_status(gr) from gr 425 only when gr_status is read */ 426 (void) gk20a_readl(g, gr_status_r()); 427 428 engine_status = gk20a_readl(g, 429 fifo_engine_status_r(gr_engine_id)); 430 431 ctxsw_active = engine_status & 432 fifo_engine_status_ctxsw_in_progress_f(); 433 434 ctx_status_invalid = 435 (fifo_engine_status_ctx_status_v(engine_status) == 436 fifo_engine_status_ctx_status_invalid_v()); 437 438 gr_busy = gk20a_readl(g, gr_engine_status_r()) & 439 gr_engine_status_value_busy_f(); 440 441 if (ctx_status_invalid || (!gr_busy && !ctxsw_active)) { 442 nvgpu_log_fn(g, "done"); 443 return 0; 444 } 445 446 
nvgpu_usleep_range(delay, delay * 2); 447 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 448 449 } while (nvgpu_timeout_expired(&timeout) == 0); 450 451 nvgpu_err(g, 452 "timeout, ctxsw busy : %d, gr busy : %d", 453 ctxsw_active, gr_busy); 454 455 return -EAGAIN; 456} 457 458int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, 459 u32 expect_delay) 460{ 461 u32 val; 462 u32 delay = expect_delay; 463 struct nvgpu_timeout timeout; 464 465 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { 466 return 0; 467 } 468 469 nvgpu_log_fn(g, " "); 470 471 nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); 472 473 do { 474 val = gk20a_readl(g, gr_status_r()); 475 476 if (gr_status_fe_method_lower_v(val) == 0U) { 477 nvgpu_log_fn(g, "done"); 478 return 0; 479 } 480 481 nvgpu_usleep_range(delay, delay * 2); 482 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 483 } while (nvgpu_timeout_expired(&timeout) == 0); 484 485 nvgpu_err(g, 486 "timeout, fe busy : %x", val); 487 488 return -EAGAIN; 489} 490 491int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, 492 u32 *mailbox_ret, u32 opc_success, 493 u32 mailbox_ok, u32 opc_fail, 494 u32 mailbox_fail, bool sleepduringwait) 495{ 496 struct nvgpu_timeout timeout; 497 u32 delay = GR_FECS_POLL_INTERVAL; 498 u32 check = WAIT_UCODE_LOOP; 499 u32 reg; 500 501 nvgpu_log_fn(g, " "); 502 503 if (sleepduringwait) { 504 delay = GR_IDLE_CHECK_DEFAULT; 505 } 506 507 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), 508 NVGPU_TIMER_CPU_TIMER); 509 510 while (check == WAIT_UCODE_LOOP) { 511 if (nvgpu_timeout_expired(&timeout)) { 512 check = WAIT_UCODE_TIMEOUT; 513 } 514 515 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(mailbox_id)); 516 517 if (mailbox_ret) { 518 *mailbox_ret = reg; 519 } 520 521 switch (opc_success) { 522 case GR_IS_UCODE_OP_EQUAL: 523 if (reg == mailbox_ok) { 524 check = WAIT_UCODE_OK; 525 } 526 break; 527 case GR_IS_UCODE_OP_NOT_EQUAL: 528 if (reg != mailbox_ok) { 529 check = 
WAIT_UCODE_OK; 530 } 531 break; 532 case GR_IS_UCODE_OP_AND: 533 if (reg & mailbox_ok) { 534 check = WAIT_UCODE_OK; 535 } 536 break; 537 case GR_IS_UCODE_OP_LESSER: 538 if (reg < mailbox_ok) { 539 check = WAIT_UCODE_OK; 540 } 541 break; 542 case GR_IS_UCODE_OP_LESSER_EQUAL: 543 if (reg <= mailbox_ok) { 544 check = WAIT_UCODE_OK; 545 } 546 break; 547 case GR_IS_UCODE_OP_SKIP: 548 /* do no success check */ 549 break; 550 default: 551 nvgpu_err(g, 552 "invalid success opcode 0x%x", opc_success); 553 554 check = WAIT_UCODE_ERROR; 555 break; 556 } 557 558 switch (opc_fail) { 559 case GR_IS_UCODE_OP_EQUAL: 560 if (reg == mailbox_fail) { 561 check = WAIT_UCODE_ERROR; 562 } 563 break; 564 case GR_IS_UCODE_OP_NOT_EQUAL: 565 if (reg != mailbox_fail) { 566 check = WAIT_UCODE_ERROR; 567 } 568 break; 569 case GR_IS_UCODE_OP_AND: 570 if (reg & mailbox_fail) { 571 check = WAIT_UCODE_ERROR; 572 } 573 break; 574 case GR_IS_UCODE_OP_LESSER: 575 if (reg < mailbox_fail) { 576 check = WAIT_UCODE_ERROR; 577 } 578 break; 579 case GR_IS_UCODE_OP_LESSER_EQUAL: 580 if (reg <= mailbox_fail) { 581 check = WAIT_UCODE_ERROR; 582 } 583 break; 584 case GR_IS_UCODE_OP_SKIP: 585 /* do no check on fail*/ 586 break; 587 default: 588 nvgpu_err(g, 589 "invalid fail opcode 0x%x", opc_fail); 590 check = WAIT_UCODE_ERROR; 591 break; 592 } 593 594 if (sleepduringwait) { 595 nvgpu_usleep_range(delay, delay * 2); 596 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 597 } else { 598 nvgpu_udelay(delay); 599 } 600 } 601 602 if (check == WAIT_UCODE_TIMEOUT) { 603 nvgpu_err(g, 604 "timeout waiting on mailbox=%d value=0x%08x", 605 mailbox_id, reg); 606 gk20a_fecs_dump_falcon_stats(g); 607 gk20a_gpccs_dump_falcon_stats(g); 608 gk20a_gr_debug_dump(g); 609 return -1; 610 } else if (check == WAIT_UCODE_ERROR) { 611 nvgpu_err(g, 612 "ucode method failed on mailbox=%d value=0x%08x", 613 mailbox_id, reg); 614 gk20a_fecs_dump_falcon_stats(g); 615 gk20a_gpccs_dump_falcon_stats(g); 616 return -1; 617 } 618 619 
nvgpu_log_fn(g, "done"); 620 return 0; 621} 622 623int gr_gk20a_submit_fecs_method_op_locked(struct gk20a *g, 624 struct fecs_method_op_gk20a op, 625 bool sleepduringwait) 626{ 627 int ret; 628 629 if (op.mailbox.id != 0) { 630 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(op.mailbox.id), 631 op.mailbox.data); 632 } 633 634 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 635 gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr)); 636 637 gk20a_writel(g, gr_fecs_method_data_r(), op.method.data); 638 gk20a_writel(g, gr_fecs_method_push_r(), 639 gr_fecs_method_push_adr_f(op.method.addr)); 640 641 /* op.mailbox.id == 4 cases require waiting for completion on 642 * for op.mailbox.id == 0 */ 643 if (op.mailbox.id == 4) { 644 op.mailbox.id = 0; 645 } 646 647 ret = gr_gk20a_ctx_wait_ucode(g, op.mailbox.id, op.mailbox.ret, 648 op.cond.ok, op.mailbox.ok, 649 op.cond.fail, op.mailbox.fail, 650 sleepduringwait); 651 if (ret) { 652 nvgpu_err(g,"fecs method: data=0x%08x push adr=0x%08x", 653 op.method.data, op.method.addr); 654 } 655 656 return ret; 657} 658 659/* The following is a less brittle way to call gr_gk20a_submit_fecs_method(...) 660 * We should replace most, if not all, fecs method calls to this instead. 
*/ 661int gr_gk20a_submit_fecs_method_op(struct gk20a *g, 662 struct fecs_method_op_gk20a op, 663 bool sleepduringwait) 664{ 665 struct gr_gk20a *gr = &g->gr; 666 int ret; 667 668 nvgpu_mutex_acquire(&gr->fecs_mutex); 669 670 ret = gr_gk20a_submit_fecs_method_op_locked(g, op, sleepduringwait); 671 672 nvgpu_mutex_release(&gr->fecs_mutex); 673 674 return ret; 675} 676 677/* Sideband mailbox writes are done a bit differently */ 678int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g, 679 struct fecs_method_op_gk20a op) 680{ 681 struct gr_gk20a *gr = &g->gr; 682 int ret; 683 684 nvgpu_mutex_acquire(&gr->fecs_mutex); 685 686 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(op.mailbox.id), 687 gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr)); 688 689 gk20a_writel(g, gr_fecs_method_data_r(), op.method.data); 690 gk20a_writel(g, gr_fecs_method_push_r(), 691 gr_fecs_method_push_adr_f(op.method.addr)); 692 693 ret = gr_gk20a_ctx_wait_ucode(g, op.mailbox.id, op.mailbox.ret, 694 op.cond.ok, op.mailbox.ok, 695 op.cond.fail, op.mailbox.fail, 696 false); 697 if (ret) { 698 nvgpu_err(g,"fecs method: data=0x%08x push adr=0x%08x", 699 op.method.data, op.method.addr); 700 } 701 702 nvgpu_mutex_release(&gr->fecs_mutex); 703 704 return ret; 705} 706 707static int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret) 708{ 709 return gr_gk20a_submit_fecs_method_op(g, 710 (struct fecs_method_op_gk20a) { 711 .method.addr = fecs_method, 712 .method.data = ~0, 713 .mailbox = { .id = 1, /*sideband?*/ 714 .data = ~0, .clr = ~0, .ret = ret, 715 .ok = gr_fecs_ctxsw_mailbox_value_pass_v(), 716 .fail = gr_fecs_ctxsw_mailbox_value_fail_v(), }, 717 .cond.ok = GR_IS_UCODE_OP_EQUAL, 718 .cond.fail = GR_IS_UCODE_OP_EQUAL }, true); 719} 720 721/** 722 * Stop processing (stall) context switches at FECS:- 723 * If fecs is sent stop_ctxsw method, elpg entry/exit cannot happen 724 * and may timeout. 
It could manifest as different error signatures 725 * depending on when stop_ctxsw fecs method gets sent with respect 726 * to pmu elpg sequence. It could come as pmu halt or abort or 727 * maybe ext error too. 728*/ 729int gr_gk20a_disable_ctxsw(struct gk20a *g) 730{ 731 int err = 0; 732 733 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); 734 735 nvgpu_mutex_acquire(&g->ctxsw_disable_lock); 736 g->ctxsw_disable_count++; 737 if (g->ctxsw_disable_count == 1) { 738 err = nvgpu_pg_elpg_disable(g); 739 if (err != 0) { 740 nvgpu_err(g, "failed to disable elpg. not safe to " 741 "stop_ctxsw"); 742 /* stop ctxsw command is not sent */ 743 g->ctxsw_disable_count--; 744 } else { 745 err = gr_gk20a_ctrl_ctxsw(g, 746 gr_fecs_method_push_adr_stop_ctxsw_v(), NULL); 747 if (err != 0) { 748 nvgpu_err(g, "failed to stop fecs ctxsw"); 749 /* stop ctxsw failed */ 750 g->ctxsw_disable_count--; 751 } 752 } 753 } else { 754 nvgpu_log_info(g, "ctxsw disabled, ctxsw_disable_count: %d", 755 g->ctxsw_disable_count); 756 } 757 nvgpu_mutex_release(&g->ctxsw_disable_lock); 758 759 return err; 760} 761 762/* Start processing (continue) context switches at FECS */ 763int gr_gk20a_enable_ctxsw(struct gk20a *g) 764{ 765 int err = 0; 766 767 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); 768 769 nvgpu_mutex_acquire(&g->ctxsw_disable_lock); 770 771 if (g->ctxsw_disable_count == 0) { 772 goto ctxsw_already_enabled; 773 } 774 g->ctxsw_disable_count--; 775 WARN_ON(g->ctxsw_disable_count < 0); 776 if (g->ctxsw_disable_count == 0) { 777 err = gr_gk20a_ctrl_ctxsw(g, 778 gr_fecs_method_push_adr_start_ctxsw_v(), NULL); 779 if (err != 0) { 780 nvgpu_err(g, "failed to start fecs ctxsw"); 781 } else { 782 if (nvgpu_pg_elpg_enable(g) != 0) { 783 nvgpu_err(g, "failed to enable elpg " 784 "after start_ctxsw"); 785 } 786 } 787 } else { 788 nvgpu_log_info(g, "ctxsw_disable_count: %d is not 0 yet", 789 g->ctxsw_disable_count); 790 } 791ctxsw_already_enabled: 792 nvgpu_mutex_release(&g->ctxsw_disable_lock); 793 
794 return err; 795} 796 797int gr_gk20a_halt_pipe(struct gk20a *g) 798{ 799 return gr_gk20a_submit_fecs_method_op(g, 800 (struct fecs_method_op_gk20a) { 801 .method.addr = 802 gr_fecs_method_push_adr_halt_pipeline_v(), 803 .method.data = ~0, 804 .mailbox = { .id = 1, /*sideband?*/ 805 .data = ~0, .clr = ~0, .ret = NULL, 806 .ok = gr_fecs_ctxsw_mailbox_value_pass_v(), 807 .fail = gr_fecs_ctxsw_mailbox_value_fail_v(), }, 808 .cond.ok = GR_IS_UCODE_OP_EQUAL, 809 .cond.fail = GR_IS_UCODE_OP_EQUAL }, false); 810} 811 812 813int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va) 814{ 815 u32 addr_lo; 816 u32 addr_hi; 817 818 nvgpu_log_fn(c->g, " "); 819 820 addr_lo = u64_lo32(gpu_va) >> 12; 821 addr_hi = u64_hi32(gpu_va); 822 823 nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(), 824 ram_in_gr_cs_wfi_f() | ram_in_gr_wfi_mode_virtual_f() | 825 ram_in_gr_wfi_ptr_lo_f(addr_lo)); 826 827 nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(), 828 ram_in_gr_wfi_ptr_hi_f(addr_hi)); 829 830 return 0; 831} 832 833/* 834 * Context state can be written directly, or "patched" at times. So that code 835 * can be used in either situation it is written using a series of 836 * _ctx_patch_write(..., patch) statements. However any necessary map overhead 837 * should be minimized; thus, bundle the sequence of these writes together, and 838 * set them up and close with _ctx_patch_write_begin/_ctx_patch_write_end. 
839 */ 840 841int gr_gk20a_ctx_patch_write_begin(struct gk20a *g, 842 struct nvgpu_gr_ctx *gr_ctx, 843 bool update_patch_count) 844{ 845 if (update_patch_count) { 846 /* reset patch count if ucode has already processed it */ 847 gr_ctx->patch_ctx.data_count = nvgpu_mem_rd(g, 848 &gr_ctx->mem, 849 ctxsw_prog_main_image_patch_count_o()); 850 nvgpu_log(g, gpu_dbg_info, "patch count reset to %d", 851 gr_ctx->patch_ctx.data_count); 852 } 853 return 0; 854} 855 856void gr_gk20a_ctx_patch_write_end(struct gk20a *g, 857 struct nvgpu_gr_ctx *gr_ctx, 858 bool update_patch_count) 859{ 860 /* Write context count to context image if it is mapped */ 861 if (update_patch_count) { 862 nvgpu_mem_wr(g, &gr_ctx->mem, 863 ctxsw_prog_main_image_patch_count_o(), 864 gr_ctx->patch_ctx.data_count); 865 nvgpu_log(g, gpu_dbg_info, "write patch count %d", 866 gr_ctx->patch_ctx.data_count); 867 } 868} 869 870void gr_gk20a_ctx_patch_write(struct gk20a *g, 871 struct nvgpu_gr_ctx *gr_ctx, 872 u32 addr, u32 data, bool patch) 873{ 874 if (patch) { 875 u32 patch_slot = gr_ctx->patch_ctx.data_count * 876 PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY; 877 if (patch_slot > (PATCH_CTX_ENTRIES_FROM_SIZE( 878 gr_ctx->patch_ctx.mem.size) - 879 PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY)) { 880 nvgpu_err(g, "failed to access patch_slot %d", 881 patch_slot); 882 return; 883 } 884 nvgpu_mem_wr32(g, &gr_ctx->patch_ctx.mem, patch_slot, addr); 885 nvgpu_mem_wr32(g, &gr_ctx->patch_ctx.mem, patch_slot + 1, data); 886 gr_ctx->patch_ctx.data_count++; 887 nvgpu_log(g, gpu_dbg_info, 888 "patch addr = 0x%x data = 0x%x data_count %d", 889 addr, data, gr_ctx->patch_ctx.data_count); 890 } else { 891 gk20a_writel(g, addr, data); 892 } 893} 894 895static u32 fecs_current_ctx_data(struct gk20a *g, struct nvgpu_mem *inst_block) 896{ 897 u64 ptr = nvgpu_inst_block_addr(g, inst_block) >> 898 ram_in_base_shift_v(); 899 u32 aperture = nvgpu_aperture_mask(g, inst_block, 900 gr_fecs_current_ctx_target_sys_mem_ncoh_f(), 901 
gr_fecs_current_ctx_target_sys_mem_coh_f(), 902 gr_fecs_current_ctx_target_vid_mem_f()); 903 904 return gr_fecs_current_ctx_ptr_f(u64_lo32(ptr)) | aperture | 905 gr_fecs_current_ctx_valid_f(1); 906} 907 908int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, 909 struct channel_gk20a *c) 910{ 911 u32 inst_base_ptr = u64_lo32(nvgpu_inst_block_addr(g, &c->inst_block) 912 >> ram_in_base_shift_v()); 913 u32 data = fecs_current_ctx_data(g, &c->inst_block); 914 u32 ret; 915 916 nvgpu_log_info(g, "bind channel %d inst ptr 0x%08x", 917 c->chid, inst_base_ptr); 918 919 ret = gr_gk20a_submit_fecs_method_op(g, 920 (struct fecs_method_op_gk20a) { 921 .method.addr = gr_fecs_method_push_adr_bind_pointer_v(), 922 .method.data = data, 923 .mailbox = { .id = 0, .data = 0, 924 .clr = 0x30, 925 .ret = NULL, 926 .ok = 0x10, 927 .fail = 0x20, }, 928 .cond.ok = GR_IS_UCODE_OP_AND, 929 .cond.fail = GR_IS_UCODE_OP_AND}, true); 930 if (ret) { 931 nvgpu_err(g, 932 "bind channel instance failed"); 933 } 934 935 return ret; 936} 937 938void gr_gk20a_write_zcull_ptr(struct gk20a *g, 939 struct nvgpu_mem *mem, u64 gpu_va) 940{ 941 u32 va = u64_lo32(gpu_va >> 8); 942 943 nvgpu_mem_wr(g, mem, 944 ctxsw_prog_main_image_zcull_ptr_o(), va); 945} 946 947void gr_gk20a_write_pm_ptr(struct gk20a *g, 948 struct nvgpu_mem *mem, u64 gpu_va) 949{ 950 u32 va = u64_lo32(gpu_va >> 8); 951 952 nvgpu_mem_wr(g, mem, 953 ctxsw_prog_main_image_pm_ptr_o(), va); 954} 955 956static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c) 957{ 958 struct tsg_gk20a *tsg; 959 struct nvgpu_gr_ctx *gr_ctx = NULL; 960 struct nvgpu_mem *mem = NULL; 961 struct nvgpu_mem *ctxheader = &c->ctx_header; 962 int ret = 0; 963 964 nvgpu_log_fn(g, " "); 965 966 tsg = tsg_gk20a_from_ch(c); 967 if (tsg == NULL) { 968 return -EINVAL; 969 } 970 971 gr_ctx = &tsg->gr_ctx; 972 mem = &gr_ctx->mem; 973 974 if (gr_ctx->zcull_ctx.gpu_va == 0 && 975 gr_ctx->zcull_ctx.ctx_sw_mode == 976 
	    ctxsw_prog_main_image_zcull_mode_separate_buffer_v()) {
		return -EINVAL;
	}

	/* Channel/TSG must be idle before the context image is touched. */
	ret = gk20a_disable_channel_tsg(g, c);
	if (ret) {
		nvgpu_err(g, "failed to disable channel/TSG");
		return ret;
	}
	ret = gk20a_fifo_preempt(g, c);
	if (ret) {
		gk20a_enable_channel_tsg(g, c);
		nvgpu_err(g, "failed to preempt channel/TSG");
		return ret;
	}

	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_zcull_o(),
			gr_ctx->zcull_ctx.ctx_sw_mode);

	/* Prefer the separate context header when one is mapped. */
	if (ctxheader->gpu_va) {
		g->ops.gr.write_zcull_ptr(g, ctxheader,
					gr_ctx->zcull_ctx.gpu_va);
	} else {
		g->ops.gr.write_zcull_ptr(g, mem, gr_ctx->zcull_ctx.gpu_va);
	}

	gk20a_enable_channel_tsg(g, c);

	return ret;
}

/* Register-space byte offset of GPC unit @gpc (gpc * GPC stride). */
u32 gk20a_gr_gpc_offset(struct gk20a *g, u32 gpc)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 gpc_offset = gpc_stride * gpc;

	return gpc_offset;
}

/* Register-space byte offset of TPC unit @tpc within its GPC. */
u32 gk20a_gr_tpc_offset(struct gk20a *g, u32 tpc)
{
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
					GPU_LIT_TPC_IN_GPC_STRIDE);
	u32 tpc_offset = tpc_in_gpc_stride * tpc;

	return tpc_offset;
}

/*
 * Commit the global context buffers (pagepool, bundle CB, attribute CB)
 * into channel @c's graphics context.  When @patch is true the values go
 * through the patch-context write path (bracketed by patch_write_begin/end);
 * otherwise they are written directly.
 * Returns 0 on success or a negative errno.
 */
int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
			struct channel_gk20a *c, bool patch)
{
	struct gr_gk20a *gr = &g->gr;
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx = NULL;
	u64 addr;
	u32 size;

	nvgpu_log_fn(g, " ");

	tsg = tsg_gk20a_from_ch(c);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;
	if (patch) {
		int err;
		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
		if (err != 0) {
			return err;
		}
	}

	/*
	 * Global pagepool buffer: pack the 40-bit VA into the register
	 * layout (bits 39:8, i.e. 256B aligned).
	 */
	addr = (u64_lo32(gr_ctx->global_ctx_buffer_va[PAGEPOOL_VA]) >>
		gr_scc_pagepool_base_addr_39_8_align_bits_v()) |
		(u64_hi32(gr_ctx->global_ctx_buffer_va[PAGEPOOL_VA]) <<
		 (32 - gr_scc_pagepool_base_addr_39_8_align_bits_v()));

	size = gr->global_ctx_buffer[PAGEPOOL].mem.size /
		gr_scc_pagepool_total_pages_byte_granularity_v();

	/* Default-sized pagepool maps to the HW "max" encoding. */
	if (size == g->ops.gr.pagepool_default_size(g)) {
		size = gr_scc_pagepool_total_pages_hwmax_v();
	}

	nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d",
		addr, size);

	g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch);

	/* global bundle cb */
	addr = (u64_lo32(gr_ctx->global_ctx_buffer_va[CIRCULAR_VA]) >>
		gr_scc_bundle_cb_base_addr_39_8_align_bits_v()) |
		(u64_hi32(gr_ctx->global_ctx_buffer_va[CIRCULAR_VA]) <<
		 (32 - gr_scc_bundle_cb_base_addr_39_8_align_bits_v()));

	size = gr->bundle_cb_default_size;

	nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %d",
		addr, size);

	g->ops.gr.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);

	/* global attrib cb (4KB aligned: bits 39:12) */
	addr = (u64_lo32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) >>
		gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()) |
		(u64_hi32(gr_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) <<
		 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));

	nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr);
	g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, patch);
	g->ops.gr.commit_global_cb_manager(g, c, patch);

	if (patch) {
		gr_gk20a_ctx_patch_write_end(g, gr_ctx, false);
	}

	return 0;
}

/*
 * Program the graphics timeslice mode (enable or disable) across the
 * GPM/PD/DS/MPC units, based on gr->timeslice_mode.
 * NOTE(review): gr_ctx stays NULL here, so gr_gk20a_ctx_patch_write()
 * presumably takes its direct-register-write path — confirm against the
 * patch_write implementation.  Always returns 0.
 */
int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
{
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_gr_ctx *gr_ctx = NULL;
	u32 gpm_pd_cfg;
	u32 pd_ab_dist_cfg0;
	u32 ds_debug;
	u32 mpc_vtg_debug;
	u32 pe_vaf;
	u32 pe_vsc_vpc;

	nvgpu_log_fn(g, " ");

	/* Read-modify-write: start from current register state. */
	gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r());
	pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
	ds_debug = gk20a_readl(g, gr_ds_debug_r());
	mpc_vtg_debug = gk20a_readl(g, gr_gpcs_tpcs_mpc_vtg_debug_r());

	if (gr->timeslice_mode == gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v()) {
		pe_vaf = gk20a_readl(g, gr_gpcs_tpcs_pe_vaf_r());
		pe_vsc_vpc = gk20a_readl(g, gr_gpcs_tpcs_pes_vsc_vpc_r());

		gpm_pd_cfg = gr_gpcs_gpm_pd_cfg_timeslice_mode_enable_f() | gpm_pd_cfg;
		pe_vaf = gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f() | pe_vaf;
		pe_vsc_vpc = gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f() | pe_vsc_vpc;
		pd_ab_dist_cfg0 = gr_pd_ab_dist_cfg0_timeslice_enable_en_f() | pd_ab_dist_cfg0;
		ds_debug = gr_ds_debug_timeslice_mode_enable_f() | ds_debug;
		mpc_vtg_debug = gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f() | mpc_vtg_debug;

		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_gpm_pd_cfg_r(), gpm_pd_cfg, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_pe_vaf_r(), pe_vaf, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_pes_vsc_vpc_r(), pe_vsc_vpc, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg0_r(), pd_ab_dist_cfg0, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_ds_debug_r(), ds_debug, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_mpc_vtg_debug_r(), mpc_vtg_debug, false);
	} else {
		gpm_pd_cfg = gr_gpcs_gpm_pd_cfg_timeslice_mode_disable_f() | gpm_pd_cfg;
		pd_ab_dist_cfg0 = gr_pd_ab_dist_cfg0_timeslice_enable_dis_f() | pd_ab_dist_cfg0;
		ds_debug = gr_ds_debug_timeslice_mode_disable_f() | ds_debug;
		mpc_vtg_debug = gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f() | mpc_vtg_debug;

		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_gpm_pd_cfg_r(), gpm_pd_cfg, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg0_r(), pd_ab_dist_cfg0, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_ds_debug_r(), ds_debug, false);
		gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_mpc_vtg_debug_r(), mpc_vtg_debug, false);
	}

	return 0;
}

/*
 * Return map tiles count for given index
 * Return 0 if index is out-of-bounds
 */
static u32 gr_gk20a_get_map_tile_count(struct gr_gk20a *gr, u32 index)
{
	if (index >= gr->map_tile_count) {
		return 0;
	}

	return gr->map_tiles[index];
}

/*
 * Program the screen-tile -> GPC mapping tables (CRSTR, WWDX, RSTR2D)
 * from gr->map_tiles, plus the WWDX normalization coefficients derived
 * from the TPC count.  Returns 0 on success, -1 when no map_tiles table
 * has been built.
 */
int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 norm_entries, norm_shift;
	u32 coeff5_mod, coeff6_mod, coeff7_mod, coeff8_mod, coeff9_mod, coeff10_mod, coeff11_mod;
	u32 map0, map1, map2, map3, map4, map5;

	if (gr->map_tiles == NULL) {
		return -1;
	}

	nvgpu_log_fn(g, " ");

	gk20a_writel(g, gr_crstr_map_table_cfg_r(),
		     gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) |
		     gr_crstr_map_table_cfg_num_entries_f(gr->tpc_count));

	/* Six tiles packed per 32-bit map register (tiles 0..35). */
	map0 =  gr_crstr_gpc_map0_tile0_f(gr_gk20a_get_map_tile_count(gr, 0)) |
		gr_crstr_gpc_map0_tile1_f(gr_gk20a_get_map_tile_count(gr, 1)) |
		gr_crstr_gpc_map0_tile2_f(gr_gk20a_get_map_tile_count(gr, 2)) |
		gr_crstr_gpc_map0_tile3_f(gr_gk20a_get_map_tile_count(gr, 3)) |
		gr_crstr_gpc_map0_tile4_f(gr_gk20a_get_map_tile_count(gr, 4)) |
		gr_crstr_gpc_map0_tile5_f(gr_gk20a_get_map_tile_count(gr, 5));

	map1 =  gr_crstr_gpc_map1_tile6_f(gr_gk20a_get_map_tile_count(gr, 6)) |
		gr_crstr_gpc_map1_tile7_f(gr_gk20a_get_map_tile_count(gr, 7)) |
		gr_crstr_gpc_map1_tile8_f(gr_gk20a_get_map_tile_count(gr, 8)) |
		gr_crstr_gpc_map1_tile9_f(gr_gk20a_get_map_tile_count(gr, 9)) |
		gr_crstr_gpc_map1_tile10_f(gr_gk20a_get_map_tile_count(gr, 10)) |
		gr_crstr_gpc_map1_tile11_f(gr_gk20a_get_map_tile_count(gr, 11));

	map2 =  gr_crstr_gpc_map2_tile12_f(gr_gk20a_get_map_tile_count(gr, 12)) |
		gr_crstr_gpc_map2_tile13_f(gr_gk20a_get_map_tile_count(gr, 13)) |
		gr_crstr_gpc_map2_tile14_f(gr_gk20a_get_map_tile_count(gr, 14)) |
		gr_crstr_gpc_map2_tile15_f(gr_gk20a_get_map_tile_count(gr, 15)) |
		gr_crstr_gpc_map2_tile16_f(gr_gk20a_get_map_tile_count(gr, 16)) |
		gr_crstr_gpc_map2_tile17_f(gr_gk20a_get_map_tile_count(gr, 17));

	map3 =  gr_crstr_gpc_map3_tile18_f(gr_gk20a_get_map_tile_count(gr, 18)) |
		gr_crstr_gpc_map3_tile19_f(gr_gk20a_get_map_tile_count(gr, 19)) |
		gr_crstr_gpc_map3_tile20_f(gr_gk20a_get_map_tile_count(gr, 20)) |
		gr_crstr_gpc_map3_tile21_f(gr_gk20a_get_map_tile_count(gr, 21)) |
		gr_crstr_gpc_map3_tile22_f(gr_gk20a_get_map_tile_count(gr, 22)) |
		gr_crstr_gpc_map3_tile23_f(gr_gk20a_get_map_tile_count(gr, 23));

	map4 =  gr_crstr_gpc_map4_tile24_f(gr_gk20a_get_map_tile_count(gr, 24)) |
		gr_crstr_gpc_map4_tile25_f(gr_gk20a_get_map_tile_count(gr, 25)) |
		gr_crstr_gpc_map4_tile26_f(gr_gk20a_get_map_tile_count(gr, 26)) |
		gr_crstr_gpc_map4_tile27_f(gr_gk20a_get_map_tile_count(gr, 27)) |
		gr_crstr_gpc_map4_tile28_f(gr_gk20a_get_map_tile_count(gr, 28)) |
		gr_crstr_gpc_map4_tile29_f(gr_gk20a_get_map_tile_count(gr, 29));

	/* Tiles 32..35 are always zero on this chip. */
	map5 =  gr_crstr_gpc_map5_tile30_f(gr_gk20a_get_map_tile_count(gr, 30)) |
		gr_crstr_gpc_map5_tile31_f(gr_gk20a_get_map_tile_count(gr, 31)) |
		gr_crstr_gpc_map5_tile32_f(0) |
		gr_crstr_gpc_map5_tile33_f(0) |
		gr_crstr_gpc_map5_tile34_f(0) |
		gr_crstr_gpc_map5_tile35_f(0);

	gk20a_writel(g, gr_crstr_gpc_map0_r(), map0);
	gk20a_writel(g, gr_crstr_gpc_map1_r(), map1);
	gk20a_writel(g, gr_crstr_gpc_map2_r(), map2);
	gk20a_writel(g, gr_crstr_gpc_map3_r(), map3);
	gk20a_writel(g, gr_crstr_gpc_map4_r(), map4);
	gk20a_writel(g, gr_crstr_gpc_map5_r(), map5);

	/* Normalization shift: chosen so tpc_count << shift stays near 16. */
	switch (gr->tpc_count) {
	case 1:
		norm_shift = 4;
		break;
	case 2:
	case 3:
		norm_shift = 3;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		norm_shift = 2;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
	case 15:
		norm_shift = 1;
		break;
	default:
		norm_shift = 0;
		break;
	}

	/* Precompute (2^k mod norm_entries) for k = 5..11 for the WWDX unit. */
	norm_entries = gr->tpc_count << norm_shift;
	coeff5_mod = (1 << 5) % norm_entries;
	coeff6_mod = (1 << 6) % norm_entries;
	coeff7_mod = (1 << 7) % norm_entries;
	coeff8_mod = (1 << 8) % norm_entries;
	coeff9_mod = (1 << 9) % norm_entries;
	coeff10_mod = (1 << 10) % norm_entries;
	coeff11_mod = (1 << 11) % norm_entries;

	gk20a_writel(g, gr_ppcs_wwdx_map_table_cfg_r(),
		     gr_ppcs_wwdx_map_table_cfg_row_offset_f(gr->map_row_offset) |
		     gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(norm_entries) |
		     gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(norm_shift) |
		     gr_ppcs_wwdx_map_table_cfg_coeff5_mod_value_f(coeff5_mod) |
		     gr_ppcs_wwdx_map_table_cfg_num_entries_f(gr->tpc_count));

	gk20a_writel(g, gr_ppcs_wwdx_map_table_cfg2_r(),
		     gr_ppcs_wwdx_map_table_cfg2_coeff6_mod_value_f(coeff6_mod) |
		     gr_ppcs_wwdx_map_table_cfg2_coeff7_mod_value_f(coeff7_mod) |
		     gr_ppcs_wwdx_map_table_cfg2_coeff8_mod_value_f(coeff8_mod) |
		     gr_ppcs_wwdx_map_table_cfg2_coeff9_mod_value_f(coeff9_mod) |
		     gr_ppcs_wwdx_map_table_cfg2_coeff10_mod_value_f(coeff10_mod) |
		     gr_ppcs_wwdx_map_table_cfg2_coeff11_mod_value_f(coeff11_mod));

	/* Same tile map is mirrored into the WWDX and RSTR2D units. */
	gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map0_r(), map0);
	gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map1_r(), map1);
	gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map2_r(), map2);
	gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map3_r(), map3);
	gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map4_r(), map4);
	gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map5_r(), map5);

	gk20a_writel(g, gr_rstr2d_map_table_cfg_r(),
		     gr_rstr2d_map_table_cfg_row_offset_f(gr->map_row_offset) |
		     gr_rstr2d_map_table_cfg_num_entries_f(gr->tpc_count));

	gk20a_writel(g, gr_rstr2d_gpc_map0_r(), map0);
	gk20a_writel(g, gr_rstr2d_gpc_map1_r(), map1);
	gk20a_writel(g, gr_rstr2d_gpc_map2_r(), map2);
	gk20a_writel(g, gr_rstr2d_gpc_map3_r(), map3);
	gk20a_writel(g, gr_rstr2d_gpc_map4_r(), map4);
	gk20a_writel(g, gr_rstr2d_gpc_map5_r(), map5);

	return 0;
}

/* Population count (Kernighan's bit-clearing loop). */
static inline u32 count_bits(u32 mask)
{
	u32 temp = mask;
	u32 count;
	for (count = 0; temp != 0; count++) {
		temp &= temp - 1;	/* clear lowest set bit */
	}

	return count;
}

/*
 * Build the SM -> (gpc, tpc) mapping table.  Iterating tpc-major spreads
 * consecutive sm_ids across GPCs.  Sets g->gr.no_of_sm to the number of
 * entries filled.  Always returns 0.
 */
int gr_gk20a_init_sm_id_table(struct gk20a *g)
{
	u32 gpc, tpc;
	u32 sm_id = 0;

	for (tpc = 0; tpc < g->gr.max_tpc_per_gpc_count; tpc++) {
		for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {

			if (tpc < g->gr.gpc_tpc_count[gpc]) {
				g->gr.sm_to_cluster[sm_id].tpc_index = tpc;
				g->gr.sm_to_cluster[sm_id].gpc_index = gpc;
				g->gr.sm_to_cluster[sm_id].sm_index = 0;
				g->gr.sm_to_cluster[sm_id].global_tpc_index =
									sm_id;
				sm_id++;
			}
		}
	}
	g->gr.no_of_sm = sm_id;
	return 0;
}

/*
 * Return number of TPCs in a GPC
 * Return 0 if GPC index is invalid i.e. GPC is disabled
 */
u32 gr_gk20a_get_tpc_count(struct gr_gk20a *gr, u32 gpc_index)
{
	if (gpc_index >= gr->gpc_count)	{
		return 0;
	}

	return gr->gpc_tpc_count[gpc_index];
}

/*
 * Program the floorsweeping state: SM id numbering, per-GPC TPC counts,
 * ROP mapping, skip tables and the CWD fs register (optionally honoring a
 * user-supplied TPC mask when the fuses report all TPCs present).
 * Returns 0 on success or a negative errno from init_sm_id_table.
 */
int gr_gk20a_init_fs_state(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	u32 tpc_index, gpc_index;
	u32 sm_id = 0, gpc_id = 0;
	u32 tpc_per_gpc;
	u32 fuse_tpc_mask;
	u32 reg_index;
	int err;

	nvgpu_log_fn(g, " ");

	if (g->ops.gr.init_sm_id_table) {
		err = g->ops.gr.init_sm_id_table(g);
		if (err != 0) {
			return err;
		}

		/* Is table empty ? */
		if (g->gr.no_of_sm == 0) {
			return -EINVAL;
		}
	}

	for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
		tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;
		gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;

		g->ops.gr.program_sm_id_numbering(g, gpc_index, tpc_index, sm_id);

		if (g->ops.gr.program_active_tpc_counts) {
			g->ops.gr.program_active_tpc_counts(g, gpc_index);
		}
	}

	/* Eight per-GPC 4-bit TPC counts packed per register. */
	for (reg_index = 0, gpc_id = 0;
	     reg_index < gr_pd_num_tpc_per_gpc__size_1_v();
	     reg_index++, gpc_id += 8) {

		tpc_per_gpc =
			gr_pd_num_tpc_per_gpc_count0_f(gr_gk20a_get_tpc_count(gr, gpc_id + 0)) |
			gr_pd_num_tpc_per_gpc_count1_f(gr_gk20a_get_tpc_count(gr, gpc_id + 1)) |
			gr_pd_num_tpc_per_gpc_count2_f(gr_gk20a_get_tpc_count(gr, gpc_id + 2)) |
			gr_pd_num_tpc_per_gpc_count3_f(gr_gk20a_get_tpc_count(gr, gpc_id + 3)) |
			gr_pd_num_tpc_per_gpc_count4_f(gr_gk20a_get_tpc_count(gr, gpc_id + 4)) |
			gr_pd_num_tpc_per_gpc_count5_f(gr_gk20a_get_tpc_count(gr, gpc_id + 5)) |
			gr_pd_num_tpc_per_gpc_count6_f(gr_gk20a_get_tpc_count(gr, gpc_id + 6)) |
			gr_pd_num_tpc_per_gpc_count7_f(gr_gk20a_get_tpc_count(gr, gpc_id + 7));

		gk20a_writel(g, gr_pd_num_tpc_per_gpc_r(reg_index), tpc_per_gpc);
		gk20a_writel(g, gr_ds_num_tpc_per_gpc_r(reg_index), tpc_per_gpc);
	}

	/* gr__setup_pd_mapping stubbed for gk20a */
	g->ops.gr.setup_rop_mapping(g, gr);
	if (g->ops.gr.setup_alpha_beta_tables) {
		g->ops.gr.setup_alpha_beta_tables(g, gr);
	}

	for (gpc_index = 0;
	     gpc_index < gr_pd_dist_skip_table__size_1_v() * 4;
	     gpc_index += 4) {

		/*
		 * NOTE(review): logical || collapses the four mask fields to a
		 * single 0/1 value instead of OR-ing the bitfields together
		 * (bitwise |).  Harmless only if the skip masks are 0/1 —
		 * confirm intent (CERT EXP46-C).
		 */
		gk20a_writel(g, gr_pd_dist_skip_table_r(gpc_index/4),
			     (gr_pd_dist_skip_table_gpc_4n0_mask_f(gr->gpc_skip_mask[gpc_index]) != 0U) ||
			     (gr_pd_dist_skip_table_gpc_4n1_mask_f(gr->gpc_skip_mask[gpc_index + 1]) != 0U) ||
			     (gr_pd_dist_skip_table_gpc_4n2_mask_f(gr->gpc_skip_mask[gpc_index + 2]) != 0U) ||
			     (gr_pd_dist_skip_table_gpc_4n3_mask_f(gr->gpc_skip_mask[gpc_index + 3]) != 0U));
	}

	/* Honor the user TPC mask only when fuses report all TPCs usable. */
	fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
	if ((g->tpc_fs_mask_user != 0U) &&
	    (fuse_tpc_mask == BIT32(gr->max_tpc_count) - 1U)) {
		u32 val = g->tpc_fs_mask_user;
		val &= (0x1U << gr->max_tpc_count) - 1U;
		gk20a_writel(g, gr_cwd_fs_r(),
			     gr_cwd_fs_num_gpcs_f(gr->gpc_count) |
			     gr_cwd_fs_num_tpcs_f(hweight32(val)));
	} else {
		gk20a_writel(g, gr_cwd_fs_r(),
			     gr_cwd_fs_num_gpcs_f(gr->gpc_count) |
			     gr_cwd_fs_num_tpcs_f(gr->tpc_count));
	}

	gk20a_writel(g, gr_bes_zrop_settings_r(),
		     gr_bes_zrop_settings_num_active_fbps_f(gr->num_fbps));
	gk20a_writel(g, gr_bes_crop_settings_r(),
		     gr_bes_crop_settings_num_active_fbps_f(gr->num_fbps));

	return 0;
}

/*
 * Ask FECS to save the channel's context image; @save_type selects the
 * save method (e.g. wfi golden save).  Blocks until the FECS mailbox
 * reports ok (1) or fail (2).  Returns 0 on success.
 */
int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
{
	struct gk20a *g = c->g;
	int ret;

	nvgpu_log_fn(g, " ");

	ret = gr_gk20a_submit_fecs_method_op(g,
		(struct fecs_method_op_gk20a) {
		.method.addr = save_type,
		.method.data = fecs_current_ctx_data(g, &c->inst_block),
		.mailbox = {.id = 0, .data = 0, .clr = 3, .ret = NULL,
			.ok = 1, .fail = 2,
		},
		.cond.ok = GR_IS_UCODE_OP_AND,
		.cond.fail = GR_IS_UCODE_OP_AND,
		 }, true);

	if (ret) {
		nvgpu_err(g, "save context image failed");
	}

	return ret;
}

/*
 * Stream the SW bundle init list into the pipe with pipe-mode override
 * enabled, waiting for idle after each GO_IDLE bundle and for FE idle
 * after every bundle.  On any failure the pipe override and fe_go_idle
 * timeout are restored before returning.
 * NOTE(review): declared u32 but the wait helpers appear to return int
 * error codes — confirm sign handling at the callers.
 */
u32 gk20a_init_sw_bundle(struct gk20a *g)
{
	struct av_list_gk20a *sw_bundle_init = &g->gr.ctx_vars.sw_bundle_init;
	u32 last_bundle_data = 0;
	u32 err = 0;
	unsigned int i;

	/* disable fe_go_idle */
	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
		gr_fe_go_idle_timeout_count_disabled_f());
	/* enable pipe mode override */
	gk20a_writel(g, gr_pipe_bundle_config_r(),
		gr_pipe_bundle_config_override_pipe_mode_enabled_f());

	/* load bundle init */
	for (i = 0; i < sw_bundle_init->count; i++) {
		/* Skip redundant data writes when the value repeats. */
		if (i == 0 || last_bundle_data != sw_bundle_init->l[i].value) {
			gk20a_writel(g, gr_pipe_bundle_data_r(),
				sw_bundle_init->l[i].value);
			last_bundle_data = sw_bundle_init->l[i].value;
		}

		gk20a_writel(g, gr_pipe_bundle_address_r(),
			     sw_bundle_init->l[i].addr);

		if (gr_pipe_bundle_address_value_v(sw_bundle_init->l[i].addr) ==
		    GR_GO_IDLE_BUNDLE) {
			err = gr_gk20a_wait_idle(g,
						 gk20a_get_gr_idle_timeout(g),
						 GR_IDLE_CHECK_DEFAULT);
			if (err != 0U) {
				goto error;
			}
		}

		err = gr_gk20a_wait_fe_idle(g, gk20a_get_gr_idle_timeout(g),
					    GR_IDLE_CHECK_DEFAULT);
		if (err != 0U) {
			goto error;
		}
	}

	if ((err == 0U) && (g->ops.gr.init_sw_veid_bundle != NULL)) {
		err = g->ops.gr.init_sw_veid_bundle(g);
		if (err != 0U) {
			goto error;
		}
	}

	if (g->ops.gr.init_sw_bundle64) {
		err = g->ops.gr.init_sw_bundle64(g);
		if (err != 0U) {
			goto error;
		}
	}

	/* disable pipe mode override */
	gk20a_writel(g, gr_pipe_bundle_config_r(),
		     gr_pipe_bundle_config_override_pipe_mode_disabled_f());

	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
				 GR_IDLE_CHECK_DEFAULT);

	/* restore fe_go_idle */
	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
		     gr_fe_go_idle_timeout_count_prod_f());

	return err;

error:
	/* in case of error skip waiting for GR idle - just restore state */
	gk20a_writel(g, gr_pipe_bundle_config_r(),
		     gr_pipe_bundle_config_override_pipe_mode_disabled_f());

	/* restore fe_go_idle */
	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
		     gr_fe_go_idle_timeout_count_prod_f());

	return err;
}

/* init global golden image from a fresh gr_ctx in channel ctx.
   save a copy in local_golden_image in ctx_vars */
static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
					  struct channel_gk20a *c)
{
	struct gr_gk20a *gr = &g->gr;
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx = NULL;
	u32 ctx_header_bytes = ctxsw_prog_fecs_header_v();
	u32 ctx_header_words;
	u32 i;
	u32 data;
	struct nvgpu_mem *gold_mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
	struct nvgpu_mem *gr_mem;
	/* NOTE(review): err is u32 yet gets -ENOMEM below and is returned
	 * as int — works by wraparound but confirm against callers. */
	u32 err = 0;
	struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load;
	struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
	u32 last_method_data = 0;

	nvgpu_log_fn(g, " ");

	tsg = tsg_gk20a_from_ch(c);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;
	gr_mem = &gr_ctx->mem;

	/* golden ctx is global to all channels. Although only the first
	   channel initializes golden image, driver needs to prevent multiple
	   channels from initializing golden ctx at the same time */
	nvgpu_mutex_acquire(&gr->ctx_mutex);

	if (gr->ctx_vars.golden_image_initialized) {
		goto clean_up;
	}
	if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
		struct nvgpu_timeout timeout;

		/* Force FE power on so the reset sequence below sticks. */
		nvgpu_timeout_init(g, &timeout,
				   FE_PWR_MODE_TIMEOUT_MAX /
					FE_PWR_MODE_TIMEOUT_DEFAULT,
				   NVGPU_TIMER_RETRY_TIMER);
		gk20a_writel(g, gr_fe_pwr_mode_r(),
			gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_force_on_f());
		do {
			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
			if (req == gr_fe_pwr_mode_req_done_v()) {
				break;
			}
			nvgpu_udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
		} while (nvgpu_timeout_expired_msg(&timeout,
					"timeout forcing FE on") == 0);
	}


	/* Pulse context reset: assert ... */
	gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(),
			gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f() |
			gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f() |
			gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f());
	(void) gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());	/* read-back to flush */
	nvgpu_udelay(10);

	/* ... then deassert. */
	gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(),
			gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f() |
			gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f());
	(void) gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());
	nvgpu_udelay(10);

	if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
		struct nvgpu_timeout timeout;

		/* Return FE power management to automatic. */
		nvgpu_timeout_init(g, &timeout,
				   FE_PWR_MODE_TIMEOUT_MAX /
					FE_PWR_MODE_TIMEOUT_DEFAULT,
				   NVGPU_TIMER_RETRY_TIMER);
		gk20a_writel(g, gr_fe_pwr_mode_r(),
			gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_auto_f());

		do {
			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
			if (req == gr_fe_pwr_mode_req_done_v()) {
				break;
			}
			nvgpu_udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
		} while (nvgpu_timeout_expired_msg(&timeout,
				"timeout setting FE power to auto") == 0);
	}

	/* clear scc ram */
	gk20a_writel(g, gr_scc_init_r(),
		gr_scc_init_ram_trigger_f());

	err = gr_gk20a_fecs_ctx_bind_channel(g, c);
	if (err != 0U) {
		goto clean_up;
	}

	/* NOTE(review): this wait's result is overwritten below without
	 * being checked — confirm that is intentional. */
	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
				 GR_IDLE_CHECK_DEFAULT);

	/* load ctx init */
	for (i = 0; i < sw_ctx_load->count; i++) {
		gk20a_writel(g, sw_ctx_load->l[i].addr,
			     sw_ctx_load->l[i].value);
	}

	if (g->ops.gr.disable_rd_coalesce) {
		g->ops.gr.disable_rd_coalesce(g);
	}

	if (g->ops.gr.init_preemption_state) {
		g->ops.gr.init_preemption_state(g);
	}

	if (g->ops.clock_gating.blcg_gr_load_gating_prod) {
		g->ops.clock_gating.blcg_gr_load_gating_prod(g, g->blcg_enabled);
	}

	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
				 GR_IDLE_CHECK_DEFAULT);
	if (err != 0U) {
		goto clean_up;
	}

	/* disable fe_go_idle */
	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
		gr_fe_go_idle_timeout_count_disabled_f());

	err = g->ops.gr.commit_global_ctx_buffers(g, c, false);
	if (err != 0U) {
		goto clean_up;
	}

	/* override a few ctx state registers */
	g->ops.gr.commit_global_timeslice(g, c);

	/* floorsweep anything left */
	err = g->ops.gr.init_fs_state(g);
	if (err != 0U) {
		goto clean_up;
	}

	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
				 GR_IDLE_CHECK_DEFAULT);
	if (err != 0U) {
		goto restore_fe_go_idle;
	}

	err = gk20a_init_sw_bundle(g);
	if (err != 0U) {
		goto clean_up;
	}

restore_fe_go_idle:
	/* restore fe_go_idle */
	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
		     gr_fe_go_idle_timeout_count_prod_f());

	if ((err != 0U) || (gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
			      GR_IDLE_CHECK_DEFAULT) != 0)) {
		goto clean_up;
	}

	/* load method init */
	if (sw_method_init->count) {
		gk20a_writel(g, gr_pri_mme_shadow_raw_data_r(),
			     sw_method_init->l[0].value);
		gk20a_writel(g, gr_pri_mme_shadow_raw_index_r(),
			     gr_pri_mme_shadow_raw_index_write_trigger_f() |
			     sw_method_init->l[0].addr);
		last_method_data = sw_method_init->l[0].value;
	}
	for (i = 1; i < sw_method_init->count; i++) {
		/* Only rewrite the data register when the value changes. */
		if (sw_method_init->l[i].value != last_method_data) {
			gk20a_writel(g, gr_pri_mme_shadow_raw_data_r(),
				sw_method_init->l[i].value);
			last_method_data = sw_method_init->l[i].value;
		}
		gk20a_writel(g, gr_pri_mme_shadow_raw_index_r(),
			gr_pri_mme_shadow_raw_index_write_trigger_f() |
			sw_method_init->l[i].addr);
	}

	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
				 GR_IDLE_CHECK_DEFAULT);
	if (err != 0U) {
		goto clean_up;
	}

	ctx_header_words =  roundup(ctx_header_bytes, sizeof(u32));
	ctx_header_words >>= 2;

	/* Flush GPU L2 so the CPU reads below see current data. */
	g->ops.mm.l2_flush(g, true);

	/* Copy the FECS header of the fresh context into the golden image. */
	for (i = 0; i < ctx_header_words; i++) {
		data = nvgpu_mem_rd32(g, gr_mem, i);
		nvgpu_mem_wr32(g, gold_mem, i, data);
	}
	nvgpu_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
		 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v());

	g->ops.gr.write_zcull_ptr(g, gold_mem, 0);

	err = g->ops.gr.commit_inst(c, gr_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);
	if (err != 0U) {
		goto clean_up;
	}

	gr_gk20a_fecs_ctx_image_save(c, gr_fecs_method_push_adr_wfi_golden_save_v());



	/* Keep a CPU-side copy for fast restore into new channel contexts. */
	if (gr->ctx_vars.local_golden_image == NULL) {

		gr->ctx_vars.local_golden_image =
			nvgpu_vzalloc(g, gr->ctx_vars.golden_image_size);

		if (gr->ctx_vars.local_golden_image == NULL) {
			err = -ENOMEM;
			goto clean_up;
		}
		nvgpu_mem_rd_n(g, gold_mem, 0,
			gr->ctx_vars.local_golden_image,
			gr->ctx_vars.golden_image_size);

	}

	/* Point the channel back at its own context. */
	err = g->ops.gr.commit_inst(c, gr_mem->gpu_va);
	if (err != 0U) {
		goto clean_up;
	}

	gr->ctx_vars.golden_image_initialized = true;

	gk20a_writel(g, gr_fecs_current_ctx_r(),
		gr_fecs_current_ctx_valid_false_f());

clean_up:
	if (err != 0U) {
		nvgpu_err(g, "fail");
	} else {
		nvgpu_log_fn(g, "done");
	}

	nvgpu_mutex_release(&gr->ctx_mutex);
	return err;
}

/*
 * Enable or disable SMPC context switching for channel @c by updating the
 * pm_smpc_mode field in its context image.  The channel/TSG is disabled
 * and preempted around the CPU-side update, and re-enabled on all paths.
 * Returns 0 on success or a negative errno.
 */
int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
				    struct channel_gk20a *c,
				    bool enable_smpc_ctxsw)
{
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx = NULL;
	struct nvgpu_mem *mem = NULL;
	u32 data;
	int ret;

	nvgpu_log_fn(g, " ");

	tsg = tsg_gk20a_from_ch(c);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;
	mem = &gr_ctx->mem;
	if (!nvgpu_mem_is_valid(mem)) {
		nvgpu_err(g, "no graphics context allocated");
		return -EFAULT;
	}

	ret = gk20a_disable_channel_tsg(g, c);
	if (ret) {
		nvgpu_err(g, "failed to disable channel/TSG");
		goto out;
	}
	ret = gk20a_fifo_preempt(g, c);
	if (ret) {
		gk20a_enable_channel_tsg(g, c);
		nvgpu_err(g, "failed to preempt channel/TSG");
		goto out;
	}

	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
	g->ops.mm.l2_flush(g, true);

	data = nvgpu_mem_rd(g, mem,
		ctxsw_prog_main_image_pm_o());

	data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
	data |= enable_smpc_ctxsw ?
		ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
		ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();

	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_pm_o(), data);

out:
	gk20a_enable_channel_tsg(g, c);
	return ret;
}

/*
 * Set the HWPM context-switch mode (CTXSW / NO_CTXSW / Mode-E stream-out)
 * for channel @c.  Allocates and maps the PM backing buffer at @gpu_va on
 * first use, then updates the pm mode and PM pointer in the context image
 * (per channel ctx header when present, else in the main image).
 * Returns 0 on success (including when the mode is already set) or a
 * negative errno.
 */
int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
				  struct channel_gk20a *c,
				  u64 gpu_va,
				  u32 mode)
{
	struct tsg_gk20a *tsg;
	struct nvgpu_mem *gr_mem = NULL;
	struct nvgpu_gr_ctx *gr_ctx;
	struct pm_ctx_desc *pm_ctx;
	u32 data;
	u64 virt_addr = 0;
	struct nvgpu_mem *ctxheader = &c->ctx_header;
	int ret;

	nvgpu_log_fn(g, " ");

	tsg = tsg_gk20a_from_ch(c);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;
	pm_ctx = &gr_ctx->pm_ctx;
	gr_mem = &gr_ctx->mem;
	if (!nvgpu_mem_is_valid(gr_mem)) {
		nvgpu_err(g, "no graphics context allocated");
		return -EFAULT;
	}

	if ((mode == NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW) &&
	    (g->ops.gr.get_hw_accessor_stream_out_mode == NULL)) {
		nvgpu_err(g, "Mode-E hwpm context switch mode is not supported");
		return -EINVAL;
	}

	/* Early-out when the requested mode is already programmed;
	 * also validates @mode. */
	switch (mode) {
	case NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW:
		if (pm_ctx->pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) {
			return 0;
		}
		break;
	case NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW:
		if (pm_ctx->pm_mode == ctxsw_prog_main_image_pm_mode_no_ctxsw_f()) {
			return 0;
		}
		break;
	case NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW:
		if (pm_ctx->pm_mode == g->ops.gr.get_hw_accessor_stream_out_mode()) {
			return 0;
		}
		break;
	default:
		nvgpu_err(g, "invalid hwpm context switch mode");
		return -EINVAL;
	}

	ret = gk20a_disable_channel_tsg(g, c);
	if (ret) {
		nvgpu_err(g, "failed to disable channel/TSG");
		return ret;
	}

	ret = gk20a_fifo_preempt(g, c);
	if (ret) {
		gk20a_enable_channel_tsg(g, c);
		nvgpu_err(g, "failed to preempt channel/TSG");
		return ret;
	}

	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
	g->ops.mm.l2_flush(g, true);

	if (mode != NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW) {
		/* Allocate buffer if necessary */
		if (pm_ctx->mem.gpu_va == 0) {
			ret = nvgpu_dma_alloc_sys(g,
					g->gr.ctx_vars.pm_ctxsw_image_size,
					&pm_ctx->mem);
			if (ret) {
				c->g->ops.fifo.enable_channel(c);
				nvgpu_err(g,
					"failed to allocate pm ctxt buffer");
				return ret;
			}

			pm_ctx->mem.gpu_va = nvgpu_gmmu_map_fixed(c->vm,
					&pm_ctx->mem,
					gpu_va,
					pm_ctx->mem.size,
					NVGPU_VM_MAP_CACHEABLE,
					gk20a_mem_flag_none, true,
					pm_ctx->mem.aperture);
			if (pm_ctx->mem.gpu_va == 0ULL) {
				nvgpu_err(g,
					"failed to map pm ctxt buffer");
				nvgpu_dma_free(g, &pm_ctx->mem);
				c->g->ops.fifo.enable_channel(c);
				return -ENOMEM;
			}
		}

		if ((mode == NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW) &&
			(g->ops.gr.init_hwpm_pmm_register != NULL)) {
			g->ops.gr.init_hwpm_pmm_register(g);
		}
	}

	data = nvgpu_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
	data = data & ~ctxsw_prog_main_image_pm_mode_m();

	/* @mode was validated above, so no default case is needed here. */
	switch (mode) {
	case NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW:
		pm_ctx->pm_mode = ctxsw_prog_main_image_pm_mode_ctxsw_f();
		virt_addr = pm_ctx->mem.gpu_va;
		break;
	case NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW:
		pm_ctx->pm_mode = g->ops.gr.get_hw_accessor_stream_out_mode();
		virt_addr = pm_ctx->mem.gpu_va;
		break;
	case NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW:
		pm_ctx->pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
		virt_addr = 0;
	}

	data |= pm_ctx->pm_mode;

	nvgpu_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);

	if (ctxheader->gpu_va) {
		struct channel_gk20a *ch;

		/* Propagate the PM pointer to every channel in the TSG. */
		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
		nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
			g->ops.gr.write_pm_ptr(g, &ch->ctx_header, virt_addr);
		}
		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	} else {
		g->ops.gr.write_pm_ptr(g, gr_mem, virt_addr);
	}

	/* enable channel */
	gk20a_enable_channel_tsg(g, c);

	return 0;
}

/* Zero the save/restore op counters in a fresh context image. */
void gk20a_gr_init_ctxsw_hdr_data(struct gk20a *g,
				  struct nvgpu_mem *mem)
{
	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_num_save_ops_o(), 0);
	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_num_restore_ops_o(), 0);
}

/* load saved fresh copy of golden image into channel gr_ctx */
int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
				   struct channel_gk20a *c)
{
	struct gr_gk20a *gr = &g->gr;
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx;
	u32 virt_addr_lo;
	u32 virt_addr_hi;
	u64 virt_addr = 0;
	u32 v, data;
	int ret = 0;
	struct nvgpu_mem *mem;

	nvgpu_log_fn(g, " ");

	tsg = tsg_gk20a_from_ch(c);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;
	mem = &gr_ctx->mem;
	if (gr->ctx_vars.local_golden_image == NULL) {
		return -EINVAL;
	}

	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
	g->ops.mm.l2_flush(g, true);

	nvgpu_mem_wr_n(g, mem, 0,
			gr->ctx_vars.local_golden_image,
			gr->ctx_vars.golden_image_size);

	if (g->ops.gr.init_ctxsw_hdr_data) {
		g->ops.gr.init_ctxsw_hdr_data(g, mem);
	}

	if ((g->ops.gr.enable_cde_in_fecs != NULL) && c->cde) {
		g->ops.gr.enable_cde_in_fecs(g, mem);
	}

	/* set priv access map */
	virt_addr_lo =
		 u64_lo32(gr_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
	virt_addr_hi =
		 u64_hi32(gr_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);

	if (g->allow_all) {
		data = ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f();
	} else {
		data = ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f();
	}

	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
		 data);

	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
			virt_addr_lo);
	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
			virt_addr_hi);

	/* disable verif features */
	v = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
	v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
	v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);

	if (g->ops.gr.update_ctxsw_preemption_mode) {
		g->ops.gr.update_ctxsw_preemption_mode(g, c, mem);
	}

	if (g->ops.gr.update_boosted_ctx) {
		g->ops.gr.update_boosted_ctx(g, mem, gr_ctx);
	}

	/* Patch-context buffer address and entry count. */
	virt_addr_lo = u64_lo32(gr_ctx->patch_ctx.mem.gpu_va);
	virt_addr_hi = u64_hi32(gr_ctx->patch_ctx.mem.gpu_va);

	nvgpu_log(g, gpu_dbg_info, "write patch count = %d",
		gr_ctx->patch_ctx.data_count);
	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
		 gr_ctx->patch_ctx.data_count);

	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_patch_adr_lo_o(),
			virt_addr_lo);
	nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_patch_adr_hi_o(),
			virt_addr_hi);

	/* Update main header region of the context buffer with the info needed
	 * for PM context switching, including mode and possibly a pointer to
	 * the PM backing store.
	 */
	if (gr_ctx->pm_ctx.pm_mode != ctxsw_prog_main_image_pm_mode_no_ctxsw_f()) {
		if (gr_ctx->pm_ctx.mem.gpu_va == 0) {
			nvgpu_err(g,
				"context switched pm with no pm buffer!");
			return -EFAULT;
		}

		virt_addr = gr_ctx->pm_ctx.mem.gpu_va;
	} else {
		virt_addr = 0;
	}

	data = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
	data = data & ~ctxsw_prog_main_image_pm_mode_m();
	data |= gr_ctx->pm_ctx.pm_mode;

	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);

	g->ops.gr.write_pm_ptr(g, mem, virt_addr);

	return ret;
}

/* Release FECS/GPCCS from reset and start both falcon CPUs. */
static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0),
		     gr_fecs_ctxsw_mailbox_clear_value_f(~0));

	gk20a_writel(g, gr_gpccs_dmactl_r(), gr_gpccs_dmactl_require_ctx_f(0));
	gk20a_writel(g, gr_fecs_dmactl_r(), gr_fecs_dmactl_require_ctx_f(0));

	gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1));
	gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1));

	nvgpu_log_fn(g, "done");
}

/*
 * Set up the ctxsw-ucode address space: allocate the instance block,
 * bind it to the PMU VM, and map the ucode surface read-only into the
 * GMMU.  Returns 0 on success or a negative errno.
 */
static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info;
	int err;

	err = g->ops.mm.alloc_inst_block(g, &ucode_info->inst_blk_desc);
	if (err != 0) {
		return err;
	}

	g->ops.mm.init_inst_block(&ucode_info->inst_blk_desc, vm, 0);

	/* Map ucode surface to GMMU */
	ucode_info->surface_desc.gpu_va = nvgpu_gmmu_map(vm,
					&ucode_info->surface_desc,
					ucode_info->surface_desc.size,
					0, /* flags */
					gk20a_mem_flag_read_only,
					false,
					ucode_info->surface_desc.aperture);
	if (ucode_info->surface_desc.gpu_va == 0ULL) {
		nvgpu_err(g, "failed to update gmmu ptes");
		return -ENOMEM;
	}

	return 0;
}

/* Place one ucode segment at *offset and advance *offset (block aligned). */
static void gr_gk20a_init_ctxsw_ucode_segment(
	struct gk20a_ctxsw_ucode_segment *p_seg, u32 *offset, u32 size)
{
	p_seg->offset = *offset;
	p_seg->size = size;
	*offset = ALIGN(*offset + size, BLK_SIZE);
}

/* Lay out boot/code/data segments of one falcon's ucode at *offset. */
static void gr_gk20a_init_ctxsw_ucode_segments(
	struct gk20a_ctxsw_ucode_segments *segments, u32 *offset,
	struct gk20a_ctxsw_bootloader_desc *bootdesc,
	u32 code_size, u32 data_size)
{
	u32 boot_size = ALIGN(bootdesc->size, sizeof(u32));
	segments->boot_entry = bootdesc->entry_point;
	segments->boot_imem_offset = bootdesc->imem_offset;
	gr_gk20a_init_ctxsw_ucode_segment(&segments->boot, offset, boot_size);
	gr_gk20a_init_ctxsw_ucode_segment(&segments->code, offset, code_size);
	gr_gk20a_init_ctxsw_ucode_segment(&segments->data, offset, data_size);
}

/*
 * Copy the boot/code/data images into the ucode surface @dst and compute
 * the boot "signature" (word-sum checksum used to detect the bootloader
 * version).  Always returns 0.
 */
static int gr_gk20a_copy_ctxsw_ucode_segments(
	struct gk20a *g,
	struct nvgpu_mem *dst,
	struct gk20a_ctxsw_ucode_segments *segments,
	u32 *bootimage,
	u32 *code, u32 *data)
{
	unsigned int i;

	nvgpu_mem_wr_n(g, dst, segments->boot.offset, bootimage,
			segments->boot.size);
	nvgpu_mem_wr_n(g, dst, segments->code.offset, code,
			segments->code.size);
	nvgpu_mem_wr_n(g, dst, segments->data.offset, data,
			segments->data.size);

	/* compute a "checksum" for the boot binary to detect its version */
	segments->boot_signature = 0;
	for (i = 0; i < segments->boot.size / sizeof(u32); i++) {
		segments->boot_signature += bootimage[i];
	}

	return 0;
}

int
gr_gk20a_init_ctxsw_ucode(struct gk20a *g) 2233{ 2234 struct mm_gk20a *mm = &g->mm; 2235 struct vm_gk20a *vm = mm->pmu.vm; 2236 struct gk20a_ctxsw_bootloader_desc *fecs_boot_desc; 2237 struct gk20a_ctxsw_bootloader_desc *gpccs_boot_desc; 2238 struct nvgpu_firmware *fecs_fw; 2239 struct nvgpu_firmware *gpccs_fw; 2240 u32 *fecs_boot_image; 2241 u32 *gpccs_boot_image; 2242 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info; 2243 u32 ucode_size; 2244 int err = 0; 2245 2246 fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0); 2247 if (fecs_fw == NULL) { 2248 nvgpu_err(g, "failed to load fecs ucode!!"); 2249 return -ENOENT; 2250 } 2251 2252 fecs_boot_desc = (void *)fecs_fw->data; 2253 fecs_boot_image = (void *)(fecs_fw->data + 2254 sizeof(struct gk20a_ctxsw_bootloader_desc)); 2255 2256 gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0); 2257 if (gpccs_fw == NULL) { 2258 nvgpu_release_firmware(g, fecs_fw); 2259 nvgpu_err(g, "failed to load gpccs ucode!!"); 2260 return -ENOENT; 2261 } 2262 2263 gpccs_boot_desc = (void *)gpccs_fw->data; 2264 gpccs_boot_image = (void *)(gpccs_fw->data + 2265 sizeof(struct gk20a_ctxsw_bootloader_desc)); 2266 2267 ucode_size = 0; 2268 gr_gk20a_init_ctxsw_ucode_segments(&ucode_info->fecs, &ucode_size, 2269 fecs_boot_desc, 2270 g->gr.ctx_vars.ucode.fecs.inst.count * sizeof(u32), 2271 g->gr.ctx_vars.ucode.fecs.data.count * sizeof(u32)); 2272 gr_gk20a_init_ctxsw_ucode_segments(&ucode_info->gpccs, &ucode_size, 2273 gpccs_boot_desc, 2274 g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32), 2275 g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32)); 2276 2277 err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc); 2278 if (err != 0) { 2279 goto clean_up; 2280 } 2281 2282 gr_gk20a_copy_ctxsw_ucode_segments(g, &ucode_info->surface_desc, 2283 &ucode_info->fecs, 2284 fecs_boot_image, 2285 g->gr.ctx_vars.ucode.fecs.inst.l, 2286 g->gr.ctx_vars.ucode.fecs.data.l); 2287 2288 nvgpu_release_firmware(g, 
fecs_fw); 2289 fecs_fw = NULL; 2290 2291 gr_gk20a_copy_ctxsw_ucode_segments(g, &ucode_info->surface_desc, 2292 &ucode_info->gpccs, 2293 gpccs_boot_image, 2294 g->gr.ctx_vars.ucode.gpccs.inst.l, 2295 g->gr.ctx_vars.ucode.gpccs.data.l); 2296 2297 nvgpu_release_firmware(g, gpccs_fw); 2298 gpccs_fw = NULL; 2299 2300 err = gr_gk20a_init_ctxsw_ucode_vaspace(g); 2301 if (err != 0) { 2302 goto clean_up; 2303 } 2304 2305 return 0; 2306 2307clean_up: 2308 if (ucode_info->surface_desc.gpu_va) { 2309 nvgpu_gmmu_unmap(vm, &ucode_info->surface_desc, 2310 ucode_info->surface_desc.gpu_va); 2311 } 2312 nvgpu_dma_free(g, &ucode_info->surface_desc); 2313 2314 nvgpu_release_firmware(g, gpccs_fw); 2315 gpccs_fw = NULL; 2316 nvgpu_release_firmware(g, fecs_fw); 2317 fecs_fw = NULL; 2318 2319 return err; 2320} 2321 2322static void gr_gk20a_wait_for_fecs_arb_idle(struct gk20a *g) 2323{ 2324 int retries = FECS_ARB_CMD_TIMEOUT_MAX / FECS_ARB_CMD_TIMEOUT_DEFAULT; 2325 u32 val; 2326 2327 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); 2328 while ((gr_fecs_arb_ctx_cmd_cmd_v(val) != 0U) && (retries != 0)) { 2329 nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT); 2330 retries--; 2331 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); 2332 } 2333 2334 if (retries == 0) { 2335 nvgpu_err(g, "arbiter cmd timeout, fecs arb ctx cmd: 0x%08x", 2336 gk20a_readl(g, gr_fecs_arb_ctx_cmd_r())); 2337 } 2338 2339 retries = FECS_ARB_CMD_TIMEOUT_MAX / FECS_ARB_CMD_TIMEOUT_DEFAULT; 2340 while (((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) & 2341 gr_fecs_ctxsw_status_1_arb_busy_m()) != 0U) && 2342 (retries != 0)) { 2343 nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT); 2344 retries--; 2345 } 2346 if (retries == 0) { 2347 nvgpu_err(g, 2348 "arbiter idle timeout, fecs ctxsw status: 0x%08x", 2349 gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); 2350 } 2351} 2352 2353void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g) 2354{ 2355 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info; 2356 int retries = FECS_ARB_CMD_TIMEOUT_MAX 
/ FECS_ARB_CMD_TIMEOUT_DEFAULT; 2357 u64 inst_ptr; 2358 2359 while (((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) & 2360 gr_fecs_ctxsw_status_1_arb_busy_m()) != 0U) && 2361 (retries != 0)) { 2362 nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT); 2363 retries--; 2364 } 2365 if (retries == 0) { 2366 nvgpu_err(g, 2367 "arbiter idle timeout, status: %08x", 2368 gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); 2369 } 2370 2371 gk20a_writel(g, gr_fecs_arb_ctx_adr_r(), 0x0); 2372 2373 inst_ptr = nvgpu_inst_block_addr(g, &ucode_info->inst_blk_desc); 2374 gk20a_writel(g, gr_fecs_new_ctx_r(), 2375 gr_fecs_new_ctx_ptr_f(inst_ptr >> 12) | 2376 nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc, 2377 gr_fecs_new_ctx_target_sys_mem_ncoh_f(), 2378 gr_fecs_new_ctx_target_sys_mem_coh_f(), 2379 gr_fecs_new_ctx_target_vid_mem_f()) | 2380 gr_fecs_new_ctx_valid_m()); 2381 2382 gk20a_writel(g, gr_fecs_arb_ctx_ptr_r(), 2383 gr_fecs_arb_ctx_ptr_ptr_f(inst_ptr >> 12) | 2384 nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc, 2385 gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(), 2386 gr_fecs_arb_ctx_ptr_target_sys_mem_coh_f(), 2387 gr_fecs_arb_ctx_ptr_target_vid_mem_f())); 2388 2389 gk20a_writel(g, gr_fecs_arb_ctx_cmd_r(), 0x7); 2390 2391 /* Wait for arbiter command to complete */ 2392 gr_gk20a_wait_for_fecs_arb_idle(g); 2393 2394 gk20a_writel(g, gr_fecs_current_ctx_r(), 2395 gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) | 2396 gr_fecs_current_ctx_target_m() | 2397 gr_fecs_current_ctx_valid_m()); 2398 /* Send command to arbiter to flush */ 2399 gk20a_writel(g, gr_fecs_arb_ctx_cmd_r(), gr_fecs_arb_ctx_cmd_cmd_s()); 2400 2401 gr_gk20a_wait_for_fecs_arb_idle(g); 2402 2403} 2404 2405void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base, 2406 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset) 2407{ 2408 u32 addr_code32; 2409 u32 addr_data32; 2410 2411 addr_code32 = u64_lo32((addr_base + segments->code.offset) >> 8); 2412 addr_data32 = u64_lo32((addr_base + segments->data.offset) >> 8); 2413 
2414 /* 2415 * Copy falcon bootloader header into dmem at offset 0. 2416 * Configure dmem port 0 for auto-incrementing writes starting at dmem 2417 * offset 0. 2418 */ 2419 gk20a_writel(g, reg_offset + gr_fecs_dmemc_r(0), 2420 gr_fecs_dmemc_offs_f(0) | 2421 gr_fecs_dmemc_blk_f(0) | 2422 gr_fecs_dmemc_aincw_f(1)); 2423 2424 /* Write out the actual data */ 2425 switch (segments->boot_signature) { 2426 case FALCON_UCODE_SIG_T18X_GPCCS_WITH_RESERVED: 2427 case FALCON_UCODE_SIG_T21X_FECS_WITH_DMEM_SIZE: 2428 case FALCON_UCODE_SIG_T21X_FECS_WITH_RESERVED: 2429 case FALCON_UCODE_SIG_T21X_GPCCS_WITH_RESERVED: 2430 case FALCON_UCODE_SIG_T12X_FECS_WITH_RESERVED: 2431 case FALCON_UCODE_SIG_T12X_GPCCS_WITH_RESERVED: 2432 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2433 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2434 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2435 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2436 /* fallthrough */ 2437 case FALCON_UCODE_SIG_T12X_FECS_WITHOUT_RESERVED: 2438 case FALCON_UCODE_SIG_T12X_GPCCS_WITHOUT_RESERVED: 2439 case FALCON_UCODE_SIG_T21X_FECS_WITHOUT_RESERVED: 2440 case FALCON_UCODE_SIG_T21X_FECS_WITHOUT_RESERVED2: 2441 case FALCON_UCODE_SIG_T21X_GPCCS_WITHOUT_RESERVED: 2442 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2443 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2444 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2445 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2446 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 4); 2447 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2448 addr_code32); 2449 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2450 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2451 segments->code.size); 2452 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2453 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2454 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2455 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2456 addr_data32); 
2457 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2458 segments->data.size); 2459 break; 2460 case FALCON_UCODE_SIG_T12X_FECS_OLDER: 2461 case FALCON_UCODE_SIG_T12X_GPCCS_OLDER: 2462 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2463 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2464 addr_code32); 2465 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2466 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2467 segments->code.size); 2468 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2469 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2470 addr_data32); 2471 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2472 segments->data.size); 2473 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 2474 addr_code32); 2475 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2476 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2477 break; 2478 default: 2479 nvgpu_err(g, 2480 "unknown falcon ucode boot signature 0x%08x" 2481 " with reg_offset 0x%08x", 2482 segments->boot_signature, reg_offset); 2483 BUG(); 2484 } 2485} 2486 2487void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base, 2488 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset) 2489{ 2490 u32 addr_load32; 2491 u32 blocks; 2492 u32 b; 2493 u32 dst; 2494 2495 addr_load32 = u64_lo32((addr_base + segments->boot.offset) >> 8); 2496 blocks = ((segments->boot.size + 0xFF) & ~0xFF) >> 8; 2497 2498 /* 2499 * Set the base FB address for the DMA transfer. Subtract off the 256 2500 * byte IMEM block offset such that the relative FB and IMEM offsets 2501 * match, allowing the IMEM tags to be properly created. 
2502 */ 2503 2504 dst = segments->boot_imem_offset; 2505 gk20a_writel(g, reg_offset + gr_fecs_dmatrfbase_r(), 2506 (addr_load32 - (dst >> 8))); 2507 2508 for (b = 0; b < blocks; b++) { 2509 /* Setup destination IMEM offset */ 2510 gk20a_writel(g, reg_offset + gr_fecs_dmatrfmoffs_r(), 2511 dst + (b << 8)); 2512 2513 /* Setup source offset (relative to BASE) */ 2514 gk20a_writel(g, reg_offset + gr_fecs_dmatrffboffs_r(), 2515 dst + (b << 8)); 2516 2517 gk20a_writel(g, reg_offset + gr_fecs_dmatrfcmd_r(), 2518 gr_fecs_dmatrfcmd_imem_f(0x01) | 2519 gr_fecs_dmatrfcmd_write_f(0x00) | 2520 gr_fecs_dmatrfcmd_size_f(0x06) | 2521 gr_fecs_dmatrfcmd_ctxdma_f(0)); 2522 } 2523 2524 /* Specify the falcon boot vector */ 2525 gk20a_writel(g, reg_offset + gr_fecs_bootvec_r(), 2526 gr_fecs_bootvec_vec_f(segments->boot_entry)); 2527} 2528 2529static void gr_gk20a_load_falcon_with_bootloader(struct gk20a *g) 2530{ 2531 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info; 2532 u64 addr_base = ucode_info->surface_desc.gpu_va; 2533 2534 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 0x0); 2535 2536 gr_gk20a_load_falcon_bind_instblk(g); 2537 2538 g->ops.gr.falcon_load_ucode(g, addr_base, 2539 &g->ctxsw_ucode_info.fecs, 0); 2540 2541 g->ops.gr.falcon_load_ucode(g, addr_base, 2542 &g->ctxsw_ucode_info.gpccs, 2543 gr_gpcs_gpccs_falcon_hwcfg_r() - 2544 gr_fecs_falcon_hwcfg_r()); 2545} 2546 2547int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) 2548{ 2549 int err; 2550 2551 nvgpu_log_fn(g, " "); 2552 2553 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { 2554 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7), 2555 gr_fecs_ctxsw_mailbox_value_f(0xc0de7777)); 2556 gk20a_writel(g, gr_gpccs_ctxsw_mailbox_r(7), 2557 gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777)); 2558 } 2559 2560 /* 2561 * In case bootloader is not supported, revert to the old way of 2562 * loading gr ucode, without the faster bootstrap routine. 
2563 */ 2564 if (!nvgpu_is_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP)) { 2565 gr_gk20a_load_falcon_dmem(g); 2566 gr_gk20a_load_falcon_imem(g); 2567 gr_gk20a_start_falcon_ucode(g); 2568 } else { 2569 if (!g->gr.skip_ucode_init) { 2570 err = gr_gk20a_init_ctxsw_ucode(g); 2571 2572 if (err != 0) { 2573 return err; 2574 } 2575 } 2576 gr_gk20a_load_falcon_with_bootloader(g); 2577 g->gr.skip_ucode_init = true; 2578 } 2579 nvgpu_log_fn(g, "done"); 2580 return 0; 2581} 2582 2583int gr_gk20a_set_fecs_watchdog_timeout(struct gk20a *g) 2584{ 2585 gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 0xffffffff); 2586 gk20a_writel(g, gr_fecs_method_data_r(), 0x7fffffff); 2587 gk20a_writel(g, gr_fecs_method_push_r(), 2588 gr_fecs_method_push_adr_set_watchdog_timeout_f()); 2589 2590 return 0; 2591} 2592 2593static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) 2594{ 2595 u32 ret; 2596 2597 nvgpu_log_fn(g, " "); 2598 2599 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL, 2600 GR_IS_UCODE_OP_EQUAL, 2601 eUcodeHandshakeInitComplete, 2602 GR_IS_UCODE_OP_SKIP, 0, false); 2603 if (ret) { 2604 nvgpu_err(g, "falcon ucode init timeout"); 2605 return ret; 2606 } 2607 2608 if (nvgpu_is_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP) || 2609 nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) { 2610 gk20a_writel(g, gr_fecs_current_ctx_r(), 2611 gr_fecs_current_ctx_valid_false_f()); 2612 } 2613 2614 ret = g->ops.gr.set_fecs_watchdog_timeout(g); 2615 if (ret) { 2616 nvgpu_err(g, "fail to set watchdog timeout"); 2617 return ret; 2618 } 2619 2620 nvgpu_log_fn(g, "done"); 2621 return 0; 2622} 2623 2624int gr_gk20a_init_ctx_state(struct gk20a *g) 2625{ 2626 u32 ret; 2627 struct fecs_method_op_gk20a op = { 2628 .mailbox = { .id = 0, .data = 0, 2629 .clr = ~0, .ok = 0, .fail = 0}, 2630 .method.data = 0, 2631 .cond.ok = GR_IS_UCODE_OP_NOT_EQUAL, 2632 .cond.fail = GR_IS_UCODE_OP_SKIP, 2633 }; 2634 2635 nvgpu_log_fn(g, " "); 2636 /* query ctxsw image sizes, if golden context is not created */ 2637 if 
(!g->gr.ctx_vars.golden_image_initialized) { 2638 op.method.addr = 2639 gr_fecs_method_push_adr_discover_image_size_v(); 2640 op.mailbox.ret = &g->gr.ctx_vars.golden_image_size; 2641 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2642 if (ret) { 2643 nvgpu_err(g, 2644 "query golden image size failed"); 2645 return ret; 2646 } 2647 op.method.addr = 2648 gr_fecs_method_push_adr_discover_zcull_image_size_v(); 2649 op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size; 2650 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2651 if (ret) { 2652 nvgpu_err(g, 2653 "query zcull ctx image size failed"); 2654 return ret; 2655 } 2656 op.method.addr = 2657 gr_fecs_method_push_adr_discover_pm_image_size_v(); 2658 op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size; 2659 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2660 if (ret) { 2661 nvgpu_err(g, 2662 "query pm ctx image size failed"); 2663 return ret; 2664 } 2665 g->gr.ctx_vars.priv_access_map_size = 512 * 1024; 2666#ifdef CONFIG_GK20A_CTXSW_TRACE 2667 g->gr.ctx_vars.fecs_trace_buffer_size = 2668 gk20a_fecs_trace_buffer_size(g); 2669#endif 2670 } 2671 2672 nvgpu_log_fn(g, "done"); 2673 return 0; 2674} 2675 2676void gk20a_gr_destroy_ctx_buffer(struct gk20a *g, 2677 struct gr_ctx_buffer_desc *desc) 2678{ 2679 if (desc == NULL) { 2680 return; 2681 } 2682 nvgpu_dma_free(g, &desc->mem); 2683 desc->destroy = NULL; 2684} 2685 2686int gk20a_gr_alloc_ctx_buffer(struct gk20a *g, 2687 struct gr_ctx_buffer_desc *desc, 2688 size_t size) 2689{ 2690 int err = 0; 2691 2692 nvgpu_log_fn(g, " "); 2693 2694 if (nvgpu_mem_is_valid(&desc->mem)) { 2695 return 0; 2696 } 2697 2698 err = nvgpu_dma_alloc_sys(g, size, &desc->mem); 2699 if (err != 0) { 2700 return err; 2701 } 2702 2703 desc->destroy = gk20a_gr_destroy_ctx_buffer; 2704 2705 return err; 2706} 2707 2708static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g) 2709{ 2710 struct gr_gk20a *gr = &g->gr; 2711 u32 i; 2712 2713 for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) { 2714 
/* destroy exists iff buffer is allocated */ 2715 if (gr->global_ctx_buffer[i].destroy) { 2716 gr->global_ctx_buffer[i].destroy(g, 2717 &gr->global_ctx_buffer[i]); 2718 } 2719 } 2720 2721 nvgpu_log_fn(g, "done"); 2722} 2723 2724int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) 2725{ 2726 struct gr_gk20a *gr = &g->gr; 2727 int attr_buffer_size, err; 2728 2729 u32 cb_buffer_size = gr->bundle_cb_default_size * 2730 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(); 2731 2732 u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) * 2733 gr_scc_pagepool_total_pages_byte_granularity_v(); 2734 2735 nvgpu_log_fn(g, " "); 2736 2737 attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g); 2738 2739 nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size); 2740 2741 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR], 2742 cb_buffer_size); 2743 if (err != 0) { 2744 goto clean_up; 2745 } 2746 2747 if (g->ops.secure_alloc) { 2748 err = g->ops.secure_alloc(g, 2749 &gr->global_ctx_buffer[CIRCULAR_VPR], 2750 cb_buffer_size); 2751 if (err != 0) { 2752 goto clean_up; 2753 } 2754 } 2755 2756 nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size); 2757 2758 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL], 2759 pagepool_buffer_size); 2760 if (err != 0) { 2761 goto clean_up; 2762 } 2763 2764 if (g->ops.secure_alloc) { 2765 err = g->ops.secure_alloc(g, 2766 &gr->global_ctx_buffer[PAGEPOOL_VPR], 2767 pagepool_buffer_size); 2768 if (err != 0) { 2769 goto clean_up; 2770 } 2771 } 2772 2773 nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size); 2774 2775 err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE], 2776 attr_buffer_size); 2777 if (err != 0) { 2778 goto clean_up; 2779 } 2780 2781 if (g->ops.secure_alloc) { 2782 err = g->ops.secure_alloc(g, 2783 &gr->global_ctx_buffer[ATTRIBUTE_VPR], 2784 attr_buffer_size); 2785 if (err != 0) { 2786 goto clean_up; 2787 } 2788 } 2789 2790 nvgpu_log_info(g, 
"golden_image_size : %d", 2791 gr->ctx_vars.golden_image_size); 2792 2793 err = gk20a_gr_alloc_ctx_buffer(g, 2794 &gr->global_ctx_buffer[GOLDEN_CTX], 2795 gr->ctx_vars.golden_image_size); 2796 if (err != 0) { 2797 goto clean_up; 2798 } 2799 2800 nvgpu_log_info(g, "priv_access_map_size : %d", 2801 gr->ctx_vars.priv_access_map_size); 2802 2803 err = gk20a_gr_alloc_ctx_buffer(g, 2804 &gr->global_ctx_buffer[PRIV_ACCESS_MAP], 2805 gr->ctx_vars.priv_access_map_size); 2806 2807 if (err != 0) { 2808 goto clean_up; 2809 } 2810 2811#ifdef CONFIG_GK20A_CTXSW_TRACE 2812 nvgpu_log_info(g, "fecs_trace_buffer_size : %d", 2813 gr->ctx_vars.fecs_trace_buffer_size); 2814 2815 err = nvgpu_dma_alloc_sys(g, 2816 gr->ctx_vars.fecs_trace_buffer_size, 2817 &gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem); 2818 if (err != 0) { 2819 goto clean_up; 2820 } 2821 2822 gr->global_ctx_buffer[FECS_TRACE_BUFFER].destroy = 2823 gk20a_gr_destroy_ctx_buffer; 2824#endif 2825 2826 nvgpu_log_fn(g, "done"); 2827 return 0; 2828 2829 clean_up: 2830 nvgpu_err(g, "fail"); 2831 gr_gk20a_free_global_ctx_buffers(g); 2832 return -ENOMEM; 2833} 2834 2835static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g, 2836 struct vm_gk20a *vm, 2837 struct nvgpu_gr_ctx *gr_ctx) 2838{ 2839 u64 *g_bfr_va = gr_ctx->global_ctx_buffer_va; 2840 u64 *g_bfr_size = gr_ctx->global_ctx_buffer_size; 2841 int *g_bfr_index = gr_ctx->global_ctx_buffer_index; 2842 u32 i; 2843 2844 nvgpu_log_fn(g, " "); 2845 2846 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) { 2847 if (g_bfr_index[i]) { 2848 struct nvgpu_mem *mem; 2849 2850 /* 2851 * Translate from VA index to buffer index to determine 2852 * the correct struct nvgpu_mem to use. Handles the VPR 2853 * vs non-VPR difference in context images. 
2854 */ 2855 mem = &g->gr.global_ctx_buffer[g_bfr_index[i]].mem; 2856 2857 nvgpu_gmmu_unmap(vm, mem, g_bfr_va[i]); 2858 } 2859 } 2860 2861 memset(g_bfr_va, 0, sizeof(gr_ctx->global_ctx_buffer_va)); 2862 memset(g_bfr_size, 0, sizeof(gr_ctx->global_ctx_buffer_size)); 2863 memset(g_bfr_index, 0, sizeof(gr_ctx->global_ctx_buffer_index)); 2864 2865 gr_ctx->global_ctx_buffer_mapped = false; 2866} 2867 2868int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, 2869 struct channel_gk20a *c) 2870{ 2871 struct tsg_gk20a *tsg; 2872 struct vm_gk20a *ch_vm = c->vm; 2873 u64 *g_bfr_va; 2874 u64 *g_bfr_size; 2875 int *g_bfr_index; 2876 struct gr_gk20a *gr = &g->gr; 2877 struct nvgpu_mem *mem; 2878 u64 gpu_va; 2879 2880 nvgpu_log_fn(g, " "); 2881 2882 tsg = tsg_gk20a_from_ch(c); 2883 if (tsg == NULL) { 2884 return -EINVAL; 2885 } 2886 2887 g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va; 2888 g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size; 2889 g_bfr_index = tsg->gr_ctx.global_ctx_buffer_index; 2890 2891 /* Circular Buffer */ 2892 if (c->vpr && 2893 nvgpu_mem_is_valid(&gr->global_ctx_buffer[CIRCULAR_VPR].mem)) { 2894 mem = &gr->global_ctx_buffer[CIRCULAR_VPR].mem; 2895 g_bfr_index[CIRCULAR_VA] = CIRCULAR_VPR; 2896 } else { 2897 mem = &gr->global_ctx_buffer[CIRCULAR].mem; 2898 g_bfr_index[CIRCULAR_VA] = CIRCULAR; 2899 } 2900 2901 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 2902 NVGPU_VM_MAP_CACHEABLE, 2903 gk20a_mem_flag_none, true, mem->aperture); 2904 if (gpu_va == 0ULL) { 2905 goto clean_up; 2906 } 2907 g_bfr_va[CIRCULAR_VA] = gpu_va; 2908 g_bfr_size[CIRCULAR_VA] = mem->size; 2909 2910 /* Attribute Buffer */ 2911 if (c->vpr && 2912 nvgpu_mem_is_valid(&gr->global_ctx_buffer[ATTRIBUTE_VPR].mem)) { 2913 mem = &gr->global_ctx_buffer[ATTRIBUTE_VPR].mem; 2914 g_bfr_index[ATTRIBUTE_VA] = ATTRIBUTE_VPR; 2915 } else { 2916 mem = &gr->global_ctx_buffer[ATTRIBUTE].mem; 2917 g_bfr_index[ATTRIBUTE_VA] = ATTRIBUTE; 2918 } 2919 2920 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 2921 
NVGPU_VM_MAP_CACHEABLE, 2922 gk20a_mem_flag_none, false, mem->aperture); 2923 if (gpu_va == 0ULL) { 2924 goto clean_up; 2925 } 2926 g_bfr_va[ATTRIBUTE_VA] = gpu_va; 2927 g_bfr_size[ATTRIBUTE_VA] = mem->size; 2928 2929 /* Page Pool */ 2930 if (c->vpr && 2931 nvgpu_mem_is_valid(&gr->global_ctx_buffer[PAGEPOOL_VPR].mem)) { 2932 mem = &gr->global_ctx_buffer[PAGEPOOL_VPR].mem; 2933 g_bfr_index[PAGEPOOL_VA] = PAGEPOOL_VPR; 2934 } else { 2935 mem = &gr->global_ctx_buffer[PAGEPOOL].mem; 2936 g_bfr_index[PAGEPOOL_VA] = PAGEPOOL; 2937 } 2938 2939 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 2940 NVGPU_VM_MAP_CACHEABLE, 2941 gk20a_mem_flag_none, true, mem->aperture); 2942 if (gpu_va == 0ULL) { 2943 goto clean_up; 2944 } 2945 g_bfr_va[PAGEPOOL_VA] = gpu_va; 2946 g_bfr_size[PAGEPOOL_VA] = mem->size; 2947 2948 /* Golden Image */ 2949 mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem; 2950 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0, 2951 gk20a_mem_flag_none, true, mem->aperture); 2952 if (gpu_va == 0ULL) { 2953 goto clean_up; 2954 } 2955 g_bfr_va[GOLDEN_CTX_VA] = gpu_va; 2956 g_bfr_size[GOLDEN_CTX_VA] = mem->size; 2957 g_bfr_index[GOLDEN_CTX_VA] = GOLDEN_CTX; 2958 2959 /* Priv register Access Map */ 2960 mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem; 2961 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0, 2962 gk20a_mem_flag_none, true, mem->aperture); 2963 if (gpu_va == 0ULL) { 2964 goto clean_up; 2965 } 2966 g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va; 2967 g_bfr_size[PRIV_ACCESS_MAP_VA] = mem->size; 2968 g_bfr_index[PRIV_ACCESS_MAP_VA] = PRIV_ACCESS_MAP; 2969 2970 tsg->gr_ctx.global_ctx_buffer_mapped = true; 2971 2972#ifdef CONFIG_GK20A_CTXSW_TRACE 2973 /* FECS trace buffer */ 2974 if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) { 2975 mem = &gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem; 2976 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0, 2977 gk20a_mem_flag_none, true, mem->aperture); 2978 if (!gpu_va) 2979 goto clean_up; 2980 g_bfr_va[FECS_TRACE_BUFFER_VA] = gpu_va; 2981 
g_bfr_size[FECS_TRACE_BUFFER_VA] = mem->size; 2982 g_bfr_index[FECS_TRACE_BUFFER_VA] = FECS_TRACE_BUFFER; 2983 } 2984#endif 2985 2986 return 0; 2987 2988clean_up: 2989 gr_gk20a_unmap_global_ctx_buffers(g, ch_vm, &tsg->gr_ctx); 2990 2991 return -ENOMEM; 2992} 2993 2994int gr_gk20a_alloc_gr_ctx(struct gk20a *g, 2995 struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm, 2996 u32 class, 2997 u32 padding) 2998{ 2999 struct gr_gk20a *gr = &g->gr; 3000 int err = 0; 3001 3002 nvgpu_log_fn(g, " "); 3003 3004 if (gr->ctx_vars.buffer_size == 0) { 3005 return 0; 3006 } 3007 3008 /* alloc channel gr ctx buffer */ 3009 gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size; 3010 gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size; 3011 3012 err = nvgpu_dma_alloc(g, gr->ctx_vars.buffer_total_size, &gr_ctx->mem); 3013 if (err != 0) { 3014 return err; 3015 } 3016 3017 gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm, 3018 &gr_ctx->mem, 3019 gr_ctx->mem.size, 3020 0, /* not GPU-cacheable */ 3021 gk20a_mem_flag_none, true, 3022 gr_ctx->mem.aperture); 3023 if (gr_ctx->mem.gpu_va == 0ULL) { 3024 goto err_free_mem; 3025 } 3026 3027 return 0; 3028 3029 err_free_mem: 3030 nvgpu_dma_free(g, &gr_ctx->mem); 3031 3032 return err; 3033} 3034 3035static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g, 3036 struct tsg_gk20a *tsg, u32 class, u32 padding) 3037{ 3038 struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx; 3039 int err; 3040 3041 if (tsg->vm == NULL) { 3042 nvgpu_err(tsg->g, "No address space bound"); 3043 return -ENOMEM; 3044 } 3045 3046 err = g->ops.gr.alloc_gr_ctx(g, gr_ctx, tsg->vm, class, padding); 3047 if (err != 0) { 3048 return err; 3049 } 3050 3051 gr_ctx->tsgid = tsg->tsgid; 3052 3053 return 0; 3054} 3055 3056void gr_gk20a_free_gr_ctx(struct gk20a *g, 3057 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx) 3058{ 3059 nvgpu_log_fn(g, " "); 3060 3061 if (gr_ctx->mem.gpu_va) { 3062 gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx); 3063 gr_gk20a_free_channel_patch_ctx(g, vm, gr_ctx); 3064 
gr_gk20a_free_channel_pm_ctx(g, vm, gr_ctx); 3065 3066 if ((g->ops.gr.dump_ctxsw_stats != NULL) && 3067 g->gr.ctx_vars.dump_ctxsw_stats_on_channel_close) { 3068 g->ops.gr.dump_ctxsw_stats(g, vm, gr_ctx); 3069 } 3070 3071 nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer); 3072 nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer); 3073 nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer); 3074 nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer); 3075 nvgpu_dma_unmap_free(vm, &gr_ctx->mem); 3076 3077 memset(gr_ctx, 0, sizeof(*gr_ctx)); 3078 } 3079} 3080 3081void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg) 3082{ 3083 struct gk20a *g = tsg->g; 3084 3085 if (tsg->vm == NULL) { 3086 nvgpu_err(g, "No address space bound"); 3087 return; 3088 } 3089 tsg->g->ops.gr.free_gr_ctx(g, tsg->vm, &tsg->gr_ctx); 3090} 3091 3092u32 gr_gk20a_get_patch_slots(struct gk20a *g) 3093{ 3094 return PATCH_CTX_SLOTS_PER_PAGE; 3095} 3096 3097static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g, 3098 struct channel_gk20a *c) 3099{ 3100 struct tsg_gk20a *tsg; 3101 struct patch_desc *patch_ctx; 3102 struct vm_gk20a *ch_vm = c->vm; 3103 u32 alloc_size; 3104 int err = 0; 3105 3106 nvgpu_log_fn(g, " "); 3107 3108 tsg = tsg_gk20a_from_ch(c); 3109 if (tsg == NULL) { 3110 return -EINVAL; 3111 } 3112 3113 patch_ctx = &tsg->gr_ctx.patch_ctx; 3114 alloc_size = g->ops.gr.get_patch_slots(g) * 3115 PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY; 3116 3117 nvgpu_log(g, gpu_dbg_info, "patch buffer size in entries: %d", 3118 alloc_size); 3119 3120 err = nvgpu_dma_alloc_map_sys(ch_vm, 3121 alloc_size * sizeof(u32), &patch_ctx->mem); 3122 if (err != 0) { 3123 return err; 3124 } 3125 3126 nvgpu_log_fn(g, "done"); 3127 return 0; 3128} 3129 3130static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g, 3131 struct vm_gk20a *vm, 3132 struct nvgpu_gr_ctx *gr_ctx) 3133{ 3134 struct patch_desc *patch_ctx = &gr_ctx->patch_ctx; 3135 3136 nvgpu_log_fn(g, " "); 3137 3138 if (patch_ctx->mem.gpu_va) { 3139 
nvgpu_gmmu_unmap(vm, &patch_ctx->mem, 3140 patch_ctx->mem.gpu_va); 3141 } 3142 3143 nvgpu_dma_free(g, &patch_ctx->mem); 3144 patch_ctx->data_count = 0; 3145} 3146 3147static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g, 3148 struct vm_gk20a *vm, 3149 struct nvgpu_gr_ctx *gr_ctx) 3150{ 3151 struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx; 3152 3153 nvgpu_log_fn(g, " "); 3154 3155 if (pm_ctx->mem.gpu_va) { 3156 nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va); 3157 3158 nvgpu_dma_free(g, &pm_ctx->mem); 3159 } 3160} 3161 3162int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) 3163{ 3164 struct gk20a *g = c->g; 3165 struct nvgpu_gr_ctx *gr_ctx; 3166 struct tsg_gk20a *tsg = NULL; 3167 int err = 0; 3168 3169 nvgpu_log_fn(g, " "); 3170 3171 /* an address space needs to have been bound at this point.*/ 3172 if (!gk20a_channel_as_bound(c) && (c->vm == NULL)) { 3173 nvgpu_err(g, 3174 "not bound to address space at time" 3175 " of grctx allocation"); 3176 return -EINVAL; 3177 } 3178 3179 if (!g->ops.gr.is_valid_class(g, class_num)) { 3180 nvgpu_err(g, 3181 "invalid obj class 0x%x", class_num); 3182 err = -EINVAL; 3183 goto out; 3184 } 3185 c->obj_class = class_num; 3186 3187 tsg = tsg_gk20a_from_ch(c); 3188 if (tsg == NULL) { 3189 return -EINVAL; 3190 } 3191 3192 gr_ctx = &tsg->gr_ctx; 3193 3194 if (!nvgpu_mem_is_valid(&gr_ctx->mem)) { 3195 tsg->vm = c->vm; 3196 nvgpu_vm_get(tsg->vm); 3197 err = gr_gk20a_alloc_tsg_gr_ctx(g, tsg, 3198 class_num, 3199 flags); 3200 if (err != 0) { 3201 nvgpu_err(g, 3202 "fail to allocate TSG gr ctx buffer"); 3203 nvgpu_vm_put(tsg->vm); 3204 tsg->vm = NULL; 3205 goto out; 3206 } 3207 3208 /* allocate patch buffer */ 3209 if (!nvgpu_mem_is_valid(&gr_ctx->patch_ctx.mem)) { 3210 gr_ctx->patch_ctx.data_count = 0; 3211 err = gr_gk20a_alloc_channel_patch_ctx(g, c); 3212 if (err != 0) { 3213 nvgpu_err(g, 3214 "fail to allocate patch buffer"); 3215 goto out; 3216 } 3217 } 3218 3219 /* map global buffer to channel gpu_va and 
commit */ 3220 err = g->ops.gr.map_global_ctx_buffers(g, c); 3221 if (err != 0) { 3222 nvgpu_err(g, 3223 "fail to map global ctx buffer"); 3224 goto out; 3225 } 3226 g->ops.gr.commit_global_ctx_buffers(g, c, true); 3227 3228 /* commit gr ctx buffer */ 3229 err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va); 3230 if (err != 0) { 3231 nvgpu_err(g, 3232 "fail to commit gr ctx buffer"); 3233 goto out; 3234 } 3235 3236 /* init golden image */ 3237 err = gr_gk20a_init_golden_ctx_image(g, c); 3238 if (err != 0) { 3239 nvgpu_err(g, 3240 "fail to init golden ctx image"); 3241 goto out; 3242 } 3243 3244 /* Re-enable ELPG now that golden image has been initialized. 3245 * The PMU PG init code may already have tried to enable elpg, but 3246 * would not have been able to complete this action since the golden 3247 * image hadn't been initialized yet, so do this now. 3248 */ 3249 err = nvgpu_pmu_reenable_elpg(g); 3250 if (err != 0) { 3251 nvgpu_err(g, "fail to re-enable elpg"); 3252 goto out; 3253 } 3254 3255 /* load golden image */ 3256 gr_gk20a_load_golden_ctx_image(g, c); 3257 if (err != 0) { 3258 nvgpu_err(g, 3259 "fail to load golden ctx image"); 3260 goto out; 3261 } 3262#ifdef CONFIG_GK20A_CTXSW_TRACE 3263 if (g->ops.fecs_trace.bind_channel && !c->vpr) { 3264 err = g->ops.fecs_trace.bind_channel(g, c); 3265 if (err != 0) { 3266 nvgpu_warn(g, 3267 "fail to bind channel for ctxsw trace"); 3268 } 3269 } 3270#endif 3271 3272 if (g->ops.gr.set_czf_bypass) { 3273 g->ops.gr.set_czf_bypass(g, c); 3274 } 3275 3276 /* PM ctxt switch is off by default */ 3277 gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f(); 3278 } else { 3279 /* commit gr ctx buffer */ 3280 err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va); 3281 if (err != 0) { 3282 nvgpu_err(g, 3283 "fail to commit gr ctx buffer"); 3284 goto out; 3285 } 3286#ifdef CONFIG_GK20A_CTXSW_TRACE 3287 if (g->ops.fecs_trace.bind_channel && !c->vpr) { 3288 err = g->ops.fecs_trace.bind_channel(g, c); 3289 if (err != 0) { 
3290 nvgpu_warn(g, 3291 "fail to bind channel for ctxsw trace"); 3292 } 3293 } 3294#endif 3295 } 3296 3297 nvgpu_log_fn(g, "done"); 3298 return 0; 3299out: 3300 /* 1. gr_ctx, patch_ctx and global ctx buffer mapping 3301 can be reused so no need to release them. 3302 2. golden image init and load is a one time thing so if 3303 they pass, no need to undo. */ 3304 nvgpu_err(g, "fail"); 3305 return err; 3306} 3307 3308static void gk20a_remove_gr_support(struct gr_gk20a *gr) 3309{ 3310 struct gk20a *g = gr->g; 3311 3312 nvgpu_log_fn(g, " "); 3313 3314 gr_gk20a_free_cyclestats_snapshot_data(g); 3315 3316 gr_gk20a_free_global_ctx_buffers(g); 3317 3318 nvgpu_dma_free(g, &gr->compbit_store.mem); 3319 3320 memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc)); 3321 3322 nvgpu_kfree(g, gr->gpc_tpc_count); 3323 nvgpu_kfree(g, gr->gpc_zcb_count); 3324 nvgpu_kfree(g, gr->gpc_ppc_count); 3325 nvgpu_kfree(g, gr->pes_tpc_count[0]); 3326 nvgpu_kfree(g, gr->pes_tpc_count[1]); 3327 nvgpu_kfree(g, gr->pes_tpc_mask[0]); 3328 nvgpu_kfree(g, gr->pes_tpc_mask[1]); 3329 nvgpu_kfree(g, gr->sm_to_cluster); 3330 nvgpu_kfree(g, gr->gpc_skip_mask); 3331 nvgpu_kfree(g, gr->map_tiles); 3332 nvgpu_kfree(g, gr->fbp_rop_l2_en_mask); 3333 gr->gpc_tpc_count = NULL; 3334 gr->gpc_zcb_count = NULL; 3335 gr->gpc_ppc_count = NULL; 3336 gr->pes_tpc_count[0] = NULL; 3337 gr->pes_tpc_count[1] = NULL; 3338 gr->pes_tpc_mask[0] = NULL; 3339 gr->pes_tpc_mask[1] = NULL; 3340 gr->gpc_skip_mask = NULL; 3341 gr->map_tiles = NULL; 3342 gr->fbp_rop_l2_en_mask = NULL; 3343 3344 gr->ctx_vars.valid = false; 3345 nvgpu_kfree(g, gr->ctx_vars.ucode.fecs.inst.l); 3346 nvgpu_kfree(g, gr->ctx_vars.ucode.fecs.data.l); 3347 nvgpu_kfree(g, gr->ctx_vars.ucode.gpccs.inst.l); 3348 nvgpu_kfree(g, gr->ctx_vars.ucode.gpccs.data.l); 3349 nvgpu_kfree(g, gr->ctx_vars.sw_bundle_init.l); 3350 nvgpu_kfree(g, gr->ctx_vars.sw_veid_bundle_init.l); 3351 nvgpu_kfree(g, gr->ctx_vars.sw_method_init.l); 3352 nvgpu_kfree(g, 
gr->ctx_vars.sw_ctx_load.l); 3353 nvgpu_kfree(g, gr->ctx_vars.sw_non_ctx_load.l); 3354 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.sys.l); 3355 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.gpc.l); 3356 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.tpc.l); 3357 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.zcull_gpc.l); 3358 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.ppc.l); 3359 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_sys.l); 3360 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_gpc.l); 3361 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_tpc.l); 3362 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ppc.l); 3363 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.perf_sys.l); 3364 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.fbp.l); 3365 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.perf_gpc.l); 3366 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.fbp_router.l); 3367 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.gpc_router.l); 3368 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ltc.l); 3369 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_fbpa.l); 3370 nvgpu_kfree(g, gr->ctx_vars.sw_bundle64_init.l); 3371 nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_cau.l); 3372 3373 nvgpu_vfree(g, gr->ctx_vars.local_golden_image); 3374 gr->ctx_vars.local_golden_image = NULL; 3375 3376 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map) { 3377 nvgpu_big_free(g, gr->ctx_vars.hwpm_ctxsw_buffer_offset_map); 3378 } 3379 gr->ctx_vars.hwpm_ctxsw_buffer_offset_map = NULL; 3380 3381 gk20a_comptag_allocator_destroy(g, &gr->comp_tags); 3382 3383 nvgpu_ecc_remove_support(g); 3384} 3385 3386static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) 3387{ 3388 u32 gpc_index, pes_index; 3389 u32 pes_tpc_mask; 3390 u32 pes_tpc_count; 3391 u32 pes_heavy_index; 3392 u32 gpc_new_skip_mask; 3393 u32 tmp; 3394 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 3395 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 3396 3397 tmp = gk20a_readl(g, pri_ringmaster_enum_fbp_r()); 3398 gr->num_fbps = pri_ringmaster_enum_fbp_count_v(tmp); 3399 3400 tmp = gk20a_readl(g, 
top_num_gpcs_r()); 3401 gr->max_gpc_count = top_num_gpcs_value_v(tmp); 3402 3403 tmp = gk20a_readl(g, top_num_fbps_r()); 3404 gr->max_fbps_count = top_num_fbps_value_v(tmp); 3405 3406 gr->fbp_en_mask = g->ops.gr.get_fbp_en_mask(g); 3407 3408 if (gr->fbp_rop_l2_en_mask == NULL) { 3409 gr->fbp_rop_l2_en_mask = 3410 nvgpu_kzalloc(g, gr->max_fbps_count * sizeof(u32)); 3411 if (gr->fbp_rop_l2_en_mask == NULL) { 3412 goto clean_up; 3413 } 3414 } else { 3415 memset(gr->fbp_rop_l2_en_mask, 0, gr->max_fbps_count * 3416 sizeof(u32)); 3417 } 3418 3419 tmp = gk20a_readl(g, top_tpc_per_gpc_r()); 3420 gr->max_tpc_per_gpc_count = top_tpc_per_gpc_value_v(tmp); 3421 3422 gr->max_tpc_count = gr->max_gpc_count * gr->max_tpc_per_gpc_count; 3423 3424 tmp = gk20a_readl(g, top_num_fbps_r()); 3425 gr->sys_count = top_num_fbps_value_v(tmp); 3426 3427 tmp = gk20a_readl(g, pri_ringmaster_enum_gpc_r()); 3428 gr->gpc_count = pri_ringmaster_enum_gpc_count_v(tmp); 3429 3430 gr->pe_count_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); 3431 if (WARN(gr->pe_count_per_gpc > GK20A_GR_MAX_PES_PER_GPC, 3432 "too many pes per gpc\n")) { 3433 goto clean_up; 3434 } 3435 3436 gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS); 3437 3438 if (gr->gpc_count == 0U) { 3439 nvgpu_err(g, "gpc_count==0!"); 3440 goto clean_up; 3441 } 3442 3443 if (gr->gpc_tpc_count == NULL) { 3444 gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * 3445 sizeof(u32)); 3446 } else { 3447 memset(gr->gpc_tpc_count, 0, gr->gpc_count * 3448 sizeof(u32)); 3449 } 3450 3451 if (gr->gpc_tpc_mask == NULL) { 3452 gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->max_gpc_count * 3453 sizeof(u32)); 3454 } else { 3455 memset(gr->gpc_tpc_mask, 0, gr->max_gpc_count * 3456 sizeof(u32)); 3457 } 3458 3459 if (gr->gpc_zcb_count == NULL) { 3460 gr->gpc_zcb_count = nvgpu_kzalloc(g, gr->gpc_count * 3461 sizeof(u32)); 3462 } else { 3463 memset(gr->gpc_zcb_count, 0, gr->gpc_count * 3464 sizeof(u32)); 3465 } 3466 3467 if 
(gr->gpc_ppc_count == NULL) { 3468 gr->gpc_ppc_count = nvgpu_kzalloc(g, gr->gpc_count * 3469 sizeof(u32)); 3470 } else { 3471 memset(gr->gpc_ppc_count, 0, gr->gpc_count * 3472 sizeof(u32)); 3473 } 3474 3475 if (gr->gpc_skip_mask == NULL) { 3476 gr->gpc_skip_mask = 3477 nvgpu_kzalloc(g, gr_pd_dist_skip_table__size_1_v() * 3478 4 * sizeof(u32)); 3479 } else { 3480 memset(gr->gpc_skip_mask, 0, gr_pd_dist_skip_table__size_1_v() * 3481 4 * sizeof(u32)); 3482 } 3483 3484 if ((gr->gpc_tpc_count == NULL) || (gr->gpc_tpc_mask == NULL) || 3485 (gr->gpc_zcb_count == NULL) || (gr->gpc_ppc_count == NULL) || 3486 (gr->gpc_skip_mask == NULL)) { 3487 goto clean_up; 3488 } 3489 3490 for (gpc_index = 0; gpc_index < gr->max_gpc_count; gpc_index++) { 3491 if (g->ops.gr.get_gpc_tpc_mask) { 3492 gr->gpc_tpc_mask[gpc_index] = 3493 g->ops.gr.get_gpc_tpc_mask(g, gpc_index); 3494 } 3495 } 3496 3497 gr->ppc_count = 0; 3498 gr->tpc_count = 0; 3499 gr->zcb_count = 0; 3500 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3501 tmp = gk20a_readl(g, gr_gpc0_fs_gpc_r() + 3502 gpc_stride * gpc_index); 3503 3504 gr->gpc_tpc_count[gpc_index] = 3505 gr_gpc0_fs_gpc_num_available_tpcs_v(tmp); 3506 gr->tpc_count += gr->gpc_tpc_count[gpc_index]; 3507 3508 gr->gpc_zcb_count[gpc_index] = 3509 gr_gpc0_fs_gpc_num_available_zculls_v(tmp); 3510 gr->zcb_count += gr->gpc_zcb_count[gpc_index]; 3511 3512 for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) { 3513 if (gr->pes_tpc_count[pes_index] == NULL) { 3514 gr->pes_tpc_count[pes_index] = 3515 nvgpu_kzalloc(g, gr->gpc_count * 3516 sizeof(u32)); 3517 gr->pes_tpc_mask[pes_index] = 3518 nvgpu_kzalloc(g, gr->gpc_count * 3519 sizeof(u32)); 3520 if ((gr->pes_tpc_count[pes_index] == NULL) || 3521 (gr->pes_tpc_mask[pes_index] == NULL)) { 3522 goto clean_up; 3523 } 3524 } 3525 3526 tmp = gk20a_readl(g, 3527 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(pes_index) + 3528 gpc_index * gpc_stride); 3529 3530 pes_tpc_mask = 
gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(tmp); 3531 pes_tpc_count = count_bits(pes_tpc_mask); 3532 3533 /* detect PES presence by seeing if there are 3534 * TPCs connected to it. 3535 */ 3536 if (pes_tpc_count != 0) { 3537 gr->gpc_ppc_count[gpc_index]++; 3538 } 3539 3540 gr->pes_tpc_count[pes_index][gpc_index] = pes_tpc_count; 3541 gr->pes_tpc_mask[pes_index][gpc_index] = pes_tpc_mask; 3542 } 3543 3544 gr->ppc_count += gr->gpc_ppc_count[gpc_index]; 3545 3546 gpc_new_skip_mask = 0; 3547 if (gr->pe_count_per_gpc > 1 && 3548 gr->pes_tpc_count[0][gpc_index] + 3549 gr->pes_tpc_count[1][gpc_index] == 5) { 3550 pes_heavy_index = 3551 gr->pes_tpc_count[0][gpc_index] > 3552 gr->pes_tpc_count[1][gpc_index] ? 0 : 1; 3553 3554 gpc_new_skip_mask = 3555 gr->pes_tpc_mask[pes_heavy_index][gpc_index] ^ 3556 (gr->pes_tpc_mask[pes_heavy_index][gpc_index] & 3557 (gr->pes_tpc_mask[pes_heavy_index][gpc_index] - 1)); 3558 3559 } else if (gr->pe_count_per_gpc > 1 && 3560 (gr->pes_tpc_count[0][gpc_index] + 3561 gr->pes_tpc_count[1][gpc_index] == 4) && 3562 (gr->pes_tpc_count[0][gpc_index] != 3563 gr->pes_tpc_count[1][gpc_index])) { 3564 pes_heavy_index = 3565 gr->pes_tpc_count[0][gpc_index] > 3566 gr->pes_tpc_count[1][gpc_index] ? 
0 : 1; 3567 3568 gpc_new_skip_mask = 3569 gr->pes_tpc_mask[pes_heavy_index][gpc_index] ^ 3570 (gr->pes_tpc_mask[pes_heavy_index][gpc_index] & 3571 (gr->pes_tpc_mask[pes_heavy_index][gpc_index] - 1)); 3572 } 3573 gr->gpc_skip_mask[gpc_index] = gpc_new_skip_mask; 3574 } 3575 3576 /* allocate for max tpc per gpc */ 3577 if (gr->sm_to_cluster == NULL) { 3578 gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count * 3579 gr->max_tpc_per_gpc_count * 3580 sm_per_tpc * sizeof(struct sm_info)); 3581 if (!gr->sm_to_cluster) 3582 goto clean_up; 3583 } else { 3584 memset(gr->sm_to_cluster, 0, gr->gpc_count * 3585 gr->max_tpc_per_gpc_count * 3586 sm_per_tpc * sizeof(struct sm_info)); 3587 } 3588 gr->no_of_sm = 0; 3589 3590 nvgpu_log_info(g, "fbps: %d", gr->num_fbps); 3591 nvgpu_log_info(g, "max_gpc_count: %d", gr->max_gpc_count); 3592 nvgpu_log_info(g, "max_fbps_count: %d", gr->max_fbps_count); 3593 nvgpu_log_info(g, "max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count); 3594 nvgpu_log_info(g, "max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count); 3595 nvgpu_log_info(g, "max_tpc_count: %d", gr->max_tpc_count); 3596 nvgpu_log_info(g, "sys_count: %d", gr->sys_count); 3597 nvgpu_log_info(g, "gpc_count: %d", gr->gpc_count); 3598 nvgpu_log_info(g, "pe_count_per_gpc: %d", gr->pe_count_per_gpc); 3599 nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count); 3600 nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count); 3601 3602 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3603 nvgpu_log_info(g, "gpc_tpc_count[%d] : %d", 3604 gpc_index, gr->gpc_tpc_count[gpc_index]); 3605 } 3606 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3607 nvgpu_log_info(g, "gpc_zcb_count[%d] : %d", 3608 gpc_index, gr->gpc_zcb_count[gpc_index]); 3609 } 3610 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3611 nvgpu_log_info(g, "gpc_ppc_count[%d] : %d", 3612 gpc_index, gr->gpc_ppc_count[gpc_index]); 3613 } 3614 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3615 
nvgpu_log_info(g, "gpc_skip_mask[%d] : %d", 3616 gpc_index, gr->gpc_skip_mask[gpc_index]); 3617 } 3618 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3619 for (pes_index = 0; 3620 pes_index < gr->pe_count_per_gpc; 3621 pes_index++) { 3622 nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d", 3623 pes_index, gpc_index, 3624 gr->pes_tpc_count[pes_index][gpc_index]); 3625 } 3626 } 3627 3628 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3629 for (pes_index = 0; 3630 pes_index < gr->pe_count_per_gpc; 3631 pes_index++) { 3632 nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d", 3633 pes_index, gpc_index, 3634 gr->pes_tpc_mask[pes_index][gpc_index]); 3635 } 3636 } 3637 3638 g->ops.gr.bundle_cb_defaults(g); 3639 g->ops.gr.cb_size_default(g); 3640 g->ops.gr.calc_global_ctx_buffer_size(g); 3641 gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(); 3642 3643 nvgpu_log_info(g, "bundle_cb_default_size: %d", 3644 gr->bundle_cb_default_size); 3645 nvgpu_log_info(g, "min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth); 3646 nvgpu_log_info(g, "bundle_cb_token_limit: %d", gr->bundle_cb_token_limit); 3647 nvgpu_log_info(g, "attrib_cb_default_size: %d", 3648 gr->attrib_cb_default_size); 3649 nvgpu_log_info(g, "attrib_cb_size: %d", gr->attrib_cb_size); 3650 nvgpu_log_info(g, "alpha_cb_default_size: %d", gr->alpha_cb_default_size); 3651 nvgpu_log_info(g, "alpha_cb_size: %d", gr->alpha_cb_size); 3652 nvgpu_log_info(g, "timeslice_mode: %d", gr->timeslice_mode); 3653 3654 return 0; 3655 3656clean_up: 3657 return -ENOMEM; 3658} 3659 3660static u32 prime_set[18] = { 3661 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61 }; 3662 3663static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr) 3664{ 3665 s32 comm_denom; 3666 s32 mul_factor; 3667 s32 *init_frac = NULL; 3668 s32 *init_err = NULL; 3669 s32 *run_err = NULL; 3670 s32 *sorted_num_tpcs = NULL; 3671 s32 *sorted_to_unsorted_gpc_map = NULL; 3672 u32 gpc_index; 3673 u32 gpc_mark 
= 0; 3674 u32 num_tpc; 3675 u32 max_tpc_count = 0; 3676 u32 swap; 3677 u32 tile_count; 3678 u32 index; 3679 bool delete_map = false; 3680 bool gpc_sorted; 3681 int ret = 0; 3682 int num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); 3683 int num_tpc_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_TPC_PER_GPC); 3684 int map_tile_count = num_gpcs * num_tpc_per_gpc; 3685 3686 init_frac = nvgpu_kzalloc(g, num_gpcs * sizeof(s32)); 3687 init_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32)); 3688 run_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32)); 3689 sorted_num_tpcs = 3690 nvgpu_kzalloc(g, num_gpcs * num_tpc_per_gpc * sizeof(s32)); 3691 sorted_to_unsorted_gpc_map = 3692 nvgpu_kzalloc(g, num_gpcs * sizeof(s32)); 3693 3694 if (!((init_frac != NULL) && 3695 (init_err != NULL) && 3696 (run_err != NULL) && 3697 (sorted_num_tpcs != NULL) && 3698 (sorted_to_unsorted_gpc_map != NULL))) { 3699 ret = -ENOMEM; 3700 goto clean_up; 3701 } 3702 3703 gr->map_row_offset = INVALID_SCREEN_TILE_ROW_OFFSET; 3704 3705 if (gr->tpc_count == 3) { 3706 gr->map_row_offset = 2; 3707 } else if (gr->tpc_count < 3) { 3708 gr->map_row_offset = 1; 3709 } else { 3710 gr->map_row_offset = 3; 3711 3712 for (index = 1; index < 18; index++) { 3713 u32 prime = prime_set[index]; 3714 if ((gr->tpc_count % prime) != 0) { 3715 gr->map_row_offset = prime; 3716 break; 3717 } 3718 } 3719 } 3720 3721 switch (gr->tpc_count) { 3722 case 15: 3723 gr->map_row_offset = 6; 3724 break; 3725 case 14: 3726 gr->map_row_offset = 5; 3727 break; 3728 case 13: 3729 gr->map_row_offset = 2; 3730 break; 3731 case 11: 3732 gr->map_row_offset = 7; 3733 break; 3734 case 10: 3735 gr->map_row_offset = 6; 3736 break; 3737 case 7: 3738 case 5: 3739 gr->map_row_offset = 1; 3740 break; 3741 default: 3742 break; 3743 } 3744 3745 if (gr->map_tiles) { 3746 if (gr->map_tile_count != gr->tpc_count) { 3747 delete_map = true; 3748 } 3749 3750 for (tile_count = 0; tile_count < gr->map_tile_count; tile_count++) { 3751 if 
(gr_gk20a_get_map_tile_count(gr, tile_count) 3752 >= gr->tpc_count) { 3753 delete_map = true; 3754 } 3755 } 3756 3757 if (delete_map) { 3758 nvgpu_kfree(g, gr->map_tiles); 3759 gr->map_tiles = NULL; 3760 gr->map_tile_count = 0; 3761 } 3762 } 3763 3764 if (gr->map_tiles == NULL) { 3765 gr->map_tiles = nvgpu_kzalloc(g, map_tile_count * sizeof(u8)); 3766 if (gr->map_tiles == NULL) { 3767 ret = -ENOMEM; 3768 goto clean_up; 3769 } 3770 gr->map_tile_count = map_tile_count; 3771 3772 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3773 sorted_num_tpcs[gpc_index] = gr->gpc_tpc_count[gpc_index]; 3774 sorted_to_unsorted_gpc_map[gpc_index] = gpc_index; 3775 } 3776 3777 gpc_sorted = false; 3778 while (!gpc_sorted) { 3779 gpc_sorted = true; 3780 for (gpc_index = 0; gpc_index < gr->gpc_count - 1; gpc_index++) { 3781 if (sorted_num_tpcs[gpc_index + 1] > sorted_num_tpcs[gpc_index]) { 3782 gpc_sorted = false; 3783 swap = sorted_num_tpcs[gpc_index]; 3784 sorted_num_tpcs[gpc_index] = sorted_num_tpcs[gpc_index + 1]; 3785 sorted_num_tpcs[gpc_index + 1] = swap; 3786 swap = sorted_to_unsorted_gpc_map[gpc_index]; 3787 sorted_to_unsorted_gpc_map[gpc_index] = 3788 sorted_to_unsorted_gpc_map[gpc_index + 1]; 3789 sorted_to_unsorted_gpc_map[gpc_index + 1] = swap; 3790 } 3791 } 3792 } 3793 3794 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3795 if (gr->gpc_tpc_count[gpc_index] > max_tpc_count) { 3796 max_tpc_count = gr->gpc_tpc_count[gpc_index]; 3797 } 3798 } 3799 3800 mul_factor = gr->gpc_count * max_tpc_count; 3801 if (mul_factor & 0x1) { 3802 mul_factor = 2; 3803 } else { 3804 mul_factor = 1; 3805 } 3806 3807 comm_denom = gr->gpc_count * max_tpc_count * mul_factor; 3808 3809 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3810 num_tpc = sorted_num_tpcs[gpc_index]; 3811 3812 init_frac[gpc_index] = num_tpc * gr->gpc_count * mul_factor; 3813 3814 if (num_tpc != 0) { 3815 init_err[gpc_index] = gpc_index * max_tpc_count * mul_factor - comm_denom/2; 3816 
} else { 3817 init_err[gpc_index] = 0; 3818 } 3819 3820 run_err[gpc_index] = init_frac[gpc_index] + init_err[gpc_index]; 3821 } 3822 3823 while (gpc_mark < gr->tpc_count) { 3824 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 3825 if ((run_err[gpc_index] * 2) >= comm_denom) { 3826 gr->map_tiles[gpc_mark++] = (u8)sorted_to_unsorted_gpc_map[gpc_index]; 3827 run_err[gpc_index] += init_frac[gpc_index] - comm_denom; 3828 } else { 3829 run_err[gpc_index] += init_frac[gpc_index]; 3830 } 3831 } 3832 } 3833 } 3834 3835clean_up: 3836 nvgpu_kfree(g, init_frac); 3837 nvgpu_kfree(g, init_err); 3838 nvgpu_kfree(g, run_err); 3839 nvgpu_kfree(g, sorted_num_tpcs); 3840 nvgpu_kfree(g, sorted_to_unsorted_gpc_map); 3841 3842 if (ret) { 3843 nvgpu_err(g, "fail"); 3844 } else { 3845 nvgpu_log_fn(g, "done"); 3846 } 3847 3848 return ret; 3849} 3850 3851static int gr_gk20a_init_zcull(struct gk20a *g, struct gr_gk20a *gr) 3852{ 3853 struct gr_zcull_gk20a *zcull = &gr->zcull; 3854 3855 zcull->aliquot_width = gr->tpc_count * 16; 3856 zcull->aliquot_height = 16; 3857 3858 zcull->width_align_pixels = gr->tpc_count * 16; 3859 zcull->height_align_pixels = 32; 3860 3861 zcull->aliquot_size = 3862 zcull->aliquot_width * zcull->aliquot_height; 3863 3864 /* assume no floor sweeping since we only have 1 tpc in 1 gpc */ 3865 zcull->pixel_squares_by_aliquots = 3866 gr->zcb_count * 16 * 16 * gr->tpc_count / 3867 (gr->gpc_count * gr->gpc_tpc_count[0]); 3868 3869 zcull->total_aliquots = 3870 gr_gpc0_zcull_total_ram_size_num_aliquots_f( 3871 gk20a_readl(g, gr_gpc0_zcull_total_ram_size_r())); 3872 3873 return 0; 3874} 3875 3876u32 gr_gk20a_get_ctxsw_zcull_size(struct gk20a *g, struct gr_gk20a *gr) 3877{ 3878 /* assuming gr has already been initialized */ 3879 return gr->ctx_vars.zcull_ctxsw_image_size; 3880} 3881 3882int gr_gk20a_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr, 3883 struct channel_gk20a *c, u64 zcull_va, u32 mode) 3884{ 3885 struct tsg_gk20a *tsg; 3886 struct 
zcull_ctx_desc *zcull_ctx; 3887 3888 tsg = tsg_gk20a_from_ch(c); 3889 if (tsg == NULL) { 3890 return -EINVAL; 3891 } 3892 3893 zcull_ctx = &tsg->gr_ctx.zcull_ctx; 3894 zcull_ctx->ctx_sw_mode = mode; 3895 zcull_ctx->gpu_va = zcull_va; 3896 3897 /* TBD: don't disable channel in sw method processing */ 3898 return gr_gk20a_ctx_zcull_setup(g, c); 3899} 3900 3901int gr_gk20a_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr, 3902 struct gr_zcull_info *zcull_params) 3903{ 3904 struct gr_zcull_gk20a *zcull = &gr->zcull; 3905 3906 zcull_params->width_align_pixels = zcull->width_align_pixels; 3907 zcull_params->height_align_pixels = zcull->height_align_pixels; 3908 zcull_params->pixel_squares_by_aliquots = 3909 zcull->pixel_squares_by_aliquots; 3910 zcull_params->aliquot_total = zcull->total_aliquots; 3911 3912 zcull_params->region_byte_multiplier = 3913 gr->gpc_count * gr_zcull_bytes_per_aliquot_per_gpu_v(); 3914 zcull_params->region_header_size = 3915 nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS) * 3916 gr_zcull_save_restore_header_bytes_per_gpc_v(); 3917 3918 zcull_params->subregion_header_size = 3919 nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS) * 3920 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(); 3921 3922 zcull_params->subregion_width_align_pixels = 3923 gr->tpc_count * gr_gpc0_zcull_zcsize_width_subregion__multiple_v(); 3924 zcull_params->subregion_height_align_pixels = 3925 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(); 3926 zcull_params->subregion_count = gr_zcull_subregion_qty_v(); 3927 3928 return 0; 3929} 3930 3931int gr_gk20a_add_zbc_color(struct gk20a *g, struct gr_gk20a *gr, 3932 struct zbc_entry *color_val, u32 index) 3933{ 3934 u32 i; 3935 3936 /* update l2 table */ 3937 g->ops.ltc.set_zbc_color_entry(g, color_val, index); 3938 3939 /* update ds table */ 3940 gk20a_writel(g, gr_ds_zbc_color_r_r(), 3941 gr_ds_zbc_color_r_val_f(color_val->color_ds[0])); 3942 gk20a_writel(g, gr_ds_zbc_color_g_r(), 3943 
gr_ds_zbc_color_g_val_f(color_val->color_ds[1])); 3944 gk20a_writel(g, gr_ds_zbc_color_b_r(), 3945 gr_ds_zbc_color_b_val_f(color_val->color_ds[2])); 3946 gk20a_writel(g, gr_ds_zbc_color_a_r(), 3947 gr_ds_zbc_color_a_val_f(color_val->color_ds[3])); 3948 3949 gk20a_writel(g, gr_ds_zbc_color_fmt_r(), 3950 gr_ds_zbc_color_fmt_val_f(color_val->format)); 3951 3952 gk20a_writel(g, gr_ds_zbc_tbl_index_r(), 3953 gr_ds_zbc_tbl_index_val_f(index + GK20A_STARTOF_ZBC_TABLE)); 3954 3955 /* trigger the write */ 3956 gk20a_writel(g, gr_ds_zbc_tbl_ld_r(), 3957 gr_ds_zbc_tbl_ld_select_c_f() | 3958 gr_ds_zbc_tbl_ld_action_write_f() | 3959 gr_ds_zbc_tbl_ld_trigger_active_f()); 3960 3961 /* update local copy */ 3962 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) { 3963 gr->zbc_col_tbl[index].color_l2[i] = color_val->color_l2[i]; 3964 gr->zbc_col_tbl[index].color_ds[i] = color_val->color_ds[i]; 3965 } 3966 gr->zbc_col_tbl[index].format = color_val->format; 3967 gr->zbc_col_tbl[index].ref_cnt++; 3968 3969 return 0; 3970} 3971 3972int gr_gk20a_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr, 3973 struct zbc_entry *depth_val, u32 index) 3974{ 3975 /* update l2 table */ 3976 g->ops.ltc.set_zbc_depth_entry(g, depth_val, index); 3977 3978 /* update ds table */ 3979 gk20a_writel(g, gr_ds_zbc_z_r(), 3980 gr_ds_zbc_z_val_f(depth_val->depth)); 3981 3982 gk20a_writel(g, gr_ds_zbc_z_fmt_r(), 3983 gr_ds_zbc_z_fmt_val_f(depth_val->format)); 3984 3985 gk20a_writel(g, gr_ds_zbc_tbl_index_r(), 3986 gr_ds_zbc_tbl_index_val_f(index + GK20A_STARTOF_ZBC_TABLE)); 3987 3988 /* trigger the write */ 3989 gk20a_writel(g, gr_ds_zbc_tbl_ld_r(), 3990 gr_ds_zbc_tbl_ld_select_z_f() | 3991 gr_ds_zbc_tbl_ld_action_write_f() | 3992 gr_ds_zbc_tbl_ld_trigger_active_f()); 3993 3994 /* update local copy */ 3995 gr->zbc_dep_tbl[index].depth = depth_val->depth; 3996 gr->zbc_dep_tbl[index].format = depth_val->format; 3997 gr->zbc_dep_tbl[index].ref_cnt++; 3998 3999 return 0; 4000} 4001 4002void 
gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) 4003{ 4004 struct fifo_gk20a *f = &g->fifo; 4005 struct fifo_engine_info_gk20a *gr_info = NULL; 4006 u32 ret; 4007 u32 engine_id; 4008 4009 engine_id = gk20a_fifo_get_gr_engine_id(g); 4010 gr_info = (f->engine_info + engine_id); 4011 4012 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); 4013 if (ret) { 4014 nvgpu_err(g, 4015 "failed to disable gr engine activity"); 4016 return; 4017 } 4018 4019 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 4020 GR_IDLE_CHECK_DEFAULT); 4021 if (ret) { 4022 nvgpu_err(g, 4023 "failed to idle graphics"); 4024 goto clean_up; 4025 } 4026 4027 /* update zbc */ 4028 g->ops.gr.pmu_save_zbc(g, entries); 4029 4030clean_up: 4031 ret = gk20a_fifo_enable_engine_activity(g, gr_info); 4032 if (ret) { 4033 nvgpu_err(g, 4034 "failed to enable gr engine activity"); 4035 } 4036} 4037 4038int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr, 4039 struct zbc_entry *zbc_val) 4040{ 4041 struct zbc_color_table *c_tbl; 4042 struct zbc_depth_table *d_tbl; 4043 u32 i; 4044 int ret = -ENOSPC; 4045 bool added = false; 4046 u32 entries; 4047 4048 /* no endian swap ? 
*/ 4049 4050 nvgpu_mutex_acquire(&gr->zbc_lock); 4051 nvgpu_speculation_barrier(); 4052 switch (zbc_val->type) { 4053 case GK20A_ZBC_TYPE_COLOR: 4054 /* search existing tables */ 4055 for (i = 0; i < gr->max_used_color_index; i++) { 4056 4057 c_tbl = &gr->zbc_col_tbl[i]; 4058 4059 if ((c_tbl->ref_cnt != 0U) && 4060 (c_tbl->format == zbc_val->format) && 4061 (memcmp(c_tbl->color_ds, zbc_val->color_ds, 4062 sizeof(zbc_val->color_ds)) == 0) && 4063 (memcmp(c_tbl->color_l2, zbc_val->color_l2, 4064 sizeof(zbc_val->color_l2)) == 0)) { 4065 4066 added = true; 4067 c_tbl->ref_cnt++; 4068 ret = 0; 4069 break; 4070 } 4071 } 4072 /* add new table */ 4073 if (!added && 4074 gr->max_used_color_index < GK20A_ZBC_TABLE_SIZE) { 4075 4076 c_tbl = 4077 &gr->zbc_col_tbl[gr->max_used_color_index]; 4078 WARN_ON(c_tbl->ref_cnt != 0); 4079 4080 ret = g->ops.gr.add_zbc_color(g, gr, 4081 zbc_val, gr->max_used_color_index); 4082 4083 if (ret == 0) { 4084 gr->max_used_color_index++; 4085 } 4086 } 4087 break; 4088 case GK20A_ZBC_TYPE_DEPTH: 4089 /* search existing tables */ 4090 for (i = 0; i < gr->max_used_depth_index; i++) { 4091 4092 d_tbl = &gr->zbc_dep_tbl[i]; 4093 4094 if ((d_tbl->ref_cnt != 0U) && 4095 (d_tbl->depth == zbc_val->depth) && 4096 (d_tbl->format == zbc_val->format)) { 4097 added = true; 4098 d_tbl->ref_cnt++; 4099 ret = 0; 4100 break; 4101 } 4102 } 4103 /* add new table */ 4104 if (!added && 4105 gr->max_used_depth_index < GK20A_ZBC_TABLE_SIZE) { 4106 4107 d_tbl = 4108 &gr->zbc_dep_tbl[gr->max_used_depth_index]; 4109 WARN_ON(d_tbl->ref_cnt != 0); 4110 4111 ret = g->ops.gr.add_zbc_depth(g, gr, 4112 zbc_val, gr->max_used_depth_index); 4113 4114 if (ret == 0) { 4115 gr->max_used_depth_index++; 4116 } 4117 } 4118 break; 4119 case T19X_ZBC: 4120 if (g->ops.gr.add_zbc_type_s) { 4121 added = g->ops.gr.add_zbc_type_s(g, gr, zbc_val, &ret); 4122 } else { 4123 nvgpu_err(g, 4124 "invalid zbc table type %d", zbc_val->type); 4125 ret = -EINVAL; 4126 goto err_mutex; 4127 } 4128 break; 
4129 default: 4130 nvgpu_err(g, 4131 "invalid zbc table type %d", zbc_val->type); 4132 ret = -EINVAL; 4133 goto err_mutex; 4134 } 4135 4136 if (!added && ret == 0) { 4137 /* update zbc for elpg only when new entry is added */ 4138 entries = max(gr->max_used_color_index, 4139 gr->max_used_depth_index); 4140 g->ops.gr.pmu_save_zbc(g, entries); 4141 } 4142 4143err_mutex: 4144 nvgpu_mutex_release(&gr->zbc_lock); 4145 return ret; 4146} 4147 4148/* get a zbc table entry specified by index 4149 * return table size when type is invalid */ 4150int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr, 4151 struct zbc_query_params *query_params) 4152{ 4153 u32 index = query_params->index_size; 4154 u32 i; 4155 4156 nvgpu_speculation_barrier(); 4157 switch (query_params->type) { 4158 case GK20A_ZBC_TYPE_INVALID: 4159 query_params->index_size = GK20A_ZBC_TABLE_SIZE; 4160 break; 4161 case GK20A_ZBC_TYPE_COLOR: 4162 if (index >= GK20A_ZBC_TABLE_SIZE) { 4163 nvgpu_err(g, 4164 "invalid zbc color table index"); 4165 return -EINVAL; 4166 } 4167 4168 nvgpu_speculation_barrier(); 4169 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) { 4170 query_params->color_l2[i] = 4171 gr->zbc_col_tbl[index].color_l2[i]; 4172 query_params->color_ds[i] = 4173 gr->zbc_col_tbl[index].color_ds[i]; 4174 } 4175 query_params->format = gr->zbc_col_tbl[index].format; 4176 query_params->ref_cnt = gr->zbc_col_tbl[index].ref_cnt; 4177 break; 4178 case GK20A_ZBC_TYPE_DEPTH: 4179 if (index >= GK20A_ZBC_TABLE_SIZE) { 4180 nvgpu_err(g, 4181 "invalid zbc depth table index"); 4182 return -EINVAL; 4183 } 4184 4185 nvgpu_speculation_barrier(); 4186 query_params->depth = gr->zbc_dep_tbl[index].depth; 4187 query_params->format = gr->zbc_dep_tbl[index].format; 4188 query_params->ref_cnt = gr->zbc_dep_tbl[index].ref_cnt; 4189 break; 4190 case T19X_ZBC: 4191 if (g->ops.gr.zbc_s_query_table) { 4192 return g->ops.gr.zbc_s_query_table(g, gr, 4193 query_params); 4194 } else { 4195 nvgpu_err(g, 4196 "invalid zbc table type"); 
4197 return -EINVAL; 4198 } 4199 break; 4200 default: 4201 nvgpu_err(g, 4202 "invalid zbc table type"); 4203 return -EINVAL; 4204 } 4205 4206 return 0; 4207} 4208 4209static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) 4210{ 4211 unsigned int i; 4212 int ret; 4213 4214 for (i = 0; i < gr->max_used_color_index; i++) { 4215 struct zbc_color_table *c_tbl = &gr->zbc_col_tbl[i]; 4216 struct zbc_entry zbc_val; 4217 4218 zbc_val.type = GK20A_ZBC_TYPE_COLOR; 4219 memcpy(zbc_val.color_ds, 4220 c_tbl->color_ds, sizeof(zbc_val.color_ds)); 4221 memcpy(zbc_val.color_l2, 4222 c_tbl->color_l2, sizeof(zbc_val.color_l2)); 4223 zbc_val.format = c_tbl->format; 4224 4225 ret = g->ops.gr.add_zbc_color(g, gr, &zbc_val, i); 4226 4227 if (ret) { 4228 return ret; 4229 } 4230 } 4231 for (i = 0; i < gr->max_used_depth_index; i++) { 4232 struct zbc_depth_table *d_tbl = &gr->zbc_dep_tbl[i]; 4233 struct zbc_entry zbc_val; 4234 4235 zbc_val.type = GK20A_ZBC_TYPE_DEPTH; 4236 zbc_val.depth = d_tbl->depth; 4237 zbc_val.format = d_tbl->format; 4238 4239 ret = g->ops.gr.add_zbc_depth(g, gr, &zbc_val, i); 4240 if (ret) { 4241 return ret; 4242 } 4243 } 4244 4245 if (g->ops.gr.load_zbc_s_tbl) { 4246 ret = g->ops.gr.load_zbc_s_tbl(g, gr); 4247 if (ret) { 4248 return ret; 4249 } 4250 } 4251 4252 return 0; 4253} 4254 4255int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr) 4256{ 4257 struct zbc_entry zbc_val; 4258 u32 i = 0; 4259 int err = 0; 4260 4261 err = nvgpu_mutex_init(&gr->zbc_lock); 4262 if (err != 0) { 4263 nvgpu_err(g, "Error in zbc_lock mutex initialization"); 4264 return err; 4265 } 4266 4267 /* load default color table */ 4268 zbc_val.type = GK20A_ZBC_TYPE_COLOR; 4269 4270 /* Opaque black (i.e. 
solid black, fmt 0x28 = A8B8G8R8) */ 4271 zbc_val.format = gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(); 4272 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) { 4273 zbc_val.color_ds[i] = 0; 4274 zbc_val.color_l2[i] = 0; 4275 } 4276 zbc_val.color_l2[0] = 0xff000000; 4277 zbc_val.color_ds[3] = 0x3f800000; 4278 err = gr_gk20a_add_zbc(g, gr, &zbc_val); 4279 if (err != 0) { 4280 goto color_fail; 4281 } 4282 4283 /* Transparent black = (fmt 1 = zero) */ 4284 zbc_val.format = gr_ds_zbc_color_fmt_val_zero_v(); 4285 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) { 4286 zbc_val.color_ds[i] = 0; 4287 zbc_val.color_l2[i] = 0; 4288 } 4289 err = gr_gk20a_add_zbc(g, gr, &zbc_val); 4290 if (err != 0) { 4291 goto color_fail; 4292 } 4293 4294 /* Opaque white (i.e. solid white) = (fmt 2 = uniform 1) */ 4295 zbc_val.format = gr_ds_zbc_color_fmt_val_unorm_one_v(); 4296 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) { 4297 zbc_val.color_ds[i] = 0x3f800000; 4298 zbc_val.color_l2[i] = 0xffffffff; 4299 } 4300 err = gr_gk20a_add_zbc(g, gr, &zbc_val); 4301 if (err != 0) { 4302 goto color_fail; 4303 } 4304 4305 gr->max_default_color_index = 3; 4306 4307 /* load default depth table */ 4308 zbc_val.type = GK20A_ZBC_TYPE_DEPTH; 4309 4310 zbc_val.format = gr_ds_zbc_z_fmt_val_fp32_v(); 4311 zbc_val.depth = 0x3f800000; 4312 err = gr_gk20a_add_zbc(g, gr, &zbc_val); 4313 if (err != 0) { 4314 goto depth_fail; 4315 } 4316 4317 zbc_val.format = gr_ds_zbc_z_fmt_val_fp32_v(); 4318 zbc_val.depth = 0; 4319 err = gr_gk20a_add_zbc(g, gr, &zbc_val); 4320 if (err != 0) { 4321 goto depth_fail; 4322 } 4323 4324 gr->max_default_depth_index = 2; 4325 4326 if (g->ops.gr.load_zbc_s_default_tbl) { 4327 err = g->ops.gr.load_zbc_s_default_tbl(g, gr); 4328 if (err != 0) { 4329 return err; 4330 } 4331 } 4332 4333 return 0; 4334 4335color_fail: 4336 nvgpu_err(g, "fail to load default zbc color table"); 4337 return err; 4338depth_fail: 4339 nvgpu_err(g, "fail to load default zbc depth table"); 4340 return err; 4341} 4342 
4343int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, 4344 struct zbc_entry *zbc_val) 4345{ 4346 struct fifo_gk20a *f = &g->fifo; 4347 struct fifo_engine_info_gk20a *gr_info = NULL; 4348 int ret; 4349 u32 engine_id; 4350 4351 engine_id = gk20a_fifo_get_gr_engine_id(g); 4352 gr_info = (f->engine_info + engine_id); 4353 4354 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); 4355 if (ret) { 4356 nvgpu_err(g, 4357 "failed to disable gr engine activity"); 4358 return ret; 4359 } 4360 4361 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 4362 GR_IDLE_CHECK_DEFAULT); 4363 if (ret) { 4364 nvgpu_err(g, 4365 "failed to idle graphics"); 4366 goto clean_up; 4367 } 4368 4369 ret = gr_gk20a_add_zbc(g, gr, zbc_val); 4370 4371clean_up: 4372 if (gk20a_fifo_enable_engine_activity(g, gr_info)) { 4373 nvgpu_err(g, 4374 "failed to enable gr engine activity"); 4375 } 4376 4377 return ret; 4378} 4379 4380int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, 4381 struct zbc_entry *zbc_val) 4382{ 4383 nvgpu_log_fn(g, " "); 4384 4385 return gr_gk20a_elpg_protected_call(g, 4386 gr_gk20a_add_zbc(g, gr, zbc_val)); 4387} 4388 4389void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, 4390 u32 *zcull_map_tiles) 4391{ 4392 u32 val; 4393 4394 nvgpu_log_fn(g, " "); 4395 4396 if (zcull_num_entries >= 8) { 4397 nvgpu_log_fn(g, "map0"); 4398 val = 4399 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f( 4400 zcull_map_tiles[0]) | 4401 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_1_f( 4402 zcull_map_tiles[1]) | 4403 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_2_f( 4404 zcull_map_tiles[2]) | 4405 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_3_f( 4406 zcull_map_tiles[3]) | 4407 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_4_f( 4408 zcull_map_tiles[4]) | 4409 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_5_f( 4410 zcull_map_tiles[5]) | 4411 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_6_f( 4412 zcull_map_tiles[6]) | 4413 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_7_f( 
4414 zcull_map_tiles[7]); 4415 4416 gk20a_writel(g, gr_gpcs_zcull_sm_in_gpc_number_map0_r(), val); 4417 } 4418 4419 if (zcull_num_entries >= 16) { 4420 nvgpu_log_fn(g, "map1"); 4421 val = 4422 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f( 4423 zcull_map_tiles[8]) | 4424 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_9_f( 4425 zcull_map_tiles[9]) | 4426 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_10_f( 4427 zcull_map_tiles[10]) | 4428 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_11_f( 4429 zcull_map_tiles[11]) | 4430 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_12_f( 4431 zcull_map_tiles[12]) | 4432 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_13_f( 4433 zcull_map_tiles[13]) | 4434 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_14_f( 4435 zcull_map_tiles[14]) | 4436 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_15_f( 4437 zcull_map_tiles[15]); 4438 4439 gk20a_writel(g, gr_gpcs_zcull_sm_in_gpc_number_map1_r(), val); 4440 } 4441 4442 if (zcull_num_entries >= 24) { 4443 nvgpu_log_fn(g, "map2"); 4444 val = 4445 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f( 4446 zcull_map_tiles[16]) | 4447 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_17_f( 4448 zcull_map_tiles[17]) | 4449 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_18_f( 4450 zcull_map_tiles[18]) | 4451 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_19_f( 4452 zcull_map_tiles[19]) | 4453 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_20_f( 4454 zcull_map_tiles[20]) | 4455 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_21_f( 4456 zcull_map_tiles[21]) | 4457 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_22_f( 4458 zcull_map_tiles[22]) | 4459 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_f( 4460 zcull_map_tiles[23]); 4461 4462 gk20a_writel(g, gr_gpcs_zcull_sm_in_gpc_number_map2_r(), val); 4463 } 4464 4465 if (zcull_num_entries >= 32) { 4466 nvgpu_log_fn(g, "map3"); 4467 val = 4468 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f( 4469 zcull_map_tiles[24]) | 4470 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_25_f( 4471 zcull_map_tiles[25]) | 4472 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_26_f( 
4473 zcull_map_tiles[26]) | 4474 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_27_f( 4475 zcull_map_tiles[27]) | 4476 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_28_f( 4477 zcull_map_tiles[28]) | 4478 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_29_f( 4479 zcull_map_tiles[29]) | 4480 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_30_f( 4481 zcull_map_tiles[30]) | 4482 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_31_f( 4483 zcull_map_tiles[31]); 4484 4485 gk20a_writel(g, gr_gpcs_zcull_sm_in_gpc_number_map3_r(), val); 4486 } 4487 4488} 4489 4490static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr) 4491{ 4492 u32 gpc_index, gpc_tpc_count, gpc_zcull_count; 4493 u32 *zcull_map_tiles, *zcull_bank_counters; 4494 u32 map_counter; 4495 u32 rcp_conserv; 4496 u32 offset; 4497 bool floorsweep = false; 4498 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 4499 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); 4500 u32 num_tpc_per_gpc = nvgpu_get_litter_value(g, 4501 GPU_LIT_NUM_TPC_PER_GPC); 4502 u32 zcull_alloc_num = num_gpcs * num_tpc_per_gpc; 4503 u32 map_tile_count; 4504 4505 if (gr->map_tiles == NULL) { 4506 return -1; 4507 } 4508 4509 if (zcull_alloc_num % 8 != 0) { 4510 /* Total 8 fields per map reg i.e. 
tile_0 to tile_7*/ 4511 zcull_alloc_num += (zcull_alloc_num % 8); 4512 } 4513 zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); 4514 4515 if (zcull_map_tiles == NULL) { 4516 nvgpu_err(g, 4517 "failed to allocate zcull map titles"); 4518 return -ENOMEM; 4519 } 4520 4521 zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); 4522 4523 if (zcull_bank_counters == NULL) { 4524 nvgpu_err(g, 4525 "failed to allocate zcull bank counters"); 4526 nvgpu_kfree(g, zcull_map_tiles); 4527 return -ENOMEM; 4528 } 4529 4530 for (map_counter = 0; map_counter < gr->tpc_count; map_counter++) { 4531 map_tile_count = gr_gk20a_get_map_tile_count(gr, map_counter); 4532 zcull_map_tiles[map_counter] = 4533 zcull_bank_counters[map_tile_count]; 4534 zcull_bank_counters[map_tile_count]++; 4535 } 4536 4537 if (g->ops.gr.program_zcull_mapping != NULL) { 4538 g->ops.gr.program_zcull_mapping(g, zcull_alloc_num, 4539 zcull_map_tiles); 4540 } 4541 4542 nvgpu_kfree(g, zcull_map_tiles); 4543 nvgpu_kfree(g, zcull_bank_counters); 4544 4545 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 4546 gpc_tpc_count = gr->gpc_tpc_count[gpc_index]; 4547 gpc_zcull_count = gr->gpc_zcb_count[gpc_index]; 4548 4549 if (gpc_zcull_count != gr->max_zcull_per_gpc_count && 4550 gpc_zcull_count < gpc_tpc_count) { 4551 nvgpu_err(g, 4552 "zcull_banks (%d) less than tpcs (%d) for gpc (%d)", 4553 gpc_zcull_count, gpc_tpc_count, gpc_index); 4554 return -EINVAL; 4555 } 4556 if (gpc_zcull_count != gr->max_zcull_per_gpc_count && 4557 gpc_zcull_count != 0) { 4558 floorsweep = true; 4559 } 4560 } 4561 4562 /* ceil(1.0f / SM_NUM * gr_gpc0_zcull_sm_num_rcp_conservative__max_v()) */ 4563 rcp_conserv = DIV_ROUND_UP(gr_gpc0_zcull_sm_num_rcp_conservative__max_v(), 4564 gr->gpc_tpc_count[0]); 4565 4566 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { 4567 offset = gpc_index * gpc_stride; 4568 4569 if (floorsweep) { 4570 gk20a_writel(g, gr_gpc0_zcull_ram_addr_r() + offset, 4571 
gr_gpc0_zcull_ram_addr_row_offset_f(gr->map_row_offset) | 4572 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f( 4573 gr->max_zcull_per_gpc_count)); 4574 } else { 4575 gk20a_writel(g, gr_gpc0_zcull_ram_addr_r() + offset, 4576 gr_gpc0_zcull_ram_addr_row_offset_f(gr->map_row_offset) | 4577 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f( 4578 gr->gpc_tpc_count[gpc_index])); 4579 } 4580 4581 gk20a_writel(g, gr_gpc0_zcull_fs_r() + offset, 4582 gr_gpc0_zcull_fs_num_active_banks_f(gr->gpc_zcb_count[gpc_index]) | 4583 gr_gpc0_zcull_fs_num_sms_f(gr->tpc_count)); 4584 4585 gk20a_writel(g, gr_gpc0_zcull_sm_num_rcp_r() + offset, 4586 gr_gpc0_zcull_sm_num_rcp_conservative_f(rcp_conserv)); 4587 } 4588 4589 gk20a_writel(g, gr_gpcs_ppcs_wwdx_sm_num_rcp_r(), 4590 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(rcp_conserv)); 4591 4592 return 0; 4593} 4594 4595void gk20a_gr_enable_exceptions(struct gk20a *g) 4596{ 4597 gk20a_writel(g, gr_exception_r(), 0xFFFFFFFF); 4598 gk20a_writel(g, gr_exception_en_r(), 0xFFFFFFFF); 4599 gk20a_writel(g, gr_exception1_r(), 0xFFFFFFFF); 4600 gk20a_writel(g, gr_exception1_en_r(), 0xFFFFFFFF); 4601 gk20a_writel(g, gr_exception2_r(), 0xFFFFFFFF); 4602 gk20a_writel(g, gr_exception2_en_r(), 0xFFFFFFFF); 4603} 4604 4605void gk20a_gr_enable_gpc_exceptions(struct gk20a *g) 4606{ 4607 struct gr_gk20a *gr = &g->gr; 4608 u32 tpc_mask; 4609 4610 gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), 4611 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f() | 4612 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f()); 4613 4614 tpc_mask = 4615 gr_gpcs_gpccs_gpc_exception_en_tpc_f((1 << gr->max_tpc_per_gpc_count) - 1); 4616 4617 gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(), tpc_mask); 4618} 4619 4620 4621void gr_gk20a_enable_hww_exceptions(struct gk20a *g) 4622{ 4623 /* enable exceptions */ 4624 gk20a_writel(g, gr_fe_hww_esr_r(), 4625 gr_fe_hww_esr_en_enable_f() | 4626 gr_fe_hww_esr_reset_active_f()); 4627 gk20a_writel(g, 
gr_memfmt_hww_esr_r(), 4628 gr_memfmt_hww_esr_en_enable_f() | 4629 gr_memfmt_hww_esr_reset_active_f()); 4630} 4631 4632void gr_gk20a_fecs_host_int_enable(struct gk20a *g) 4633{ 4634 gk20a_writel(g, gr_fecs_host_int_enable_r(), 4635 gr_fecs_host_int_enable_ctxsw_intr1_enable_f() | 4636 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f() | 4637 gr_fecs_host_int_enable_umimp_firmware_method_enable_f() | 4638 gr_fecs_host_int_enable_umimp_illegal_method_enable_f() | 4639 gr_fecs_host_int_enable_watchdog_enable_f()); 4640} 4641 4642static int gk20a_init_gr_setup_hw(struct gk20a *g) 4643{ 4644 struct gr_gk20a *gr = &g->gr; 4645 struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load; 4646 struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init; 4647 u32 data; 4648 u32 last_method_data = 0; 4649 u32 i, err; 4650 4651 nvgpu_log_fn(g, " "); 4652 4653 if (g->ops.gr.init_gpc_mmu) { 4654 g->ops.gr.init_gpc_mmu(g); 4655 } 4656 4657 /* load gr floorsweeping registers */ 4658 data = gk20a_readl(g, gr_gpc0_ppc0_pes_vsc_strem_r()); 4659 data = set_field(data, gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(), 4660 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f()); 4661 gk20a_writel(g, gr_gpc0_ppc0_pes_vsc_strem_r(), data); 4662 4663 gr_gk20a_zcull_init_hw(g, gr); 4664 4665 if (g->ops.priv_ring.set_ppriv_timeout_settings != NULL) { 4666 g->ops.priv_ring.set_ppriv_timeout_settings(g); 4667 } 4668 4669 /* enable fifo access */ 4670 gk20a_writel(g, gr_gpfifo_ctl_r(), 4671 gr_gpfifo_ctl_access_enabled_f() | 4672 gr_gpfifo_ctl_semaphore_access_enabled_f()); 4673 4674 /* TBD: reload gr ucode when needed */ 4675 4676 /* enable interrupts */ 4677 gk20a_writel(g, gr_intr_r(), 0xFFFFFFFF); 4678 gk20a_writel(g, gr_intr_en_r(), 0xFFFFFFFF); 4679 4680 /* enable fecs error interrupts */ 4681 g->ops.gr.fecs_host_int_enable(g); 4682 4683 g->ops.gr.enable_hww_exceptions(g); 4684 g->ops.gr.set_hww_esr_report_mask(g); 4685 4686 /* enable TPC exceptions per GPC */ 4687 if 
(g->ops.gr.enable_gpc_exceptions) { 4688 g->ops.gr.enable_gpc_exceptions(g); 4689 } 4690 4691 /* enable ECC for L1/SM */ 4692 if (g->ops.gr.ecc_init_scrub_reg) { 4693 g->ops.gr.ecc_init_scrub_reg(g); 4694 } 4695 4696 /* TBD: enable per BE exceptions */ 4697 4698 /* reset and enable exceptions */ 4699 g->ops.gr.enable_exceptions(g); 4700 4701 gr_gk20a_load_zbc_table(g, gr); 4702 4703 if (g->ops.ltc.init_cbc) { 4704 g->ops.ltc.init_cbc(g, gr); 4705 } 4706 4707 if (g->ops.fb.init_cbc) { 4708 g->ops.fb.init_cbc(g, gr); 4709 } 4710 4711 /* load ctx init */ 4712 for (i = 0; i < sw_ctx_load->count; i++) { 4713 gk20a_writel(g, sw_ctx_load->l[i].addr, 4714 sw_ctx_load->l[i].value); 4715 } 4716 4717 if (g->ops.gr.disable_rd_coalesce) { 4718 g->ops.gr.disable_rd_coalesce(g); 4719 } 4720 4721 err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), 4722 GR_IDLE_CHECK_DEFAULT); 4723 if (err != 0U) { 4724 goto out; 4725 } 4726 4727 if (g->ops.gr.init_preemption_state) { 4728 err = g->ops.gr.init_preemption_state(g); 4729 if (err != 0U) { 4730 goto out; 4731 } 4732 } 4733 4734 /* disable fe_go_idle */ 4735 gk20a_writel(g, gr_fe_go_idle_timeout_r(), 4736 gr_fe_go_idle_timeout_count_disabled_f()); 4737 4738 /* override a few ctx state registers */ 4739 g->ops.gr.commit_global_timeslice(g, NULL); 4740 4741 /* floorsweep anything left */ 4742 err = g->ops.gr.init_fs_state(g); 4743 if (err != 0U) { 4744 goto out; 4745 } 4746 4747 err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), 4748 GR_IDLE_CHECK_DEFAULT); 4749 if (err != 0U) { 4750 goto restore_fe_go_idle; 4751 } 4752 4753restore_fe_go_idle: 4754 /* restore fe_go_idle */ 4755 gk20a_writel(g, gr_fe_go_idle_timeout_r(), 4756 gr_fe_go_idle_timeout_count_prod_f()); 4757 4758 if ((err != 0U) || (gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), 4759 GR_IDLE_CHECK_DEFAULT) != 0)) { 4760 goto out; 4761 } 4762 4763 /* load method init */ 4764 if (sw_method_init->count) { 4765 gk20a_writel(g, gr_pri_mme_shadow_raw_data_r(), 4766 
sw_method_init->l[0].value); 4767 gk20a_writel(g, gr_pri_mme_shadow_raw_index_r(), 4768 gr_pri_mme_shadow_raw_index_write_trigger_f() | 4769 sw_method_init->l[0].addr); 4770 last_method_data = sw_method_init->l[0].value; 4771 } 4772 for (i = 1; i < sw_method_init->count; i++) { 4773 if (sw_method_init->l[i].value != last_method_data) { 4774 gk20a_writel(g, gr_pri_mme_shadow_raw_data_r(), 4775 sw_method_init->l[i].value); 4776 last_method_data = sw_method_init->l[i].value; 4777 } 4778 gk20a_writel(g, gr_pri_mme_shadow_raw_index_r(), 4779 gr_pri_mme_shadow_raw_index_write_trigger_f() | 4780 sw_method_init->l[i].addr); 4781 } 4782 4783 err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), 4784 GR_IDLE_CHECK_DEFAULT); 4785out: 4786 nvgpu_log_fn(g, "done"); 4787 return err; 4788} 4789 4790static int gk20a_init_gr_prepare(struct gk20a *g) 4791{ 4792 u32 err = 0; 4793 4794 /* reset gr engine */ 4795 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_GRAPH) | 4796 g->ops.mc.reset_mask(g, NVGPU_UNIT_BLG) | 4797 g->ops.mc.reset_mask(g, NVGPU_UNIT_PERFMON)); 4798 4799 nvgpu_cg_init_gr_load_gating_prod(g); 4800 4801 /* Disable elcg until it gets enabled later in the init*/ 4802 nvgpu_cg_elcg_disable_no_wait(g); 4803 4804 /* enable fifo access */ 4805 gk20a_writel(g, gr_gpfifo_ctl_r(), 4806 gr_gpfifo_ctl_access_enabled_f() | 4807 gr_gpfifo_ctl_semaphore_access_enabled_f()); 4808 4809 if (!g->gr.ctx_vars.valid) { 4810 err = gr_gk20a_init_ctx_vars(g, &g->gr); 4811 if (err != 0U) { 4812 nvgpu_err(g, 4813 "fail to load gr init ctx"); 4814 } 4815 } 4816 return err; 4817} 4818 4819static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) 4820{ 4821 struct nvgpu_timeout timeout; 4822 bool fecs_scrubbing; 4823 bool gpccs_scrubbing; 4824 4825 nvgpu_log_fn(g, " "); 4826 4827 nvgpu_timeout_init(g, &timeout, 4828 CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / 4829 CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT, 4830 NVGPU_TIMER_RETRY_TIMER); 4831 do { 4832 fecs_scrubbing = gk20a_readl(g, gr_fecs_dmactl_r()) 
& 4833 (gr_fecs_dmactl_imem_scrubbing_m() | 4834 gr_fecs_dmactl_dmem_scrubbing_m()); 4835 4836 gpccs_scrubbing = gk20a_readl(g, gr_gpccs_dmactl_r()) & 4837 (gr_gpccs_dmactl_imem_scrubbing_m() | 4838 gr_gpccs_dmactl_imem_scrubbing_m()); 4839 4840 if (!fecs_scrubbing && !gpccs_scrubbing) { 4841 nvgpu_log_fn(g, "done"); 4842 return 0; 4843 } 4844 4845 nvgpu_udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT); 4846 } while (nvgpu_timeout_expired(&timeout) == 0); 4847 4848 nvgpu_err(g, "Falcon mem scrubbing timeout"); 4849 return -ETIMEDOUT; 4850} 4851 4852static int gr_gk20a_init_ctxsw(struct gk20a *g) 4853{ 4854 u32 err = 0; 4855 4856 err = g->ops.gr.load_ctxsw_ucode(g); 4857 if (err != 0U) { 4858 goto out; 4859 } 4860 4861 err = gr_gk20a_wait_ctxsw_ready(g); 4862 if (err != 0U) { 4863 goto out; 4864 } 4865 4866out: 4867 if (err != 0U) { 4868 nvgpu_err(g, "fail"); 4869 } else { 4870 nvgpu_log_fn(g, "done"); 4871 } 4872 4873 return err; 4874} 4875 4876static int gk20a_init_gr_reset_enable_hw(struct gk20a *g) 4877{ 4878 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; 4879 u32 i, err = 0; 4880 4881 nvgpu_log_fn(g, " "); 4882 4883 /* enable interrupts */ 4884 gk20a_writel(g, gr_intr_r(), ~0); 4885 gk20a_writel(g, gr_intr_en_r(), ~0); 4886 4887 /* load non_ctx init */ 4888 for (i = 0; i < sw_non_ctx_load->count; i++) { 4889 gk20a_writel(g, sw_non_ctx_load->l[i].addr, 4890 sw_non_ctx_load->l[i].value); 4891 } 4892 4893 err = gr_gk20a_wait_mem_scrubbing(g); 4894 if (err != 0U) { 4895 goto out; 4896 } 4897 4898 err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), 4899 GR_IDLE_CHECK_DEFAULT); 4900 if (err != 0U) { 4901 goto out; 4902 } 4903 4904out: 4905 if (err != 0U) { 4906 nvgpu_err(g, "fail"); 4907 } else { 4908 nvgpu_log_fn(g, "done"); 4909 } 4910 4911 return 0; 4912} 4913 4914static int gr_gk20a_init_access_map(struct gk20a *g) 4915{ 4916 struct gr_gk20a *gr = &g->gr; 4917 struct nvgpu_mem *mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem; 4918 u32 
nr_pages = 4919 DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size, 4920 PAGE_SIZE); 4921 u32 *whitelist = NULL; 4922 int w, num_entries = 0; 4923 4924 nvgpu_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages); 4925 4926 g->ops.gr.get_access_map(g, &whitelist, &num_entries); 4927 4928 for (w = 0; w < num_entries; w++) { 4929 u32 map_bit, map_byte, map_shift, x; 4930 map_bit = whitelist[w] >> 2; 4931 map_byte = map_bit >> 3; 4932 map_shift = map_bit & 0x7; /* i.e. 0-7 */ 4933 nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d", 4934 whitelist[w], map_byte, map_shift); 4935 x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32)); 4936 x |= 1 << ( 4937 (map_byte % sizeof(u32) * BITS_PER_BYTE) 4938 + map_shift); 4939 nvgpu_mem_wr32(g, mem, map_byte / sizeof(u32), x); 4940 } 4941 4942 return 0; 4943} 4944 4945static int gk20a_init_gr_setup_sw(struct gk20a *g) 4946{ 4947 struct gr_gk20a *gr = &g->gr; 4948 int err = 0; 4949 4950 nvgpu_log_fn(g, " "); 4951 4952 if (gr->sw_ready) { 4953 nvgpu_log_fn(g, "skip init"); 4954 return 0; 4955 } 4956 4957 gr->g = g; 4958 4959#if defined(CONFIG_GK20A_CYCLE_STATS) 4960 err = nvgpu_mutex_init(&g->gr.cs_lock); 4961 if (err != 0) { 4962 nvgpu_err(g, "Error in gr.cs_lock mutex initialization"); 4963 return err; 4964 } 4965#endif 4966 4967 err = gr_gk20a_init_gr_config(g, gr); 4968 if (err != 0) { 4969 goto clean_up; 4970 } 4971 4972 err = gr_gk20a_init_map_tiles(g, gr); 4973 if (err != 0) { 4974 goto clean_up; 4975 } 4976 4977 if (g->ops.ltc.init_comptags) { 4978 err = g->ops.ltc.init_comptags(g, gr); 4979 if (err != 0) { 4980 goto clean_up; 4981 } 4982 } 4983 4984 err = gr_gk20a_init_zcull(g, gr); 4985 if (err != 0) { 4986 goto clean_up; 4987 } 4988 4989 err = g->ops.gr.alloc_global_ctx_buffers(g); 4990 if (err != 0) { 4991 goto clean_up; 4992 } 4993 4994 err = gr_gk20a_init_access_map(g); 4995 if (err != 0) { 4996 goto clean_up; 4997 } 4998 4999 gr_gk20a_load_zbc_default_table(g, gr); 5000 5001 if (g->ops.gr.init_czf_bypass) { 5002 
g->ops.gr.init_czf_bypass(g); 5003 } 5004 5005 if (g->ops.gr.init_gfxp_wfi_timeout_count) { 5006 g->ops.gr.init_gfxp_wfi_timeout_count(g); 5007 } 5008 5009 err = nvgpu_mutex_init(&gr->ctx_mutex); 5010 if (err != 0) { 5011 nvgpu_err(g, "Error in gr.ctx_mutex initialization"); 5012 goto clean_up; 5013 } 5014 5015 nvgpu_spinlock_init(&gr->ch_tlb_lock); 5016 5017 gr->remove_support = gk20a_remove_gr_support; 5018 gr->sw_ready = true; 5019 5020 err = nvgpu_ecc_init_support(g); 5021 if (err != 0) { 5022 goto clean_up; 5023 } 5024 5025 nvgpu_log_fn(g, "done"); 5026 return 0; 5027 5028clean_up: 5029 nvgpu_err(g, "fail"); 5030 gk20a_remove_gr_support(gr); 5031 return err; 5032} 5033 5034static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) 5035{ 5036 struct nvgpu_pmu *pmu = &g->pmu; 5037 struct mm_gk20a *mm = &g->mm; 5038 struct vm_gk20a *vm = mm->pmu.vm; 5039 int err = 0; 5040 5041 u32 size; 5042 5043 nvgpu_log_fn(g, " "); 5044 5045 size = 0; 5046 5047 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 5048 if (err != 0) { 5049 nvgpu_err(g, 5050 "fail to query fecs pg buffer size"); 5051 return err; 5052 } 5053 5054 if (pmu->pg_buf.cpu_va == NULL) { 5055 err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf); 5056 if (err != 0) { 5057 nvgpu_err(g, "failed to allocate memory"); 5058 return -ENOMEM; 5059 } 5060 } 5061 5062 5063 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block); 5064 if (err != 0) { 5065 nvgpu_err(g, 5066 "fail to bind pmu inst to gr"); 5067 return err; 5068 } 5069 5070 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va); 5071 if (err != 0) { 5072 nvgpu_err(g, 5073 "fail to set pg buffer pmu va"); 5074 return err; 5075 } 5076 5077 return err; 5078} 5079 5080int gk20a_init_gr_support(struct gk20a *g) 5081{ 5082 int err = 0; 5083 5084 nvgpu_log_fn(g, " "); 5085 5086 g->gr.initialized = false; 5087 5088 /* this is required before gr_gk20a_init_ctx_state */ 5089 err = nvgpu_mutex_init(&g->gr.fecs_mutex); 5090 if (err != 0) { 
5091 nvgpu_err(g, "Error in gr.fecs_mutex initialization"); 5092 return err; 5093 } 5094 5095 err = gr_gk20a_init_ctxsw(g); 5096 if (err != 0) { 5097 return err; 5098 } 5099 5100 /* this appears query for sw states but fecs actually init 5101 ramchain, etc so this is hw init */ 5102 err = g->ops.gr.init_ctx_state(g); 5103 if (err != 0) { 5104 return err; 5105 } 5106 5107 err = gk20a_init_gr_setup_sw(g); 5108 if (err != 0) { 5109 return err; 5110 } 5111 5112 err = gk20a_init_gr_setup_hw(g); 5113 if (err != 0) { 5114 return err; 5115 } 5116 5117 if (g->can_elpg) { 5118 err = gk20a_init_gr_bind_fecs_elpg(g); 5119 if (err != 0) { 5120 return err; 5121 } 5122 } 5123 5124 /* GR is inialized, signal possible waiters */ 5125 g->gr.initialized = true; 5126 nvgpu_cond_signal(&g->gr.init_wq); 5127 5128 return 0; 5129} 5130 5131/* Wait until GR is initialized */ 5132void gk20a_gr_wait_initialized(struct gk20a *g) 5133{ 5134 NVGPU_COND_WAIT(&g->gr.init_wq, g->gr.initialized, 0); 5135} 5136 5137#define NVA297_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc 5138#define NVA297_SET_CIRCULAR_BUFFER_SIZE 0x1280 5139#define NVA297_SET_SHADER_EXCEPTIONS 0x1528 5140#define NVA0C0_SET_SHADER_EXCEPTIONS 0x1528 5141 5142#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0 5143 5144void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data) 5145{ 5146 nvgpu_log_fn(g, " "); 5147 5148 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { 5149 gk20a_writel(g, 5150 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(), 0); 5151 gk20a_writel(g, 5152 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r(), 0); 5153 } else { 5154 /* setup sm warp esr report masks */ 5155 gk20a_writel(g, gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(), 5156 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_error_report_f() | 5157 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_api_stack_error_report_f() | 5158 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_ret_empty_stack_error_report_f() | 5159 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_wrap_report_f() | 
5160 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_pc_report_f() | 5161 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_overflow_report_f() | 5162 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_immc_addr_report_f() | 5163 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_reg_report_f() | 5164 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_encoding_report_f() | 5165 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_sph_instr_combo_report_f() | 5166 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param_report_f() | 5167 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_report_f() | 5168 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_reg_report_f() | 5169 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_addr_report_f() | 5170 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_addr_report_f() | 5171 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_addr_space_report_f() | 5172 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param2_report_f() | 5173 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f() | 5174 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_geometry_sm_error_report_f() | 5175 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_divergent_report_f()); 5176 5177 /* setup sm global esr report mask */ 5178 gk20a_writel(g, gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r(), 5179 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_sm_to_sm_fault_report_f() | 5180 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_l1_error_report_f() | 5181 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_multiple_warp_errors_report_f() | 5182 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_physical_stack_overflow_error_report_f() | 5183 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_int_report_f() | 5184 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_pause_report_f() | 5185 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_single_step_complete_report_f()); 5186 } 5187} 5188 5189int gk20a_enable_gr_hw(struct gk20a *g) 5190{ 5191 int err; 5192 5193 nvgpu_log_fn(g, " "); 5194 5195 
err = gk20a_init_gr_prepare(g); 5196 if (err != 0) { 5197 return err; 5198 } 5199 5200 err = gk20a_init_gr_reset_enable_hw(g); 5201 if (err != 0) { 5202 return err; 5203 } 5204 5205 nvgpu_log_fn(g, "done"); 5206 5207 return 0; 5208} 5209 5210int gk20a_gr_reset(struct gk20a *g) 5211{ 5212 int err; 5213 u32 size; 5214 5215 g->gr.initialized = false; 5216 5217 nvgpu_mutex_acquire(&g->gr.fecs_mutex); 5218 5219 err = gk20a_enable_gr_hw(g); 5220 if (err != 0) { 5221 nvgpu_mutex_release(&g->gr.fecs_mutex); 5222 return err; 5223 } 5224 5225 err = gk20a_init_gr_setup_hw(g); 5226 if (err != 0) { 5227 nvgpu_mutex_release(&g->gr.fecs_mutex); 5228 return err; 5229 } 5230 5231 err = gr_gk20a_init_ctxsw(g); 5232 if (err != 0) { 5233 nvgpu_mutex_release(&g->gr.fecs_mutex); 5234 return err; 5235 } 5236 5237 nvgpu_mutex_release(&g->gr.fecs_mutex); 5238 5239 /* this appears query for sw states but fecs actually init 5240 ramchain, etc so this is hw init */ 5241 err = g->ops.gr.init_ctx_state(g); 5242 if (err != 0) { 5243 return err; 5244 } 5245 5246 size = 0; 5247 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 5248 if (err != 0) { 5249 nvgpu_err(g, 5250 "fail to query fecs pg buffer size"); 5251 return err; 5252 } 5253 5254 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block); 5255 if (err != 0) { 5256 nvgpu_err(g, 5257 "fail to bind pmu inst to gr"); 5258 return err; 5259 } 5260 5261 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va); 5262 if (err != 0) { 5263 nvgpu_err(g, 5264 "fail to set pg buffer pmu va"); 5265 return err; 5266 } 5267 5268 nvgpu_cg_init_gr_load_gating_prod(g); 5269 nvgpu_cg_elcg_enable_no_wait(g); 5270 5271 /* GR is inialized, signal possible waiters */ 5272 g->gr.initialized = true; 5273 nvgpu_cond_signal(&g->gr.init_wq); 5274 5275 return err; 5276} 5277 5278static void gk20a_gr_set_error_notifier(struct gk20a *g, 5279 struct gr_gk20a_isr_data *isr_data, u32 error_notifier) 5280{ 5281 struct channel_gk20a *ch; 5282 struct 
tsg_gk20a *tsg; 5283 struct channel_gk20a *ch_tsg; 5284 5285 ch = isr_data->ch; 5286 5287 if (ch == NULL) { 5288 return; 5289 } 5290 5291 tsg = tsg_gk20a_from_ch(ch); 5292 if (tsg != NULL) { 5293 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 5294 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list, 5295 channel_gk20a, ch_entry) { 5296 if (gk20a_channel_get(ch_tsg)) { 5297 g->ops.fifo.set_error_notifier(ch_tsg, 5298 error_notifier); 5299 gk20a_channel_put(ch_tsg); 5300 } 5301 5302 } 5303 nvgpu_rwsem_up_read(&tsg->ch_list_lock); 5304 } else { 5305 nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid); 5306 } 5307} 5308 5309static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, 5310 struct gr_gk20a_isr_data *isr_data) 5311{ 5312 nvgpu_log_fn(g, " "); 5313 gk20a_gr_set_error_notifier(g, isr_data, 5314 NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); 5315 nvgpu_err(g, 5316 "gr semaphore timeout"); 5317 return -EINVAL; 5318} 5319 5320static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, 5321 struct gr_gk20a_isr_data *isr_data) 5322{ 5323 nvgpu_log_fn(g, " "); 5324 gk20a_gr_set_error_notifier(g, isr_data, 5325 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); 5326 /* This is an unrecoverable error, reset is needed */ 5327 nvgpu_err(g, 5328 "gr semaphore timeout"); 5329 return -EINVAL; 5330} 5331 5332static int gk20a_gr_handle_illegal_method(struct gk20a *g, 5333 struct gr_gk20a_isr_data *isr_data) 5334{ 5335 int ret = g->ops.gr.handle_sw_method(g, isr_data->addr, 5336 isr_data->class_num, isr_data->offset, 5337 isr_data->data_lo); 5338 if (ret) { 5339 gk20a_gr_set_error_notifier(g, isr_data, 5340 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); 5341 nvgpu_err(g, "invalid method class 0x%08x" 5342 ", offset 0x%08x address 0x%08x", 5343 isr_data->class_num, isr_data->offset, isr_data->addr); 5344 } 5345 return ret; 5346} 5347 5348static int gk20a_gr_handle_illegal_class(struct gk20a *g, 5349 struct gr_gk20a_isr_data *isr_data) 5350{ 5351 nvgpu_log_fn(g, " "); 5352 
gk20a_gr_set_error_notifier(g, isr_data, 5353 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5354 nvgpu_err(g, 5355 "invalid class 0x%08x, offset 0x%08x", 5356 isr_data->class_num, isr_data->offset); 5357 return -EINVAL; 5358} 5359 5360int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch, 5361 struct gr_gk20a_isr_data *isr_data) 5362{ 5363 u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r()); 5364 int ret = 0; 5365 u32 chid = isr_data->ch != NULL ? 5366 isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID; 5367 5368 if (gr_fecs_intr == 0U) { 5369 return 0; 5370 } 5371 5372 if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) { 5373 gk20a_gr_set_error_notifier(g, isr_data, 5374 NVGPU_ERR_NOTIFIER_FECS_ERR_UNIMP_FIRMWARE_METHOD); 5375 nvgpu_err(g, 5376 "firmware method error 0x%08x for offset 0x%04x", 5377 gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)), 5378 isr_data->data_lo); 5379 ret = -1; 5380 } else if ((gr_fecs_intr & 5381 gr_fecs_host_int_status_watchdog_active_f()) != 0U) { 5382 /* currently, recovery is not initiated */ 5383 nvgpu_err(g, "fecs watchdog triggered for channel %u", chid); 5384 gk20a_fecs_dump_falcon_stats(g); 5385 gk20a_gpccs_dump_falcon_stats(g); 5386 gk20a_gr_debug_dump(g); 5387 } else if ((gr_fecs_intr & 5388 gr_fecs_host_int_status_ctxsw_intr_f(CTXSW_INTR0)) != 0U) { 5389 u32 mailbox_value = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)); 5390 5391 if (mailbox_value == MAILBOX_VALUE_TIMESTAMP_BUFFER_FULL) { 5392 nvgpu_info(g, "ctxsw intr0 set by ucode, " 5393 "timestamp buffer full"); 5394#ifdef CONFIG_GK20A_CTXSW_TRACE 5395 gk20a_fecs_trace_reset_buffer(g); 5396#else 5397 ret = -1; 5398#endif 5399 } else { 5400 nvgpu_err(g, 5401 "ctxsw intr0 set by ucode, error_code: 0x%08x", 5402 mailbox_value); 5403 ret = -1; 5404 } 5405 } else { 5406 nvgpu_err(g, 5407 "unhandled fecs error interrupt 0x%08x for channel %u", 5408 gr_fecs_intr, chid); 5409 gk20a_fecs_dump_falcon_stats(g); 5410 gk20a_gpccs_dump_falcon_stats(g); 5411 } 
5412 5413 gk20a_writel(g, gr_fecs_host_int_clear_r(), gr_fecs_intr); 5414 return ret; 5415} 5416 5417static int gk20a_gr_handle_class_error(struct gk20a *g, 5418 struct gr_gk20a_isr_data *isr_data) 5419{ 5420 u32 gr_class_error; 5421 u32 chid = isr_data->ch != NULL ? 5422 isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID; 5423 5424 nvgpu_log_fn(g, " "); 5425 5426 gr_class_error = 5427 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); 5428 gk20a_gr_set_error_notifier(g, isr_data, 5429 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5430 nvgpu_err(g, "class error 0x%08x, offset 0x%08x," 5431 "sub channel 0x%08x mme generated %d," 5432 " mme pc 0x%08xdata high %d priv status %d" 5433 " unhandled intr 0x%08x for channel %u", 5434 isr_data->class_num, (isr_data->offset << 2), 5435 gr_trapped_addr_subch_v(isr_data->addr), 5436 gr_trapped_addr_mme_generated_v(isr_data->addr), 5437 gr_trapped_data_mme_pc_v( 5438 gk20a_readl(g, gr_trapped_data_mme_r())), 5439 gr_trapped_addr_datahigh_v(isr_data->addr), 5440 gr_trapped_addr_priv_v(isr_data->addr), 5441 gr_class_error, chid); 5442 5443 nvgpu_err(g, "trapped data low 0x%08x", 5444 gk20a_readl(g, gr_trapped_data_lo_r())); 5445 if (gr_trapped_addr_datahigh_v(isr_data->addr)) { 5446 nvgpu_err(g, "trapped data high 0x%08x", 5447 gk20a_readl(g, gr_trapped_data_hi_r())); 5448 } 5449 5450 return -EINVAL; 5451} 5452 5453static int gk20a_gr_handle_firmware_method(struct gk20a *g, 5454 struct gr_gk20a_isr_data *isr_data) 5455{ 5456 u32 chid = isr_data->ch != NULL ? 
5457 isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID; 5458 5459 nvgpu_log_fn(g, " "); 5460 5461 gk20a_gr_set_error_notifier(g, isr_data, 5462 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); 5463 nvgpu_err(g, 5464 "firmware method 0x%08x, offset 0x%08x for channel %u", 5465 isr_data->class_num, isr_data->offset, 5466 chid); 5467 return -EINVAL; 5468} 5469 5470int gk20a_gr_handle_semaphore_pending(struct gk20a *g, 5471 struct gr_gk20a_isr_data *isr_data) 5472{ 5473 struct channel_gk20a *ch = isr_data->ch; 5474 struct tsg_gk20a *tsg; 5475 5476 if (ch == NULL) { 5477 return 0; 5478 } 5479 5480 tsg = tsg_gk20a_from_ch(ch); 5481 if (tsg != NULL) { 5482 g->ops.fifo.post_event_id(tsg, 5483 NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN); 5484 5485 nvgpu_cond_broadcast(&ch->semaphore_wq); 5486 } else { 5487 nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid); 5488 } 5489 5490 return 0; 5491} 5492 5493#if defined(CONFIG_GK20A_CYCLE_STATS) 5494static inline bool is_valid_cyclestats_bar0_offset_gk20a(struct gk20a *g, 5495 u32 offset) 5496{ 5497 /* support only 24-bit 4-byte aligned offsets */ 5498 bool valid = !(offset & 0xFF000003); 5499 5500 if (g->allow_all) 5501 return true; 5502 5503 /* whitelist check */ 5504 valid = valid && 5505 is_bar0_global_offset_whitelisted_gk20a(g, offset); 5506 /* resource size check in case there was a problem 5507 * with allocating the assumed size of bar0 */ 5508 valid = valid && gk20a_io_valid_reg(g, offset); 5509 return valid; 5510} 5511#endif 5512 5513int gk20a_gr_handle_notify_pending(struct gk20a *g, 5514 struct gr_gk20a_isr_data *isr_data) 5515{ 5516 struct channel_gk20a *ch = isr_data->ch; 5517 5518#if defined(CONFIG_GK20A_CYCLE_STATS) 5519 void *virtual_address; 5520 u32 buffer_size; 5521 u32 offset; 5522 bool exit; 5523#endif 5524 if (ch == NULL || tsg_gk20a_from_ch(ch) == NULL) { 5525 return 0; 5526 } 5527 5528#if defined(CONFIG_GK20A_CYCLE_STATS) 5529 /* GL will never use payload 0 for cycle state */ 5530 if ((ch->cyclestate.cyclestate_buffer == 
NULL) || (isr_data->data_lo == 0)) 5531 return 0; 5532 5533 nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex); 5534 5535 virtual_address = ch->cyclestate.cyclestate_buffer; 5536 buffer_size = ch->cyclestate.cyclestate_buffer_size; 5537 offset = isr_data->data_lo; 5538 exit = false; 5539 while (!exit) { 5540 struct share_buffer_head *sh_hdr; 5541 u32 min_element_size; 5542 5543 /* validate offset */ 5544 if (offset + sizeof(struct share_buffer_head) > buffer_size || 5545 offset + sizeof(struct share_buffer_head) < offset) { 5546 nvgpu_err(g, 5547 "cyclestats buffer overrun at offset 0x%x", 5548 offset); 5549 break; 5550 } 5551 5552 sh_hdr = (struct share_buffer_head *) 5553 ((char *)virtual_address + offset); 5554 5555 min_element_size = 5556 (sh_hdr->operation == OP_END ? 5557 sizeof(struct share_buffer_head) : 5558 sizeof(struct gk20a_cyclestate_buffer_elem)); 5559 5560 /* validate sh_hdr->size */ 5561 if (sh_hdr->size < min_element_size || 5562 offset + sh_hdr->size > buffer_size || 5563 offset + sh_hdr->size < offset) { 5564 nvgpu_err(g, 5565 "bad cyclestate buffer header size at offset 0x%x", 5566 offset); 5567 sh_hdr->failed = true; 5568 break; 5569 } 5570 5571 switch (sh_hdr->operation) { 5572 case OP_END: 5573 exit = true; 5574 break; 5575 5576 case BAR0_READ32: 5577 case BAR0_WRITE32: 5578 { 5579 struct gk20a_cyclestate_buffer_elem *op_elem = 5580 (struct gk20a_cyclestate_buffer_elem *)sh_hdr; 5581 bool valid = is_valid_cyclestats_bar0_offset_gk20a( 5582 g, op_elem->offset_bar0); 5583 u32 raw_reg; 5584 u64 mask_orig; 5585 u64 v; 5586 5587 if (!valid) { 5588 nvgpu_err(g, 5589 "invalid cycletstats op offset: 0x%x", 5590 op_elem->offset_bar0); 5591 5592 sh_hdr->failed = exit = true; 5593 break; 5594 } 5595 5596 5597 mask_orig = 5598 ((1ULL << 5599 (op_elem->last_bit + 1)) 5600 -1)&~((1ULL << 5601 op_elem->first_bit)-1); 5602 5603 raw_reg = 5604 gk20a_readl(g, 5605 op_elem->offset_bar0); 5606 5607 switch (sh_hdr->operation) { 5608 case BAR0_READ32: 
5609 op_elem->data = 5610 (raw_reg & mask_orig) 5611 >> op_elem->first_bit; 5612 break; 5613 5614 case BAR0_WRITE32: 5615 v = 0; 5616 if ((unsigned int)mask_orig != 5617 (unsigned int)~0) { 5618 v = (unsigned int) 5619 (raw_reg & ~mask_orig); 5620 } 5621 5622 v |= ((op_elem->data 5623 << op_elem->first_bit) 5624 & mask_orig); 5625 5626 gk20a_writel(g, 5627 op_elem->offset_bar0, 5628 (unsigned int)v); 5629 break; 5630 default: 5631 /* nop ok?*/ 5632 break; 5633 } 5634 } 5635 break; 5636 5637 default: 5638 /* no operation content case */ 5639 exit = true; 5640 break; 5641 } 5642 sh_hdr->completed = true; 5643 offset += sh_hdr->size; 5644 } 5645 nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); 5646#endif 5647 nvgpu_log_fn(g, " "); 5648 nvgpu_cond_broadcast_interruptible(&ch->notifier_wq); 5649 return 0; 5650} 5651 5652/* Used by sw interrupt thread to translate current ctx to chid. 5653 * Also used by regops to translate current ctx to chid and tsgid. 5654 * For performance, we don't want to go through 128 channels every time. 5655 * curr_ctx should be the value read from gr_fecs_current_ctx_r(). 5656 * A small tlb is used here to cache translation. 5657 * 5658 * Returned channel must be freed with gk20a_channel_put() */ 5659static struct channel_gk20a *gk20a_gr_get_channel_from_ctx( 5660 struct gk20a *g, u32 curr_ctx, u32 *curr_tsgid) 5661{ 5662 struct fifo_gk20a *f = &g->fifo; 5663 struct gr_gk20a *gr = &g->gr; 5664 u32 chid = -1; 5665 u32 tsgid = NVGPU_INVALID_TSG_ID; 5666 u32 i; 5667 struct channel_gk20a *ret = NULL; 5668 5669 /* when contexts are unloaded from GR, the valid bit is reset 5670 * but the instance pointer information remains intact. 5671 * This might be called from gr_isr where contexts might be 5672 * unloaded. 
No need to check ctx_valid bit 5673 */ 5674 5675 nvgpu_spinlock_acquire(&gr->ch_tlb_lock); 5676 5677 /* check cache first */ 5678 for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) { 5679 if (gr->chid_tlb[i].curr_ctx == curr_ctx) { 5680 chid = gr->chid_tlb[i].chid; 5681 tsgid = gr->chid_tlb[i].tsgid; 5682 ret = gk20a_channel_from_id(g, chid); 5683 goto unlock; 5684 } 5685 } 5686 5687 /* slow path */ 5688 for (chid = 0; chid < f->num_channels; chid++) { 5689 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid); 5690 5691 if (ch == NULL) { 5692 continue; 5693 } 5694 5695 if ((u32)(nvgpu_inst_block_addr(g, &ch->inst_block) >> 5696 ram_in_base_shift_v()) == 5697 gr_fecs_current_ctx_ptr_v(curr_ctx)) { 5698 tsgid = ch->tsgid; 5699 /* found it */ 5700 ret = ch; 5701 break; 5702 } 5703 gk20a_channel_put(ch); 5704 } 5705 5706 if (ret == NULL) { 5707 goto unlock; 5708 } 5709 5710 /* add to free tlb entry */ 5711 for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) { 5712 if (gr->chid_tlb[i].curr_ctx == 0) { 5713 gr->chid_tlb[i].curr_ctx = curr_ctx; 5714 gr->chid_tlb[i].chid = chid; 5715 gr->chid_tlb[i].tsgid = tsgid; 5716 goto unlock; 5717 } 5718 } 5719 5720 /* no free entry, flush one */ 5721 gr->chid_tlb[gr->channel_tlb_flush_index].curr_ctx = curr_ctx; 5722 gr->chid_tlb[gr->channel_tlb_flush_index].chid = chid; 5723 gr->chid_tlb[gr->channel_tlb_flush_index].tsgid = tsgid; 5724 5725 gr->channel_tlb_flush_index = 5726 (gr->channel_tlb_flush_index + 1) & 5727 (GR_CHANNEL_MAP_TLB_SIZE - 1); 5728 5729unlock: 5730 nvgpu_spinlock_release(&gr->ch_tlb_lock); 5731 if (curr_tsgid) { 5732 *curr_tsgid = tsgid; 5733 } 5734 return ret; 5735} 5736 5737int gk20a_gr_lock_down_sm(struct gk20a *g, 5738 u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask, 5739 bool check_errors) 5740{ 5741 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 5742 u32 dbgr_control0; 5743 5744 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5745 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm); 5746 
5747 /* assert stop trigger */ 5748 dbgr_control0 = 5749 gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); 5750 dbgr_control0 |= gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f(); 5751 gk20a_writel(g, 5752 gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); 5753 5754 return g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, global_esr_mask, 5755 check_errors); 5756} 5757 5758bool gk20a_gr_sm_debugger_attached(struct gk20a *g) 5759{ 5760 u32 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r()); 5761 5762 /* check if an sm debugger is attached. 5763 * assumption: all SMs will have debug mode enabled/disabled 5764 * uniformly. */ 5765 if (gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_v(dbgr_control0) == 5766 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_v()) { 5767 return true; 5768 } 5769 5770 return false; 5771} 5772 5773int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, 5774 bool *post_event, struct channel_gk20a *fault_ch, 5775 u32 *hww_global_esr) 5776{ 5777 int ret = 0; 5778 bool do_warp_sync = false, early_exit = false, ignore_debugger = false; 5779 bool disable_sm_exceptions = true; 5780 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 5781 bool sm_debugger_attached; 5782 u32 global_esr, warp_esr, global_mask; 5783 5784 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); 5785 5786 sm_debugger_attached = g->ops.gr.sm_debugger_attached(g); 5787 5788 global_esr = g->ops.gr.get_sm_hww_global_esr(g, gpc, tpc, sm); 5789 *hww_global_esr = global_esr; 5790 warp_esr = g->ops.gr.get_sm_hww_warp_esr(g, gpc, tpc, sm); 5791 global_mask = g->ops.gr.get_sm_no_lock_down_hww_global_esr_mask(g); 5792 5793 if (!sm_debugger_attached) { 5794 nvgpu_err(g, "sm hww global 0x%08x warp 0x%08x", 5795 global_esr, warp_esr); 5796 return -EFAULT; 5797 } 5798 5799 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5800 "sm hww global 0x%08x warp 0x%08x", global_esr, warp_esr); 5801 5802 gr_gk20a_elpg_protected_call(g, 
5803 g->ops.gr.record_sm_error_state(g, gpc, tpc, sm, fault_ch)); 5804 5805 if (g->ops.gr.pre_process_sm_exception) { 5806 ret = g->ops.gr.pre_process_sm_exception(g, gpc, tpc, sm, 5807 global_esr, warp_esr, 5808 sm_debugger_attached, 5809 fault_ch, 5810 &early_exit, 5811 &ignore_debugger); 5812 if (ret) { 5813 nvgpu_err(g, "could not pre-process sm error!"); 5814 return ret; 5815 } 5816 } 5817 5818 if (early_exit) { 5819 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5820 "returning early"); 5821 return ret; 5822 } 5823 5824 /* 5825 * Disable forwarding of tpc exceptions, 5826 * the debugger will reenable exceptions after servicing them. 5827 * 5828 * Do not disable exceptions if the only SM exception is BPT_INT 5829 */ 5830 if ((global_esr == gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) 5831 && (warp_esr == 0)) { 5832 disable_sm_exceptions = false; 5833 } 5834 5835 if (!ignore_debugger && disable_sm_exceptions) { 5836 u32 tpc_exception_en = gk20a_readl(g, 5837 gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + 5838 offset); 5839 tpc_exception_en &= ~gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(); 5840 gk20a_writel(g, 5841 gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset, 5842 tpc_exception_en); 5843 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM Exceptions disabled"); 5844 } 5845 5846 /* if a debugger is present and an error has occurred, do a warp sync */ 5847 if (!ignore_debugger && 5848 ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) { 5849 nvgpu_log(g, gpu_dbg_intr, "warp sync needed"); 5850 do_warp_sync = true; 5851 } 5852 5853 if (do_warp_sync) { 5854 ret = g->ops.gr.lock_down_sm(g, gpc, tpc, sm, 5855 global_mask, true); 5856 if (ret) { 5857 nvgpu_err(g, "sm did not lock down!"); 5858 return ret; 5859 } 5860 } 5861 5862 if (ignore_debugger) { 5863 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5864 "ignore_debugger set, skipping event posting"); 5865 } else { 5866 *post_event = true; 5867 } 5868 5869 return ret; 5870} 5871 5872int 
gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, 5873 bool *post_event) 5874{ 5875 int ret = 0; 5876 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 5877 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 5878 u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; 5879 u32 esr; 5880 5881 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); 5882 5883 esr = gk20a_readl(g, 5884 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); 5885 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); 5886 5887 gk20a_writel(g, 5888 gr_gpc0_tpc0_tex_m_hww_esr_r() + offset, 5889 esr); 5890 5891 return ret; 5892} 5893 5894void gk20a_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc, 5895 u32 *esr_sm_sel) 5896{ 5897 *esr_sm_sel = 1; 5898} 5899 5900static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, 5901 bool *post_event, struct channel_gk20a *fault_ch, 5902 u32 *hww_global_esr) 5903{ 5904 int ret = 0; 5905 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 5906 u32 tpc_exception = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_r() 5907 + offset); 5908 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 5909 5910 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5911 "GPC%d TPC%d: pending exception 0x%x", 5912 gpc, tpc, tpc_exception); 5913 5914 /* check if an sm exeption is pending */ 5915 if (gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(tpc_exception) == 5916 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) { 5917 u32 esr_sm_sel, sm; 5918 5919 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5920 "GPC%d TPC%d: SM exception pending", gpc, tpc); 5921 5922 if (g->ops.gr.handle_tpc_sm_ecc_exception) { 5923 g->ops.gr.handle_tpc_sm_ecc_exception(g, gpc, tpc, 5924 post_event, fault_ch, hww_global_esr); 5925 } 5926 5927 g->ops.gr.get_esr_sm_sel(g, gpc, tpc, &esr_sm_sel); 5928 5929 for (sm = 0; sm < sm_per_tpc; sm++) { 5930 5931 if ((esr_sm_sel & BIT32(sm)) == 0U) { 5932 continue; 5933 } 5934 
5935 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5936 "GPC%d TPC%d: SM%d exception pending", 5937 gpc, tpc, sm); 5938 5939 ret |= g->ops.gr.handle_sm_exception(g, 5940 gpc, tpc, sm, post_event, fault_ch, 5941 hww_global_esr); 5942 /* clear the hwws, also causes tpc and gpc 5943 * exceptions to be cleared. Should be cleared 5944 * only if SM is locked down or empty. 5945 */ 5946 g->ops.gr.clear_sm_hww(g, 5947 gpc, tpc, sm, *hww_global_esr); 5948 5949 } 5950 5951 } 5952 5953 /* check if a tex exeption is pending */ 5954 if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) == 5955 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) { 5956 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5957 "GPC%d TPC%d: TEX exception pending", gpc, tpc); 5958 ret |= g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event); 5959 } 5960 5961 if (g->ops.gr.handle_tpc_mpc_exception) { 5962 ret |= g->ops.gr.handle_tpc_mpc_exception(g, 5963 gpc, tpc, post_event); 5964 } 5965 5966 return ret; 5967} 5968 5969static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, 5970 struct channel_gk20a *fault_ch, u32 *hww_global_esr) 5971{ 5972 int ret = 0; 5973 u32 gpc_offset, gpc, tpc; 5974 struct gr_gk20a *gr = &g->gr; 5975 u32 exception1 = gk20a_readl(g, gr_exception1_r()); 5976 u32 gpc_exception; 5977 5978 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, " "); 5979 5980 for (gpc = 0; gpc < gr->gpc_count; gpc++) { 5981 if ((exception1 & (1 << gpc)) == 0) { 5982 continue; 5983 } 5984 5985 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 5986 "GPC%d exception pending", gpc); 5987 5988 gpc_offset = gk20a_gr_gpc_offset(g, gpc); 5989 5990 gpc_exception = gk20a_readl(g, gr_gpc0_gpccs_gpc_exception_r() 5991 + gpc_offset); 5992 5993 /* check if any tpc has an exception */ 5994 for (tpc = 0; tpc < gr->gpc_tpc_count[gpc]; tpc++) { 5995 if ((gr_gpc0_gpccs_gpc_exception_tpc_v(gpc_exception) & 5996 (1 << tpc)) == 0) { 5997 continue; 5998 } 5999 6000 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 6001 
"GPC%d: TPC%d exception pending", gpc, tpc); 6002 6003 ret |= gk20a_gr_handle_tpc_exception(g, gpc, tpc, 6004 post_event, fault_ch, hww_global_esr); 6005 6006 } 6007 6008 /* Handle GCC exception */ 6009 if ((gr_gpc0_gpccs_gpc_exception_gcc_v(gpc_exception) != 0U) && 6010 (g->ops.gr.handle_gcc_exception != NULL)) { 6011 int gcc_ret = 0; 6012 gcc_ret = g->ops.gr.handle_gcc_exception(g, gpc, tpc, 6013 post_event, fault_ch, hww_global_esr); 6014 ret |= (ret != 0) ? ret : gcc_ret; 6015 } 6016 6017 /* Handle GPCCS exceptions */ 6018 if (g->ops.gr.handle_gpc_gpccs_exception) { 6019 int ret_ecc = 0; 6020 ret_ecc = g->ops.gr.handle_gpc_gpccs_exception(g, gpc, 6021 gpc_exception); 6022 ret |= (ret != 0) ? ret : ret_ecc; 6023 } 6024 6025 /* Handle GPCMMU exceptions */ 6026 if (g->ops.gr.handle_gpc_gpcmmu_exception) { 6027 int ret_mmu = 0; 6028 6029 ret_mmu = g->ops.gr.handle_gpc_gpcmmu_exception(g, gpc, 6030 gpc_exception); 6031 ret |= (ret != 0) ? ret : ret_mmu; 6032 } 6033 6034 } 6035 6036 return ret; 6037} 6038 6039static int gk20a_gr_post_bpt_events(struct gk20a *g, struct tsg_gk20a *tsg, 6040 u32 global_esr) 6041{ 6042 if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) { 6043 g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_INT); 6044 } 6045 6046 if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) { 6047 g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_PAUSE); 6048 } 6049 6050 return 0; 6051} 6052 6053int gk20a_gr_isr(struct gk20a *g) 6054{ 6055 struct gr_gk20a_isr_data isr_data; 6056 u32 grfifo_ctl; 6057 u32 obj_table; 6058 bool need_reset = false; 6059 u32 gr_intr = gk20a_readl(g, gr_intr_r()); 6060 struct channel_gk20a *ch = NULL; 6061 struct channel_gk20a *fault_ch = NULL; 6062 u32 tsgid = NVGPU_INVALID_TSG_ID; 6063 struct tsg_gk20a *tsg = NULL; 6064 u32 gr_engine_id; 6065 u32 global_esr = 0; 6066 u32 chid; 6067 6068 nvgpu_log_fn(g, " "); 6069 nvgpu_log(g, gpu_dbg_intr, "pgraph intr 0x%08x", gr_intr); 6070 6071 if (gr_intr == 
0U) { 6072 return 0; 6073 } 6074 6075 gr_engine_id = gk20a_fifo_get_gr_engine_id(g); 6076 if (gr_engine_id != FIFO_INVAL_ENGINE_ID) { 6077 gr_engine_id = BIT(gr_engine_id); 6078 } 6079 6080 grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r()); 6081 grfifo_ctl &= ~gr_gpfifo_ctl_semaphore_access_f(1); 6082 grfifo_ctl &= ~gr_gpfifo_ctl_access_f(1); 6083 6084 gk20a_writel(g, gr_gpfifo_ctl_r(), 6085 grfifo_ctl | gr_gpfifo_ctl_access_f(0) | 6086 gr_gpfifo_ctl_semaphore_access_f(0)); 6087 6088 isr_data.addr = gk20a_readl(g, gr_trapped_addr_r()); 6089 isr_data.data_lo = gk20a_readl(g, gr_trapped_data_lo_r()); 6090 isr_data.data_hi = gk20a_readl(g, gr_trapped_data_hi_r()); 6091 isr_data.curr_ctx = gk20a_readl(g, gr_fecs_current_ctx_r()); 6092 isr_data.offset = gr_trapped_addr_mthd_v(isr_data.addr); 6093 isr_data.sub_chan = gr_trapped_addr_subch_v(isr_data.addr); 6094 obj_table = (isr_data.sub_chan < 4) ? gk20a_readl(g, 6095 gr_fe_object_table_r(isr_data.sub_chan)) : 0; 6096 isr_data.class_num = gr_fe_object_table_nvclass_v(obj_table); 6097 6098 ch = gk20a_gr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid); 6099 isr_data.ch = ch; 6100 chid = ch != NULL ? 
ch->chid : FIFO_INVAL_CHANNEL_ID; 6101 6102 if (ch == NULL) { 6103 nvgpu_err(g, "pgraph intr: 0x%08x, chid: INVALID", gr_intr); 6104 } else { 6105 tsg = tsg_gk20a_from_ch(ch); 6106 if (tsg == NULL) { 6107 nvgpu_err(g, "pgraph intr: 0x%08x, chid: %d " 6108 "not bound to tsg", gr_intr, chid); 6109 } 6110 } 6111 6112 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 6113 "channel %d: addr 0x%08x, " 6114 "data 0x%08x 0x%08x," 6115 "ctx 0x%08x, offset 0x%08x, " 6116 "subchannel 0x%08x, class 0x%08x", 6117 chid, isr_data.addr, 6118 isr_data.data_hi, isr_data.data_lo, 6119 isr_data.curr_ctx, isr_data.offset, 6120 isr_data.sub_chan, isr_data.class_num); 6121 6122 if (gr_intr & gr_intr_notify_pending_f()) { 6123 g->ops.gr.handle_notify_pending(g, &isr_data); 6124 gk20a_writel(g, gr_intr_r(), 6125 gr_intr_notify_reset_f()); 6126 gr_intr &= ~gr_intr_notify_pending_f(); 6127 } 6128 6129 if (gr_intr & gr_intr_semaphore_pending_f()) { 6130 g->ops.gr.handle_semaphore_pending(g, &isr_data); 6131 gk20a_writel(g, gr_intr_r(), 6132 gr_intr_semaphore_reset_f()); 6133 gr_intr &= ~gr_intr_semaphore_pending_f(); 6134 } 6135 6136 if (gr_intr & gr_intr_semaphore_timeout_pending_f()) { 6137 if (gk20a_gr_handle_semaphore_timeout_pending(g, 6138 &isr_data) != 0) { 6139 need_reset = true; 6140 } 6141 gk20a_writel(g, gr_intr_r(), 6142 gr_intr_semaphore_reset_f()); 6143 gr_intr &= ~gr_intr_semaphore_pending_f(); 6144 } 6145 6146 if (gr_intr & gr_intr_illegal_notify_pending_f()) { 6147 if (gk20a_gr_intr_illegal_notify_pending(g, 6148 &isr_data) != 0) { 6149 need_reset = true; 6150 } 6151 gk20a_writel(g, gr_intr_r(), 6152 gr_intr_illegal_notify_reset_f()); 6153 gr_intr &= ~gr_intr_illegal_notify_pending_f(); 6154 } 6155 6156 if (gr_intr & gr_intr_illegal_method_pending_f()) { 6157 if (gk20a_gr_handle_illegal_method(g, &isr_data) != 0) { 6158 need_reset = true; 6159 } 6160 gk20a_writel(g, gr_intr_r(), 6161 gr_intr_illegal_method_reset_f()); 6162 gr_intr &= ~gr_intr_illegal_method_pending_f(); 6163 } 
6164 6165 if (gr_intr & gr_intr_illegal_class_pending_f()) { 6166 if (gk20a_gr_handle_illegal_class(g, &isr_data) != 0) { 6167 need_reset = true; 6168 } 6169 gk20a_writel(g, gr_intr_r(), 6170 gr_intr_illegal_class_reset_f()); 6171 gr_intr &= ~gr_intr_illegal_class_pending_f(); 6172 } 6173 6174 if (gr_intr & gr_intr_fecs_error_pending_f()) { 6175 if (g->ops.gr.handle_fecs_error(g, ch, &isr_data) != 0) { 6176 need_reset = true; 6177 } 6178 gk20a_writel(g, gr_intr_r(), 6179 gr_intr_fecs_error_reset_f()); 6180 gr_intr &= ~gr_intr_fecs_error_pending_f(); 6181 } 6182 6183 if (gr_intr & gr_intr_class_error_pending_f()) { 6184 if (gk20a_gr_handle_class_error(g, &isr_data) != 0) { 6185 need_reset = true; 6186 } 6187 gk20a_writel(g, gr_intr_r(), 6188 gr_intr_class_error_reset_f()); 6189 gr_intr &= ~gr_intr_class_error_pending_f(); 6190 } 6191 6192 /* this one happens if someone tries to hit a non-whitelisted 6193 * register using set_falcon[4] */ 6194 if (gr_intr & gr_intr_firmware_method_pending_f()) { 6195 if (gk20a_gr_handle_firmware_method(g, &isr_data) != 0) { 6196 need_reset = true; 6197 } 6198 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n"); 6199 gk20a_writel(g, gr_intr_r(), 6200 gr_intr_firmware_method_reset_f()); 6201 gr_intr &= ~gr_intr_firmware_method_pending_f(); 6202 } 6203 6204 if (gr_intr & gr_intr_exception_pending_f()) { 6205 u32 exception = gk20a_readl(g, gr_exception_r()); 6206 6207 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); 6208 6209 if (exception & gr_exception_fe_m()) { 6210 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); 6211 u32 info = gk20a_readl(g, gr_fe_hww_esr_info_r()); 6212 6213 nvgpu_err(g, "fe exception: esr 0x%08x, info 0x%08x", 6214 fe, info); 6215 gk20a_writel(g, gr_fe_hww_esr_r(), 6216 gr_fe_hww_esr_reset_active_f()); 6217 need_reset = true; 6218 } 6219 6220 if (exception & gr_exception_memfmt_m()) { 6221 u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); 6222 6223 nvgpu_err(g, 
"memfmt exception: esr %08x", memfmt); 6224 gk20a_writel(g, gr_memfmt_hww_esr_r(), 6225 gr_memfmt_hww_esr_reset_active_f()); 6226 need_reset = true; 6227 } 6228 6229 if (exception & gr_exception_pd_m()) { 6230 u32 pd = gk20a_readl(g, gr_pd_hww_esr_r()); 6231 6232 nvgpu_err(g, "pd exception: esr 0x%08x", pd); 6233 gk20a_writel(g, gr_pd_hww_esr_r(), 6234 gr_pd_hww_esr_reset_active_f()); 6235 need_reset = true; 6236 } 6237 6238 if (exception & gr_exception_scc_m()) { 6239 u32 scc = gk20a_readl(g, gr_scc_hww_esr_r()); 6240 6241 nvgpu_err(g, "scc exception: esr 0x%08x", scc); 6242 gk20a_writel(g, gr_scc_hww_esr_r(), 6243 gr_scc_hww_esr_reset_active_f()); 6244 need_reset = true; 6245 } 6246 6247 if (exception & gr_exception_ds_m()) { 6248 u32 ds = gk20a_readl(g, gr_ds_hww_esr_r()); 6249 6250 nvgpu_err(g, "ds exception: esr: 0x%08x", ds); 6251 gk20a_writel(g, gr_ds_hww_esr_r(), 6252 gr_ds_hww_esr_reset_task_f()); 6253 need_reset = true; 6254 } 6255 6256 if (exception & gr_exception_ssync_m()) { 6257 if (g->ops.gr.handle_ssync_hww) { 6258 if (g->ops.gr.handle_ssync_hww(g) != 0) { 6259 need_reset = true; 6260 } 6261 } else { 6262 nvgpu_err(g, "unhandled ssync exception"); 6263 } 6264 } 6265 6266 if (exception & gr_exception_mme_m()) { 6267 u32 mme = gk20a_readl(g, gr_mme_hww_esr_r()); 6268 u32 info = gk20a_readl(g, gr_mme_hww_esr_info_r()); 6269 6270 nvgpu_err(g, "mme exception: esr 0x%08x info:0x%08x", 6271 mme, info); 6272 gk20a_writel(g, gr_mme_hww_esr_r(), 6273 gr_mme_hww_esr_reset_active_f()); 6274 need_reset = true; 6275 } 6276 6277 if (exception & gr_exception_sked_m()) { 6278 u32 sked = gk20a_readl(g, gr_sked_hww_esr_r()); 6279 6280 nvgpu_err(g, "sked exception: esr 0x%08x", sked); 6281 gk20a_writel(g, gr_sked_hww_esr_r(), 6282 gr_sked_hww_esr_reset_active_f()); 6283 need_reset = true; 6284 } 6285 6286 /* check if a gpc exception has occurred */ 6287 if (((exception & gr_exception_gpc_m()) != 0U) && 6288 !need_reset) { 6289 bool post_event = false; 6290 6291 
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, 6292 "GPC exception pending"); 6293 6294 if (tsg != NULL) { 6295 fault_ch = isr_data.ch; 6296 } 6297 6298 /* fault_ch can be NULL */ 6299 /* check if any gpc has an exception */ 6300 if (gk20a_gr_handle_gpc_exception(g, &post_event, 6301 fault_ch, &global_esr) != 0) { 6302 need_reset = true; 6303 } 6304 6305 /* signal clients waiting on an event */ 6306 if (g->ops.gr.sm_debugger_attached(g) && 6307 post_event && (fault_ch != NULL)) { 6308 g->ops.debugger.post_events(fault_ch); 6309 } 6310 } 6311 6312 gk20a_writel(g, gr_intr_r(), gr_intr_exception_reset_f()); 6313 gr_intr &= ~gr_intr_exception_pending_f(); 6314 6315 if (need_reset) { 6316 nvgpu_err(g, "set gr exception notifier"); 6317 gk20a_gr_set_error_notifier(g, &isr_data, 6318 NVGPU_ERR_NOTIFIER_GR_EXCEPTION); 6319 } 6320 } 6321 6322 if (need_reset) { 6323 if (tsg != NULL) { 6324 gk20a_fifo_recover(g, gr_engine_id, 6325 tsgid, true, true, true, 6326 RC_TYPE_GR_FAULT); 6327 } else { 6328 if (ch != NULL) { 6329 nvgpu_err(g, "chid: %d referenceable but not " 6330 "bound to tsg", chid); 6331 } 6332 gk20a_fifo_recover(g, gr_engine_id, 6333 0, false, false, true, 6334 RC_TYPE_GR_FAULT); 6335 } 6336 } 6337 6338 if (gr_intr != 0U) { 6339 /* clear unhandled interrupts */ 6340 if (ch == NULL) { 6341 /* 6342 * This is probably an interrupt during 6343 * gk20a_free_channel() 6344 */ 6345 nvgpu_err(g, "unhandled gr intr 0x%08x for " 6346 "unreferenceable channel, clearing", 6347 gr_intr); 6348 } else { 6349 nvgpu_err(g, "unhandled gr intr 0x%08x for chid: %d", 6350 gr_intr, chid); 6351 } 6352 gk20a_writel(g, gr_intr_r(), gr_intr); 6353 } 6354 6355 gk20a_writel(g, gr_gpfifo_ctl_r(), 6356 grfifo_ctl | gr_gpfifo_ctl_access_f(1) | 6357 gr_gpfifo_ctl_semaphore_access_f(1)); 6358 6359 6360 /* Posting of BPT events should be the last thing in this function */ 6361 if ((global_esr != 0U) && (tsg != NULL)) { 6362 gk20a_gr_post_bpt_events(g, tsg, global_esr); 6363 } 6364 6365 if (ch) { 
6366 gk20a_channel_put(ch); 6367 } 6368 6369 return 0; 6370} 6371 6372u32 gk20a_gr_nonstall_isr(struct gk20a *g) 6373{ 6374 u32 ops = 0; 6375 u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r()); 6376 6377 nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr); 6378 6379 if ((gr_intr & gr_intr_nonstall_trap_pending_f()) != 0U) { 6380 /* Clear the interrupt */ 6381 gk20a_writel(g, gr_intr_nonstall_r(), 6382 gr_intr_nonstall_trap_pending_f()); 6383 ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE | 6384 GK20A_NONSTALL_OPS_POST_EVENTS); 6385 } 6386 return ops; 6387} 6388 6389int gr_gk20a_fecs_get_reglist_img_size(struct gk20a *g, u32 *size) 6390{ 6391 BUG_ON(size == NULL); 6392 return gr_gk20a_submit_fecs_method_op(g, 6393 (struct fecs_method_op_gk20a) { 6394 .mailbox.id = 0, 6395 .mailbox.data = 0, 6396 .mailbox.clr = ~0, 6397 .method.data = 1, 6398 .method.addr = gr_fecs_method_push_adr_discover_reglist_image_size_v(), 6399 .mailbox.ret = size, 6400 .cond.ok = GR_IS_UCODE_OP_NOT_EQUAL, 6401 .mailbox.ok = 0, 6402 .cond.fail = GR_IS_UCODE_OP_SKIP, 6403 .mailbox.fail = 0}, false); 6404} 6405 6406int gr_gk20a_fecs_set_reglist_bind_inst(struct gk20a *g, 6407 struct nvgpu_mem *inst_block) 6408{ 6409 u32 data = fecs_current_ctx_data(g, inst_block); 6410 6411 return gr_gk20a_submit_fecs_method_op(g, 6412 (struct fecs_method_op_gk20a){ 6413 .mailbox.id = 4, 6414 .mailbox.data = data, 6415 .mailbox.clr = ~0, 6416 .method.data = 1, 6417 .method.addr = gr_fecs_method_push_adr_set_reglist_bind_instance_v(), 6418 .mailbox.ret = NULL, 6419 .cond.ok = GR_IS_UCODE_OP_EQUAL, 6420 .mailbox.ok = 1, 6421 .cond.fail = GR_IS_UCODE_OP_SKIP, 6422 .mailbox.fail = 0}, false); 6423} 6424 6425int gr_gk20a_fecs_set_reglist_virtual_addr(struct gk20a *g, u64 pmu_va) 6426{ 6427 return gr_gk20a_submit_fecs_method_op(g, 6428 (struct fecs_method_op_gk20a) { 6429 .mailbox.id = 4, 6430 .mailbox.data = u64_lo32(pmu_va >> 8), 6431 .mailbox.clr = ~0, 6432 .method.data = 1, 6433 .method.addr = 
gr_fecs_method_push_adr_set_reglist_virtual_address_v(), 6434 .mailbox.ret = NULL, 6435 .cond.ok = GR_IS_UCODE_OP_EQUAL, 6436 .mailbox.ok = 1, 6437 .cond.fail = GR_IS_UCODE_OP_SKIP, 6438 .mailbox.fail = 0}, false); 6439} 6440 6441int gk20a_gr_suspend(struct gk20a *g) 6442{ 6443 u32 ret = 0; 6444 6445 nvgpu_log_fn(g, " "); 6446 6447 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 6448 GR_IDLE_CHECK_DEFAULT); 6449 if (ret) { 6450 return ret; 6451 } 6452 6453 gk20a_writel(g, gr_gpfifo_ctl_r(), 6454 gr_gpfifo_ctl_access_disabled_f()); 6455 6456 /* disable gr intr */ 6457 gk20a_writel(g, gr_intr_r(), 0); 6458 gk20a_writel(g, gr_intr_en_r(), 0); 6459 6460 /* disable all exceptions */ 6461 gk20a_writel(g, gr_exception_r(), 0); 6462 gk20a_writel(g, gr_exception_en_r(), 0); 6463 gk20a_writel(g, gr_exception1_r(), 0); 6464 gk20a_writel(g, gr_exception1_en_r(), 0); 6465 gk20a_writel(g, gr_exception2_r(), 0); 6466 gk20a_writel(g, gr_exception2_en_r(), 0); 6467 6468 gk20a_gr_flush_channel_tlb(&g->gr); 6469 6470 g->gr.initialized = false; 6471 6472 nvgpu_log_fn(g, "done"); 6473 return ret; 6474} 6475 6476static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, 6477 u32 addr, 6478 bool is_quad, u32 quad, 6479 u32 *context_buffer, 6480 u32 context_buffer_size, 6481 u32 *priv_offset); 6482 6483static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g, 6484 u32 addr, 6485 u32 *priv_offset); 6486 6487/* This function will decode a priv address and return the partition type and numbers. 
*/ 6488int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, 6489 enum ctxsw_addr_type *addr_type, 6490 u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num, 6491 u32 *broadcast_flags) 6492{ 6493 u32 gpc_addr; 6494 6495 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6496 6497 /* setup defaults */ 6498 *addr_type = CTXSW_ADDR_TYPE_SYS; 6499 *broadcast_flags = PRI_BROADCAST_FLAGS_NONE; 6500 *gpc_num = 0; 6501 *tpc_num = 0; 6502 *ppc_num = 0; 6503 *be_num = 0; 6504 6505 if (pri_is_gpc_addr(g, addr)) { 6506 *addr_type = CTXSW_ADDR_TYPE_GPC; 6507 gpc_addr = pri_gpccs_addr_mask(addr); 6508 if (pri_is_gpc_addr_shared(g, addr)) { 6509 *addr_type = CTXSW_ADDR_TYPE_GPC; 6510 *broadcast_flags |= PRI_BROADCAST_FLAGS_GPC; 6511 } else { 6512 *gpc_num = pri_get_gpc_num(g, addr); 6513 } 6514 6515 if (pri_is_ppc_addr(g, gpc_addr)) { 6516 *addr_type = CTXSW_ADDR_TYPE_PPC; 6517 if (pri_is_ppc_addr_shared(g, gpc_addr)) { 6518 *broadcast_flags |= PRI_BROADCAST_FLAGS_PPC; 6519 return 0; 6520 } 6521 } 6522 if (g->ops.gr.is_tpc_addr(g, gpc_addr)) { 6523 *addr_type = CTXSW_ADDR_TYPE_TPC; 6524 if (pri_is_tpc_addr_shared(g, gpc_addr)) { 6525 *broadcast_flags |= PRI_BROADCAST_FLAGS_TPC; 6526 return 0; 6527 } 6528 *tpc_num = g->ops.gr.get_tpc_num(g, gpc_addr); 6529 } 6530 return 0; 6531 } else if (pri_is_be_addr(g, addr)) { 6532 *addr_type = CTXSW_ADDR_TYPE_BE; 6533 if (pri_is_be_addr_shared(g, addr)) { 6534 *broadcast_flags |= PRI_BROADCAST_FLAGS_BE; 6535 return 0; 6536 } 6537 *be_num = pri_get_be_num(g, addr); 6538 return 0; 6539 } else if (g->ops.ltc.pri_is_ltc_addr(g, addr)) { 6540 *addr_type = CTXSW_ADDR_TYPE_LTCS; 6541 if (g->ops.ltc.is_ltcs_ltss_addr(g, addr)) { 6542 *broadcast_flags |= PRI_BROADCAST_FLAGS_LTCS; 6543 } else if (g->ops.ltc.is_ltcn_ltss_addr(g, addr)) { 6544 *broadcast_flags |= PRI_BROADCAST_FLAGS_LTSS; 6545 } 6546 return 0; 6547 } else if (pri_is_fbpa_addr(g, addr)) { 6548 *addr_type = CTXSW_ADDR_TYPE_FBPA; 6549 if (pri_is_fbpa_addr_shared(g, addr)) { 
6550 *broadcast_flags |= PRI_BROADCAST_FLAGS_FBPA; 6551 return 0; 6552 } 6553 return 0; 6554 } else if ((g->ops.gr.is_egpc_addr != NULL) && 6555 g->ops.gr.is_egpc_addr(g, addr)) { 6556 return g->ops.gr.decode_egpc_addr(g, 6557 addr, addr_type, gpc_num, 6558 tpc_num, broadcast_flags); 6559 } else { 6560 *addr_type = CTXSW_ADDR_TYPE_SYS; 6561 return 0; 6562 } 6563 /* PPC!?!?!?! */ 6564 6565 /*NOTREACHED*/ 6566 return -EINVAL; 6567} 6568 6569void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr, 6570 u32 num_fbpas, 6571 u32 *priv_addr_table, u32 *t) 6572{ 6573 u32 fbpa_id; 6574 6575 for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) { 6576 priv_addr_table[(*t)++] = pri_fbpa_addr(g, 6577 pri_fbpa_addr_mask(g, addr), fbpa_id); 6578 } 6579} 6580 6581int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr, 6582 u32 gpc_num, 6583 u32 *priv_addr_table, u32 *t) 6584{ 6585 u32 ppc_num; 6586 6587 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6588 6589 for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++) { 6590 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), 6591 gpc_num, ppc_num); 6592 } 6593 6594 return 0; 6595} 6596 6597/* 6598 * The context buffer is indexed using BE broadcast addresses and GPC/TPC 6599 * unicast addresses. This function will convert a BE unicast address to a BE 6600 * broadcast address and split a GPC/TPC broadcast address into a table of 6601 * GPC/TPC addresses. 
The addresses generated by this function can be 6602 * successfully processed by gr_gk20a_find_priv_offset_in_buffer 6603 */ 6604int gr_gk20a_create_priv_addr_table(struct gk20a *g, 6605 u32 addr, 6606 u32 *priv_addr_table, 6607 u32 *num_registers) 6608{ 6609 enum ctxsw_addr_type addr_type; 6610 u32 gpc_num, tpc_num, ppc_num, be_num; 6611 u32 priv_addr, gpc_addr; 6612 u32 broadcast_flags; 6613 u32 t; 6614 int err; 6615 6616 t = 0; 6617 *num_registers = 0; 6618 6619 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6620 6621 err = g->ops.gr.decode_priv_addr(g, addr, &addr_type, 6622 &gpc_num, &tpc_num, &ppc_num, &be_num, 6623 &broadcast_flags); 6624 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); 6625 if (err != 0) { 6626 return err; 6627 } 6628 6629 if ((addr_type == CTXSW_ADDR_TYPE_SYS) || 6630 (addr_type == CTXSW_ADDR_TYPE_BE)) { 6631 /* The BE broadcast registers are included in the compressed PRI 6632 * table. Convert a BE unicast address to a broadcast address 6633 * so that we can look up the offset. */ 6634 if ((addr_type == CTXSW_ADDR_TYPE_BE) && 6635 ((broadcast_flags & PRI_BROADCAST_FLAGS_BE) == 0U)) { 6636 priv_addr_table[t++] = pri_be_shared_addr(g, addr); 6637 } else { 6638 priv_addr_table[t++] = addr; 6639 } 6640 6641 *num_registers = t; 6642 return 0; 6643 } 6644 6645 /* The GPC/TPC unicast registers are included in the compressed PRI 6646 * tables. Convert a GPC/TPC broadcast address to unicast addresses so 6647 * that we can look up the offsets. 
*/ 6648 if (broadcast_flags & PRI_BROADCAST_FLAGS_GPC) { 6649 for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) { 6650 6651 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { 6652 for (tpc_num = 0; 6653 tpc_num < g->gr.gpc_tpc_count[gpc_num]; 6654 tpc_num++) { 6655 priv_addr_table[t++] = 6656 pri_tpc_addr(g, pri_tpccs_addr_mask(addr), 6657 gpc_num, tpc_num); 6658 } 6659 6660 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { 6661 err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, 6662 priv_addr_table, &t); 6663 if (err != 0) { 6664 return err; 6665 } 6666 } else { 6667 priv_addr = pri_gpc_addr(g, 6668 pri_gpccs_addr_mask(addr), 6669 gpc_num); 6670 6671 gpc_addr = pri_gpccs_addr_mask(priv_addr); 6672 tpc_num = g->ops.gr.get_tpc_num(g, gpc_addr); 6673 if (tpc_num >= g->gr.gpc_tpc_count[gpc_num]) { 6674 continue; 6675 } 6676 6677 priv_addr_table[t++] = priv_addr; 6678 } 6679 } 6680 } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || 6681 (addr_type == CTXSW_ADDR_TYPE_ETPC)) && 6682 (g->ops.gr.egpc_etpc_priv_addr_table != NULL)) { 6683 nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); 6684 g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, 6685 broadcast_flags, priv_addr_table, &t); 6686 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { 6687 g->ops.ltc.split_lts_broadcast_addr(g, addr, 6688 priv_addr_table, &t); 6689 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) { 6690 g->ops.ltc.split_ltc_broadcast_addr(g, addr, 6691 priv_addr_table, &t); 6692 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) { 6693 g->ops.gr.split_fbpa_broadcast_addr(g, addr, 6694 nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS), 6695 priv_addr_table, &t); 6696 } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) == 0U) { 6697 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { 6698 for (tpc_num = 0; 6699 tpc_num < g->gr.gpc_tpc_count[gpc_num]; 6700 tpc_num++) { 6701 priv_addr_table[t++] = 6702 pri_tpc_addr(g, pri_tpccs_addr_mask(addr), 6703 gpc_num, 
tpc_num); 6704 } 6705 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { 6706 err = gr_gk20a_split_ppc_broadcast_addr(g, 6707 addr, gpc_num, priv_addr_table, &t); 6708 } else { 6709 priv_addr_table[t++] = addr; 6710 } 6711 } 6712 6713 *num_registers = t; 6714 return 0; 6715} 6716 6717int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, 6718 u32 addr, 6719 u32 max_offsets, 6720 u32 *offsets, u32 *offset_addrs, 6721 u32 *num_offsets, 6722 bool is_quad, u32 quad) 6723{ 6724 u32 i; 6725 u32 priv_offset = 0; 6726 u32 *priv_registers; 6727 u32 num_registers = 0; 6728 int err = 0; 6729 struct gr_gk20a *gr = &g->gr; 6730 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 6731 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 6732 sm_per_tpc; 6733 6734 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6735 6736 /* implementation is crossed-up if either of these happen */ 6737 if (max_offsets > potential_offsets) { 6738 nvgpu_log_fn(g, "max_offsets > potential_offsets"); 6739 return -EINVAL; 6740 } 6741 6742 if (!g->gr.ctx_vars.golden_image_initialized) { 6743 return -ENODEV; 6744 } 6745 6746 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); 6747 if (priv_registers == NULL) { 6748 nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); 6749 err = PTR_ERR(priv_registers); 6750 goto cleanup; 6751 } 6752 memset(offsets, 0, sizeof(u32) * max_offsets); 6753 memset(offset_addrs, 0, sizeof(u32) * max_offsets); 6754 *num_offsets = 0; 6755 6756 g->ops.gr.create_priv_addr_table(g, addr, &priv_registers[0], 6757 &num_registers); 6758 6759 if ((max_offsets > 1) && (num_registers > max_offsets)) { 6760 nvgpu_log_fn(g, "max_offsets = %d, num_registers = %d", 6761 max_offsets, num_registers); 6762 err = -EINVAL; 6763 goto cleanup; 6764 } 6765 6766 if ((max_offsets == 1) && (num_registers > 1)) { 6767 num_registers = 1; 6768 } 6769 6770 if (g->gr.ctx_vars.local_golden_image == NULL) { 6771 
nvgpu_log_fn(g, "no context switch header info to work with"); 6772 err = -EINVAL; 6773 goto cleanup; 6774 } 6775 6776 for (i = 0; i < num_registers; i++) { 6777 err = gr_gk20a_find_priv_offset_in_buffer(g, 6778 priv_registers[i], 6779 is_quad, quad, 6780 g->gr.ctx_vars.local_golden_image, 6781 g->gr.ctx_vars.golden_image_size, 6782 &priv_offset); 6783 if (err != 0) { 6784 nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", 6785 addr); /*, grPriRegStr(addr)));*/ 6786 goto cleanup; 6787 } 6788 6789 offsets[i] = priv_offset; 6790 offset_addrs[i] = priv_registers[i]; 6791 } 6792 6793 *num_offsets = num_registers; 6794cleanup: 6795 if (!IS_ERR_OR_NULL(priv_registers)) { 6796 nvgpu_kfree(g, priv_registers); 6797 } 6798 6799 return err; 6800} 6801 6802int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, 6803 u32 addr, 6804 u32 max_offsets, 6805 u32 *offsets, u32 *offset_addrs, 6806 u32 *num_offsets) 6807{ 6808 u32 i; 6809 u32 priv_offset = 0; 6810 u32 *priv_registers; 6811 u32 num_registers = 0; 6812 int err = 0; 6813 struct gr_gk20a *gr = &g->gr; 6814 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 6815 u32 potential_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 6816 sm_per_tpc; 6817 6818 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6819 6820 /* implementation is crossed-up if either of these happen */ 6821 if (max_offsets > potential_offsets) { 6822 return -EINVAL; 6823 } 6824 6825 if (!g->gr.ctx_vars.golden_image_initialized) { 6826 return -ENODEV; 6827 } 6828 6829 priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); 6830 if (priv_registers == NULL) { 6831 nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); 6832 return -ENOMEM; 6833 } 6834 memset(offsets, 0, sizeof(u32) * max_offsets); 6835 memset(offset_addrs, 0, sizeof(u32) * max_offsets); 6836 *num_offsets = 0; 6837 6838 g->ops.gr.create_priv_addr_table(g, addr, priv_registers, 6839 &num_registers); 6840 6841 
if ((max_offsets > 1) && (num_registers > max_offsets)) { 6842 err = -EINVAL; 6843 goto cleanup; 6844 } 6845 6846 if ((max_offsets == 1) && (num_registers > 1)) { 6847 num_registers = 1; 6848 } 6849 6850 if (g->gr.ctx_vars.local_golden_image == NULL) { 6851 nvgpu_log_fn(g, "no context switch header info to work with"); 6852 err = -EINVAL; 6853 goto cleanup; 6854 } 6855 6856 for (i = 0; i < num_registers; i++) { 6857 err = gr_gk20a_find_priv_offset_in_pm_buffer(g, 6858 priv_registers[i], 6859 &priv_offset); 6860 if (err != 0) { 6861 nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", 6862 addr); /*, grPriRegStr(addr)));*/ 6863 goto cleanup; 6864 } 6865 6866 offsets[i] = priv_offset; 6867 offset_addrs[i] = priv_registers[i]; 6868 } 6869 6870 *num_offsets = num_registers; 6871cleanup: 6872 nvgpu_kfree(g, priv_registers); 6873 6874 return err; 6875} 6876 6877/* Setup some register tables. This looks hacky; our 6878 * register/offset functions are just that, functions. 6879 * So they can't be used as initializers... TBD: fix to 6880 * generate consts at least on an as-needed basis. 
6881 */ 6882static const u32 _num_ovr_perf_regs = 17; 6883static u32 _ovr_perf_regs[17] = { 0, }; 6884/* Following are the blocks of registers that the ucode 6885 stores in the extended region.*/ 6886 6887void gk20a_gr_init_ovr_sm_dsm_perf(void) 6888{ 6889 if (_ovr_perf_regs[0] != 0) { 6890 return; 6891 } 6892 6893 _ovr_perf_regs[0] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel0_r(); 6894 _ovr_perf_regs[1] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel1_r(); 6895 _ovr_perf_regs[2] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r(); 6896 _ovr_perf_regs[3] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control5_r(); 6897 _ovr_perf_regs[4] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status1_r(); 6898 _ovr_perf_regs[5] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_control_r(); 6899 _ovr_perf_regs[6] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_control_r(); 6900 _ovr_perf_regs[7] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_control_r(); 6901 _ovr_perf_regs[8] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_control_r(); 6902 _ovr_perf_regs[9] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_control_r(); 6903 _ovr_perf_regs[10] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_control_r(); 6904 _ovr_perf_regs[11] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_control_r(); 6905 _ovr_perf_regs[12] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_control_r(); 6906 _ovr_perf_regs[13] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_r(); 6907 _ovr_perf_regs[14] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_r(); 6908 _ovr_perf_regs[15] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_r(); 6909 _ovr_perf_regs[16] = gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_r(); 6910 6911} 6912 6913/* TBD: would like to handle this elsewhere, at a higher level. 6914 * these are currently constructed in a "test-then-write" style 6915 * which makes it impossible to know externally whether a ctx 6916 * write will actually occur. 
so later we should put a lazy,
 * map-and-hold system in the patch write state */
/*
 * Patch an SMPC (ovr perf) register write into a channel's gr context
 * image via the patch buffer.  Scans every GPC/TPC instance of every
 * override perf register; when @addr matches one, the write is appended
 * to the patch context and the patch count/address words in the context
 * header (or main image when no separate ctx header is mapped) are
 * updated.  Returns 0 whether or not a match was found; -EINVAL only
 * when the channel has no TSG.
 */
static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
			    struct channel_gk20a *ch,
			    u32 addr, u32 data,
			    struct nvgpu_mem *mem)
{
	u32 num_gpc = g->gr.gpc_count;
	u32 num_tpc;
	u32 tpc, gpc, reg;
	u32 chk_addr;
	u32 vaddr_lo;
	u32 vaddr_hi;
	u32 tmp;
	u32 num_ovr_perf_regs = 0;
	u32 *ovr_perf_regs = NULL;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx;
	struct nvgpu_mem *ctxheader = &ch->ctx_header;

	tsg = tsg_gk20a_from_ch(ch);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;
	/* make sure the per-chip register tables are populated */
	g->ops.gr.init_ovr_sm_dsm_perf();
	g->ops.gr.init_sm_dsm_reg_info();
	g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);

	for (reg = 0; reg < num_ovr_perf_regs; reg++) {
		for (gpc = 0; gpc < num_gpc; gpc++) {
			num_tpc = g->gr.gpc_tpc_count[gpc];
			for (tpc = 0; tpc < num_tpc; tpc++) {
				/* unicast address of this reg for gpc/tpc */
				chk_addr = ((gpc_stride * gpc) +
					    (tpc_in_gpc_stride * tpc) +
					    ovr_perf_regs[reg]);
				if (chk_addr != addr) {
					continue;
				}
				/* reset the patch count from previous
				   runs,if ucode has already processed
				   it */
				tmp = nvgpu_mem_rd(g, mem,
				       ctxsw_prog_main_image_patch_count_o());

				if (tmp == 0U) {
					gr_ctx->patch_ctx.data_count = 0;
				}

				gr_gk20a_ctx_patch_write(g, gr_ctx,
							 addr, data, true);

				vaddr_lo = u64_lo32(gr_ctx->patch_ctx.mem.gpu_va);
				vaddr_hi = u64_hi32(gr_ctx->patch_ctx.mem.gpu_va);

				/* publish new patch count, then the patch
				 * buffer GPU VA, to the image the ucode
				 * will restore from */
				nvgpu_mem_wr(g, mem,
					 ctxsw_prog_main_image_patch_count_o(),
					 gr_ctx->patch_ctx.data_count);
				if (ctxheader->gpu_va) {
					nvgpu_mem_wr(g, ctxheader,
					 ctxsw_prog_main_image_patch_adr_lo_o(),
					 vaddr_lo);
					nvgpu_mem_wr(g, ctxheader,
					 ctxsw_prog_main_image_patch_adr_hi_o(),
					 vaddr_hi);
				} else {
					nvgpu_mem_wr(g, mem,
					 ctxsw_prog_main_image_patch_adr_lo_o(),
					 vaddr_lo);
					nvgpu_mem_wr(g, mem,
					 ctxsw_prog_main_image_patch_adr_hi_o(),
					 vaddr_hi);
				}

				/* we're not caching these on cpu side,
				   but later watch for it */
				return 0;
			}
		}
	}

	return 0;
}

/* sentinel "no register matched" index */
#define ILLEGAL_ID ((u32)~0)

/* true if the ctxsw main image header magic word is valid */
static inline bool check_main_image_header_magic(u8 *context)
{
	u32 magic = *(u32 *)(context + ctxsw_prog_main_image_magic_value_o());
	return magic == ctxsw_prog_main_image_magic_value_v_value_v();
}
/* true if a FECS/GPCCS local header magic word is valid */
static inline bool check_local_header_magic(u8 *context)
{
	u32 magic = *(u32 *)(context + ctxsw_prog_local_magic_value_o());
	return magic == ctxsw_prog_local_magic_value_v_value_v();

}

/* most likely dupe of ctxsw_gpccs_header__size_1_v() */
static inline int ctxsw_prog_ucode_header_size_in_bytes(void)
{
	return 256;
}

/* Expose the (lazily initialized) ovr perf register table to callers. */
void gk20a_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
					       u32 **ovr_perf_regs)
{
	*num_ovr_perf_regs = _num_ovr_perf_regs;
	*ovr_perf_regs = _ovr_perf_regs;
}

/*
 * Look up @addr in the "extended" (SMPC/DSM perf) region of the context
 * buffer and return its byte offset via @priv_offset.  Only TPC registers
 * live there; -EINVAL tells the caller to search the regular segments.
 */
static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
						   u32 addr,
						   bool is_quad, u32 quad,
						   u32 *context_buffer,
						   u32 context_buffer_size,
						   u32 *priv_offset)
{
	u32 i, data32;
	u32 gpc_num, tpc_num;
	u32 num_gpcs, num_tpcs;
	u32 chk_addr;
	u32 ext_priv_offset, ext_priv_size;
	u8 *context;
	u32 offset_to_segment, offset_to_segment_end;
	u32 sm_dsm_perf_reg_id = ILLEGAL_ID;
	u32 sm_dsm_perf_ctrl_reg_id = ILLEGAL_ID;
	u32 num_ext_gpccs_ext_buffer_segments;
	u32 inter_seg_offset;
	u32 max_tpc_count;
	u32 *sm_dsm_perf_ctrl_regs = NULL;
	u32
num_sm_dsm_perf_ctrl_regs = 0;
	u32 *sm_dsm_perf_regs = NULL;
	u32 num_sm_dsm_perf_regs = 0;
	u32 buffer_segments_size = 0;
	u32 marker_size = 0;
	u32 control_register_stride = 0;
	u32 perf_register_stride = 0;
	struct gr_gk20a *gr = &g->gr;
	u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	u32 tpc_gpc_mask = (tpc_in_gpc_stride - 1);

	/* Only have TPC registers in extended region, so if not a TPC reg,
	   then return error so caller can look elsewhere. */
	if (pri_is_gpc_addr(g, addr)) {
		u32 gpc_addr = 0;
		gpc_num = pri_get_gpc_num(g, addr);
		gpc_addr = pri_gpccs_addr_mask(addr);
		if (g->ops.gr.is_tpc_addr(g, gpc_addr)) {
			tpc_num = g->ops.gr.get_tpc_num(g, gpc_addr);
		} else {
			return -EINVAL;
		}

		nvgpu_log_info(g, " gpc = %d tpc = %d",
				gpc_num, tpc_num);
	} else if ((g->ops.gr.is_etpc_addr != NULL) &&
				g->ops.gr.is_etpc_addr(g, addr)) {
			/* extended-GPC registers use a different PRI base */
			g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num);
			gpc_base = g->ops.gr.get_egpc_base(g);
	} else {
		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
				"does not exist in extended region");
		return -EINVAL;
	}

	buffer_segments_size = ctxsw_prog_extended_buffer_segments_size_in_bytes_v();
	/* note below is in words/num_registers */
	marker_size = ctxsw_prog_extended_marker_size_in_bytes_v() >> 2;

	context = (u8 *)context_buffer;
	/* sanity check main header */
	if (!check_main_image_header_magic(context)) {
		nvgpu_err(g,
			   "Invalid main header: magic value");
		return -EINVAL;
	}
	num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
	if (gpc_num >= num_gpcs) {
		nvgpu_err(g,
		   "GPC 0x%08x is greater than total count 0x%08x!",
			   gpc_num, num_gpcs);
		return -EINVAL;
	}

	data32 = *(u32 *)(context + ctxsw_prog_main_extended_buffer_ctl_o());
	ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32);
	if (0 == ext_priv_size) {
		nvgpu_log_info(g, " No extended memory in context buffer");
		return -EINVAL;
	}
	ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32);

	/* extended region bounds; offset ctl is in 256B header units */
	offset_to_segment = ext_priv_offset * ctxsw_prog_ucode_header_size_in_bytes();
	offset_to_segment_end = offset_to_segment +
		(ext_priv_size * buffer_segments_size);

	/* check local header magic */
	context += ctxsw_prog_ucode_header_size_in_bytes();
	if (!check_local_header_magic(context)) {
		nvgpu_err(g,
			   "Invalid local header: magic value");
		return -EINVAL;
	}

	/*
	 * See if the incoming register address is in the first table of
	 * registers. We check this by decoding only the TPC addr portion.
	 * If we get a hit on the TPC bit, we then double check the address
	 * by computing it from the base gpc/tpc strides. Then make sure
	 * it is a real match.
	 */
	g->ops.gr.get_sm_dsm_perf_regs(g, &num_sm_dsm_perf_regs,
				       &sm_dsm_perf_regs,
				       &perf_register_stride);

	g->ops.gr.init_sm_dsm_reg_info();

	for (i = 0; i < num_sm_dsm_perf_regs; i++) {
		if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) {
			sm_dsm_perf_reg_id = i;

			nvgpu_log_info(g, "register match: 0x%08x",
					sm_dsm_perf_regs[i]);

			chk_addr = (gpc_base + gpc_stride * gpc_num) +
				   tpc_in_gpc_base +
				   (tpc_in_gpc_stride * tpc_num) +
				   (sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask);

			if (chk_addr != addr) {
				nvgpu_err(g,
				   "Oops addr miss-match! : 0x%08x != 0x%08x",
					   addr, chk_addr);
				return -EINVAL;
			}
			break;
		}
	}

	/* Didn't find reg in supported group 1.
	 *  so try the second group now */
	g->ops.gr.get_sm_dsm_perf_ctrl_regs(g, &num_sm_dsm_perf_ctrl_regs,
				       &sm_dsm_perf_ctrl_regs,
				       &control_register_stride);

	if (ILLEGAL_ID == sm_dsm_perf_reg_id) {
		for (i = 0; i < num_sm_dsm_perf_ctrl_regs; i++) {
			if ((addr & tpc_gpc_mask) ==
			    (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) {
				sm_dsm_perf_ctrl_reg_id = i;

				nvgpu_log_info(g, "register match: 0x%08x",
						sm_dsm_perf_ctrl_regs[i]);

				chk_addr = (gpc_base + gpc_stride * gpc_num) +
					   tpc_in_gpc_base +
					   tpc_in_gpc_stride * tpc_num +
					   (sm_dsm_perf_ctrl_regs[sm_dsm_perf_ctrl_reg_id] &
					    tpc_gpc_mask);

				if (chk_addr != addr) {
					nvgpu_err(g,
						   "Oops addr miss-match! : 0x%08x != 0x%08x",
						   addr, chk_addr);
					return -EINVAL;

				}

				break;
			}
		}
	}

	if ((ILLEGAL_ID == sm_dsm_perf_ctrl_reg_id) &&
	    (ILLEGAL_ID == sm_dsm_perf_reg_id)) {
		return -EINVAL;
	}

	/* Skip the FECS extended header, nothing there for us now. */
	offset_to_segment += buffer_segments_size;

	/* skip through the GPCCS extended headers until we get to the data for
	 * our GPC.  The size of each gpc extended segment is enough to hold the
	 * max tpc count for the gpcs,in 256b chunks.
	 */

	max_tpc_count = gr->max_tpc_per_gpc_count;

	num_ext_gpccs_ext_buffer_segments = (u32)((max_tpc_count + 1) / 2);

	offset_to_segment += (num_ext_gpccs_ext_buffer_segments *
			      buffer_segments_size * gpc_num);

	num_tpcs = g->gr.gpc_tpc_count[gpc_num];

	/* skip the head marker to start with */
	inter_seg_offset = marker_size;

	if (ILLEGAL_ID != sm_dsm_perf_ctrl_reg_id) {
		/* skip over control regs of TPC's before the one we want.
		 *  then skip to the register in this tpc */
		inter_seg_offset = inter_seg_offset +
			(tpc_num * control_register_stride) +
			sm_dsm_perf_ctrl_reg_id;
	} else {
		/* skip all the control registers */
		inter_seg_offset = inter_seg_offset +
			(num_tpcs * control_register_stride);

		/* skip the marker between control and counter segments */
		inter_seg_offset += marker_size;

		/* skip over counter regs of TPCs before the one we want */
		inter_seg_offset = inter_seg_offset +
			(tpc_num * perf_register_stride) *
			ctxsw_prog_extended_num_smpc_quadrants_v();

		/* skip over the register for the quadrants we do not want.
		 *  then skip to the register in this tpc */
		inter_seg_offset = inter_seg_offset +
			(perf_register_stride * quad) +
			sm_dsm_perf_reg_id;
	}

	/* set the offset to the segment offset plus the inter segment offset to
	 *  our register */
	offset_to_segment += (inter_seg_offset * 4);

	/* last sanity check: did we somehow compute an offset outside the
	 * extended buffer? */
	if (offset_to_segment > offset_to_segment_end) {
		nvgpu_err(g,
			   "Overflow ctxsw buffer! 0x%08x > 0x%08x",
			   offset_to_segment, offset_to_segment_end);
		return -EINVAL;
	}

	*priv_offset = offset_to_segment;

	return 0;
}


/*
 * Search the per-segment ctxsw register lists (sys/tpc/etpc/ppc/gpc) for
 * @pri_addr and return its offset within that segment of the context
 * buffer.  Offsets account for per-unit data interleaving (see the
 * diagrams below).  Returns -EINVAL when the address is not found or the
 * register lists are not loaded.
 */
static int
gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g,
					     enum ctxsw_addr_type addr_type,
					     u32 pri_addr,
					     u32 gpc_num, u32 num_tpcs,
					     u32 num_ppcs, u32 ppc_mask,
					     u32 *priv_offset)
{
	u32 i;
	u32 address, base_address;
	u32 sys_offset, gpc_offset, tpc_offset, ppc_offset;
	u32 ppc_num, tpc_num, tpc_addr, gpc_addr, ppc_addr;
	struct aiv_gk20a *reg;
	u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 ppc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_BASE);
	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
	u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);

	if (!g->gr.ctx_vars.valid) {
		return -EINVAL;
	}

	/* Process the SYS/BE segment. */
	if ((addr_type == CTXSW_ADDR_TYPE_SYS) ||
	    (addr_type == CTXSW_ADDR_TYPE_BE)) {
		for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.sys.count; i++) {
			reg = &g->gr.ctx_vars.ctxsw_regs.sys.l[i];
			address = reg->addr;
			sys_offset = reg->index;

			if (pri_addr == address) {
				*priv_offset = sys_offset;
				return 0;
			}
		}
	}

	/* Process the TPC segment. */
	if (addr_type == CTXSW_ADDR_TYPE_TPC) {
		for (tpc_num = 0; tpc_num < num_tpcs; tpc_num++) {
			for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.tpc.count; i++) {
				reg = &g->gr.ctx_vars.ctxsw_regs.tpc.l[i];
				address = reg->addr;
				tpc_addr = pri_tpccs_addr_mask(address);
				base_address = gpc_base +
					(gpc_num * gpc_stride) +
					tpc_in_gpc_base +
					(tpc_num * tpc_in_gpc_stride);
				address = base_address + tpc_addr;
				/*
				 * The data for the TPCs is interleaved in the context buffer.
				 * Example with num_tpcs = 2
				 * 0    1    2    3    4    5    6    7    8    9    10   11 ...
				 * 0-0  1-0  0-1  1-1  0-2  1-2  0-3  1-3  0-4  1-4  0-5  1-5 ...
				 */
				tpc_offset = (reg->index * num_tpcs) + (tpc_num * 4);

				if (pri_addr == address) {
					*priv_offset = tpc_offset;
					return 0;
				}
			}
		}
	} else if ((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
		(addr_type == CTXSW_ADDR_TYPE_ETPC)) {
		if (g->ops.gr.get_egpc_base == NULL) {
			return -EINVAL;
		}

		for (tpc_num = 0; tpc_num < num_tpcs; tpc_num++) {
			for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.etpc.count; i++) {
				reg = &g->gr.ctx_vars.ctxsw_regs.etpc.l[i];
				address = reg->addr;
				tpc_addr = pri_tpccs_addr_mask(address);
				base_address = g->ops.gr.get_egpc_base(g) +
					(gpc_num * gpc_stride) +
					tpc_in_gpc_base +
					(tpc_num * tpc_in_gpc_stride);
				address = base_address + tpc_addr;
				/*
				 * The data for the TPCs is interleaved in the context buffer.
				 * Example with num_tpcs = 2
				 * 0    1    2    3    4    5    6    7    8    9    10   11 ...
				 * 0-0  1-0  0-1  1-1  0-2  1-2  0-3  1-3  0-4  1-4  0-5  1-5 ...
				 */
				tpc_offset = (reg->index * num_tpcs) + (tpc_num * 4);

				if (pri_addr == address) {
					*priv_offset = tpc_offset;
					nvgpu_log(g,
						gpu_dbg_fn | gpu_dbg_gpu_dbg,
						"egpc/etpc priv_offset=0x%#08x",
						*priv_offset);
					return 0;
				}
			}
		}
	}


	/* Process the PPC segment. */
	if (addr_type == CTXSW_ADDR_TYPE_PPC) {
		for (ppc_num = 0; ppc_num < num_ppcs; ppc_num++) {
			for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.ppc.count; i++) {
				reg = &g->gr.ctx_vars.ctxsw_regs.ppc.l[i];
				address = reg->addr;
				ppc_addr = pri_ppccs_addr_mask(address);
				base_address = gpc_base +
					(gpc_num * gpc_stride) +
					ppc_in_gpc_base +
					(ppc_num * ppc_in_gpc_stride);
				address = base_address + ppc_addr;
				/*
				 * The data for the PPCs is interleaved in the context buffer.
				 * Example with numPpcs = 2
				 * 0    1    2    3    4    5    6    7    8    9    10   11 ...
				 * 0-0  1-0  0-1  1-1  0-2  1-2  0-3  1-3  0-4  1-4  0-5  1-5 ...
				 */
				ppc_offset = (reg->index * num_ppcs) + (ppc_num * 4);

				if (pri_addr == address) {
					*priv_offset = ppc_offset;
					return 0;
				}
			}
		}
	}


	/* Process the GPC segment. */
	if (addr_type == CTXSW_ADDR_TYPE_GPC) {
		for (i = 0; i < g->gr.ctx_vars.ctxsw_regs.gpc.count; i++) {
			reg = &g->gr.ctx_vars.ctxsw_regs.gpc.l[i];

			address = reg->addr;
			gpc_addr = pri_gpccs_addr_mask(address);
			gpc_offset = reg->index;

			base_address = gpc_base + (gpc_num * gpc_stride);
			address = base_address + gpc_addr;

			if (pri_addr == address) {
				*priv_offset = gpc_offset;
				return 0;
			}
		}
	}
	return -EINVAL;
}

/*
 * Read the PPC count/mask for one GPC from its GPCCS local header
 * (@context) and report the size of the PPC register list.
 */
static int gr_gk20a_determine_ppc_configuration(struct gk20a *g,
					       u8 *context,
					       u32 *num_ppcs, u32 *ppc_mask,
					       u32 *reg_ppc_count)
{
	u32 data32;
	u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC);

	/*
	 * if there is only 1 PES_PER_GPC, then we put the PES registers
	 * in the GPC reglist, so we can't error out if ppc.count == 0
	 */
	if ((!g->gr.ctx_vars.valid) ||
	    ((g->gr.ctx_vars.ctxsw_regs.ppc.count == 0) &&
	     (num_pes_per_gpc > 1))) {
		return -EINVAL;
	}

	data32 = *(u32 *)(context +
ctxsw_prog_local_image_ppc_info_o());

	*num_ppcs = ctxsw_prog_local_image_ppc_info_num_ppcs_v(data32);
	*ppc_mask = ctxsw_prog_local_image_ppc_info_ppc_mask_v(data32);

	*reg_ppc_count = g->gr.ctx_vars.ctxsw_regs.ppc.count;

	return 0;
}

/*
 * Compute the byte offset of an address type's sub-segment within one
 * GPCCS segment of the context buffer.  The ucode lays the data out in
 * order TPC, (ETPC), PPC, GPC; each earlier sub-segment occupies
 * (reg_count * unit_count) 32-bit words, hence the "<< 2" scaling.
 * Returns -EINVAL for address types that have no GPCCS sub-segment.
 */
int gr_gk20a_get_offset_in_gpccs_segment(struct gk20a *g,
	enum ctxsw_addr_type addr_type,
	u32 num_tpcs,
	u32 num_ppcs,
	u32 reg_list_ppc_count,
	u32 *__offset_in_segment)
{
	u32 offset_in_segment = 0;
	struct gr_gk20a *gr = &g->gr;

	if (addr_type == CTXSW_ADDR_TYPE_TPC) {
		/*
		 * reg = gr->ctx_vars.ctxsw_regs.tpc.l;
		 * offset_in_segment = 0;
		 */
	} else if ((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
			(addr_type == CTXSW_ADDR_TYPE_ETPC)) {
		/* ETPC data follows the TPC data */
		offset_in_segment =
			((gr->ctx_vars.ctxsw_regs.tpc.count *
				num_tpcs) << 2);

		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gpu_dbg,
			"egpc etpc offset_in_segment 0x%#08x",
			offset_in_segment);
	} else if (addr_type == CTXSW_ADDR_TYPE_PPC) {
		/*
		 * The ucode stores TPC data before PPC data.
		 * Advance offset past TPC data to PPC data.
		 */
		offset_in_segment =
			(((gr->ctx_vars.ctxsw_regs.tpc.count +
				gr->ctx_vars.ctxsw_regs.etpc.count) *
				num_tpcs) << 2);
	} else if (addr_type == CTXSW_ADDR_TYPE_GPC) {
		/*
		 * The ucode stores TPC/PPC data before GPC data.
		 * Advance offset past TPC/PPC data to GPC data.
		 *
		 * Note 1 PES_PER_GPC case
		 */
		u32 num_pes_per_gpc = nvgpu_get_litter_value(g,
				GPU_LIT_NUM_PES_PER_GPC);
		if (num_pes_per_gpc > 1) {
			offset_in_segment =
				((((gr->ctx_vars.ctxsw_regs.tpc.count +
					gr->ctx_vars.ctxsw_regs.etpc.count) *
					num_tpcs) << 2) +
				((reg_list_ppc_count * num_ppcs) << 2));
		} else {
			/* with a single PES the PPC regs live in the GPC
			 * reglist, so there is no separate PPC block to
			 * skip over */
			offset_in_segment =
				(((gr->ctx_vars.ctxsw_regs.tpc.count +
					gr->ctx_vars.ctxsw_regs.etpc.count) *
					num_tpcs) << 2);
		}
	} else {
		nvgpu_log_fn(g, "Unknown address type.");
		return -EINVAL;
	}

	*__offset_in_segment = offset_in_segment;
	return 0;
}

/*
 * This function will return the 32 bit offset for a priv register if it is
 * present in the context buffer. The context buffer is in CPU memory.
 */
static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
					       u32 addr,
					       bool is_quad, u32 quad,
					       u32 *context_buffer,
					       u32 context_buffer_size,
					       u32 *priv_offset)
{
	u32 i, data32;
	int err;
	enum ctxsw_addr_type addr_type;
	u32 broadcast_flags;
	u32 gpc_num, tpc_num, ppc_num, be_num;
	u32 num_gpcs, num_tpcs, num_ppcs;
	u32 offset;
	u32 sys_priv_offset, gpc_priv_offset;
	u32 ppc_mask, reg_list_ppc_count;
	u8 *context;
	u32 offset_to_segment, offset_in_segment = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);

	err = g->ops.gr.decode_priv_addr(g, addr, &addr_type,
			&gpc_num, &tpc_num, &ppc_num, &be_num,
			&broadcast_flags);
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"addr_type = %d, broadcast_flags: %08x",
			addr_type, broadcast_flags);
	if (err != 0) {
		return err;
	}

	context = (u8 *)context_buffer;
	if (!check_main_image_header_magic(context)) {
		nvgpu_err(g,
			   "Invalid main header: magic value");
		return -EINVAL;
	}
	num_gpcs = *(u32 *)(context +
ctxsw_prog_main_image_num_gpcs_o()); 7555 7556 /* Parse the FECS local header. */ 7557 context += ctxsw_prog_ucode_header_size_in_bytes(); 7558 if (!check_local_header_magic(context)) { 7559 nvgpu_err(g, 7560 "Invalid FECS local header: magic value"); 7561 return -EINVAL; 7562 } 7563 data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); 7564 sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); 7565 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "sys_priv_offset=0x%x", sys_priv_offset); 7566 7567 /* If found in Ext buffer, ok. 7568 * If it failed and we expected to find it there (quad offset) 7569 * then return the error. Otherwise continue on. 7570 */ 7571 err = gr_gk20a_find_priv_offset_in_ext_buffer(g, 7572 addr, is_quad, quad, context_buffer, 7573 context_buffer_size, priv_offset); 7574 if ((err == 0) || ((err != 0) && is_quad)) { 7575 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, 7576 "err = %d, is_quad = %s", 7577 err, is_quad ? "true" : "false"); 7578 return err; 7579 } 7580 7581 if ((addr_type == CTXSW_ADDR_TYPE_SYS) || 7582 (addr_type == CTXSW_ADDR_TYPE_BE)) { 7583 /* Find the offset in the FECS segment. 
*/ 7584 offset_to_segment = sys_priv_offset * 7585 ctxsw_prog_ucode_header_size_in_bytes(); 7586 7587 err = gr_gk20a_process_context_buffer_priv_segment(g, 7588 addr_type, addr, 7589 0, 0, 0, 0, 7590 &offset); 7591 if (err != 0) { 7592 return err; 7593 } 7594 7595 *priv_offset = (offset_to_segment + offset); 7596 return 0; 7597 } 7598 7599 if ((gpc_num + 1) > num_gpcs) { 7600 nvgpu_err(g, 7601 "GPC %d not in this context buffer.", 7602 gpc_num); 7603 return -EINVAL; 7604 } 7605 7606 /* Parse the GPCCS local header(s).*/ 7607 for (i = 0; i < num_gpcs; i++) { 7608 context += ctxsw_prog_ucode_header_size_in_bytes(); 7609 if (!check_local_header_magic(context)) { 7610 nvgpu_err(g, 7611 "Invalid GPCCS local header: magic value"); 7612 return -EINVAL; 7613 7614 } 7615 data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o()); 7616 gpc_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32); 7617 7618 err = gr_gk20a_determine_ppc_configuration(g, context, 7619 &num_ppcs, &ppc_mask, 7620 &reg_list_ppc_count); 7621 if (err != 0) { 7622 nvgpu_err(g, "determine ppc configuration failed"); 7623 return err; 7624 } 7625 7626 7627 num_tpcs = *(u32 *)(context + ctxsw_prog_local_image_num_tpcs_o()); 7628 7629 if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) { 7630 nvgpu_err(g, 7631 "GPC %d TPC %d not in this context buffer.", 7632 gpc_num, tpc_num); 7633 return -EINVAL; 7634 } 7635 7636 /* Find the offset in the GPCCS segment.*/ 7637 if (i == gpc_num) { 7638 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, 7639 "gpc_priv_offset 0x%#08x", 7640 gpc_priv_offset); 7641 offset_to_segment = gpc_priv_offset * 7642 ctxsw_prog_ucode_header_size_in_bytes(); 7643 7644 err = g->ops.gr.get_offset_in_gpccs_segment(g, 7645 addr_type, 7646 num_tpcs, num_ppcs, reg_list_ppc_count, 7647 &offset_in_segment); 7648 if (err != 0) { 7649 return -EINVAL; 7650 } 7651 7652 offset_to_segment += offset_in_segment; 7653 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, 7654 "offset_to_segment 0x%#08x", 
/* Comparator for sort()/bsearch() over ctxsw_buf_offset_map_entry:
 * orders entries by ascending register address. */
static int map_cmp(const void *a, const void *b)
{
	struct ctxsw_buf_offset_map_entry *e1 =
		(struct ctxsw_buf_offset_map_entry *)a;
	struct ctxsw_buf_offset_map_entry *e2 =
		(struct ctxsw_buf_offset_map_entry *)b;

	if (e1->addr < e2->addr) {
		return -1;
	}

	if (e1->addr > e2->addr) {
		return 1;
	}
	return 0;
}

/*
 * Append one address->offset map entry per register in 'regs' (PM SYS list).
 * *count / *offset are running totals, updated on success; max_cnt bounds the
 * map array.  Returns 0 on success, -EINVAL if the entries would not fit.
 */
static int add_ctxsw_buffer_map_entries_pmsys(struct ctxsw_buf_offset_map_entry *map,
						struct aiv_list_gk20a *regs,
						u32 *count, u32 *offset,
						u32 max_cnt, u32 base, u32 mask)
{
	u32 idx;
	u32 cnt = *count;
	u32 off = *offset;

	if ((cnt + regs->count) > max_cnt) {
		return -EINVAL;
	}

	for (idx = 0; idx < regs->count; idx++) {
		/* Masked addresses below 0xFFF are PCFG-relative and need the
		 * NV_PCFG base added in. */
		if ((base + (regs->l[idx].addr & mask)) < 0xFFF) {
			map[cnt].addr = base + (regs->l[idx].addr & mask)
					+ NV_PCFG_BASE;
		} else {
			map[cnt].addr = base + (regs->l[idx].addr & mask);
		}
		map[cnt++].offset = off;
		off += 4;
	}
	*count = cnt;
	*offset = off;
	return 0;
}

/*
 * Append map entries for the PM GPC register list of one GPC.
 * Same contract as add_ctxsw_buffer_map_entries_pmsys(), with special
 * handling for PPC-shared addresses that appear in the GPC list.
 */
static int add_ctxsw_buffer_map_entries_pmgpc(struct gk20a *g,
					struct ctxsw_buf_offset_map_entry *map,
					struct aiv_list_gk20a *regs,
					u32 *count, u32 *offset,
					u32 max_cnt, u32 base, u32 mask)
{
	u32 idx;
	u32 cnt = *count;
	u32 off = *offset;

	if ((cnt + regs->count) > max_cnt) {
		return -EINVAL;
	}

	/* NOTE: The PPC offsets get added to the pm_gpc list if numPpc <= 1
	 * To handle the case of PPC registers getting added into GPC, the below
	 * code specifically checks for any PPC offsets and adds them using
	 * proper mask
	 */
	for (idx = 0; idx < regs->count; idx++) {
		/* Check if the address is PPC address */
		if (pri_is_ppc_addr_shared(g, regs->l[idx].addr & mask)) {
			u32 ppc_in_gpc_base = nvgpu_get_litter_value(g,
						GPU_LIT_PPC_IN_GPC_BASE);
			u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g,
						GPU_LIT_PPC_IN_GPC_STRIDE);
			/* Use PPC mask instead of the GPC mask provided */
			u32 ppcmask = ppc_in_gpc_stride - 1;

			map[cnt].addr = base + ppc_in_gpc_base
					+ (regs->l[idx].addr & ppcmask);
		} else {
			map[cnt].addr = base + (regs->l[idx].addr & mask);
		}
		map[cnt++].offset = off;
		off += 4;
	}
	*count = cnt;
	*offset = off;
	return 0;
}
and adds them using 7736 * proper mask 7737 */ 7738 for (idx = 0; idx < regs->count; idx++) { 7739 /* Check if the address is PPC address */ 7740 if (pri_is_ppc_addr_shared(g, regs->l[idx].addr & mask)) { 7741 u32 ppc_in_gpc_base = nvgpu_get_litter_value(g, 7742 GPU_LIT_PPC_IN_GPC_BASE); 7743 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, 7744 GPU_LIT_PPC_IN_GPC_STRIDE); 7745 /* Use PPC mask instead of the GPC mask provided */ 7746 u32 ppcmask = ppc_in_gpc_stride - 1; 7747 7748 map[cnt].addr = base + ppc_in_gpc_base 7749 + (regs->l[idx].addr & ppcmask); 7750 } else { 7751 map[cnt].addr = base + (regs->l[idx].addr & mask); 7752 } 7753 map[cnt++].offset = off; 7754 off += 4; 7755 } 7756 *count = cnt; 7757 *offset = off; 7758 return 0; 7759} 7760 7761static int add_ctxsw_buffer_map_entries(struct ctxsw_buf_offset_map_entry *map, 7762 struct aiv_list_gk20a *regs, 7763 u32 *count, u32 *offset, 7764 u32 max_cnt, u32 base, u32 mask) 7765{ 7766 u32 idx; 7767 u32 cnt = *count; 7768 u32 off = *offset; 7769 7770 if ((cnt + regs->count) > max_cnt) { 7771 return -EINVAL; 7772 } 7773 7774 for (idx = 0; idx < regs->count; idx++) { 7775 map[cnt].addr = base + (regs->l[idx].addr & mask); 7776 map[cnt++].offset = off; 7777 off += 4; 7778 } 7779 *count = cnt; 7780 *offset = off; 7781 return 0; 7782} 7783 7784/* Helper function to add register entries to the register map for all 7785 * subunits 7786 */ 7787static int add_ctxsw_buffer_map_entries_subunits( 7788 struct ctxsw_buf_offset_map_entry *map, 7789 struct aiv_list_gk20a *regs, 7790 u32 *count, u32 *offset, 7791 u32 max_cnt, u32 base, 7792 u32 num_units, u32 stride, u32 mask) 7793{ 7794 u32 unit; 7795 u32 idx; 7796 u32 cnt = *count; 7797 u32 off = *offset; 7798 7799 if ((cnt + (regs->count * num_units)) > max_cnt) { 7800 return -EINVAL; 7801 } 7802 7803 /* Data is interleaved for units in ctxsw buffer */ 7804 for (idx = 0; idx < regs->count; idx++) { 7805 for (unit = 0; unit < num_units; unit++) { 7806 map[cnt].addr = base + 
/* Helper function to add register entries to the register map for all
 * subunits
 *
 * Registers are interleaved in the ctxsw buffer: for each register, one
 * 4-byte slot per unit (unit address = base + masked addr + unit * stride).
 * Returns 0 on success, -EINVAL if regs->count * num_units would not fit.
 */
static int add_ctxsw_buffer_map_entries_subunits(
					struct ctxsw_buf_offset_map_entry *map,
					struct aiv_list_gk20a *regs,
					u32 *count, u32 *offset,
					u32 max_cnt, u32 base,
					u32 num_units, u32 stride, u32 mask)
{
	u32 unit;
	u32 idx;
	u32 cnt = *count;
	u32 off = *offset;

	if ((cnt + (regs->count * num_units)) > max_cnt) {
		return -EINVAL;
	}

	/* Data is interleaved for units in ctxsw buffer */
	for (idx = 0; idx < regs->count; idx++) {
		for (unit = 0; unit < num_units; unit++) {
			map[cnt].addr = base + (regs->l[idx].addr & mask) +
					(unit * stride);
			map[cnt++].offset = off;
			off += 4;
		}
	}
	*count = cnt;
	*offset = off;
	return 0;
}

/* gk20a hal hook: FBPA PM registers use the plain per-subunit layout. */
int gr_gk20a_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
				struct ctxsw_buf_offset_map_entry *map,
				struct aiv_list_gk20a *regs,
				u32 *count, u32 *offset,
				u32 max_cnt, u32 base,
				u32 num_fbpas, u32 stride, u32 mask)
{
	return add_ctxsw_buffer_map_entries_subunits(map, regs, count, offset,
			max_cnt, base, num_fbpas, stride, mask);
}
/*
 * Build map entries for every per-GPC register list (TPC, PPC, GPC, unicast
 * GPC, perf GPC, GPC router and, when present, CAU), for all GPCs.
 * Each GPC's region in the ctxsw buffer is 256-byte aligned.
 * Returns 0 on success, -EINVAL if any list would overflow max_cnt.
 */
static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
					struct ctxsw_buf_offset_map_entry *map,
					u32 *count, u32 *offset, u32 max_cnt)
{
	u32 num_gpcs = g->gr.gpc_count;
	u32 num_ppcs, num_tpcs, gpc_num, base;
	u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 ppc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_BASE);
	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
	u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);

	for (gpc_num = 0; gpc_num < num_gpcs; gpc_num++) {
		/* Per-TPC PM registers, interleaved across this GPC's TPCs. */
		num_tpcs = g->gr.gpc_tpc_count[gpc_num];
		base = gpc_base + (gpc_stride * gpc_num) + tpc_in_gpc_base;
		if (add_ctxsw_buffer_map_entries_subunits(map,
					&g->gr.ctx_vars.ctxsw_regs.pm_tpc,
					count, offset, max_cnt, base, num_tpcs,
					tpc_in_gpc_stride,
					(tpc_in_gpc_stride - 1))) {
			return -EINVAL;
		}

		/* Per-PPC PM registers. */
		num_ppcs = g->gr.gpc_ppc_count[gpc_num];
		base = gpc_base + (gpc_stride * gpc_num) + ppc_in_gpc_base;
		if (add_ctxsw_buffer_map_entries_subunits(map,
					&g->gr.ctx_vars.ctxsw_regs.pm_ppc,
					count, offset, max_cnt, base, num_ppcs,
					ppc_in_gpc_stride,
					(ppc_in_gpc_stride - 1))) {
			return -EINVAL;
		}

		/* GPC-level PM registers (may embed PPC addresses; see
		 * add_ctxsw_buffer_map_entries_pmgpc). */
		base = gpc_base + (gpc_stride * gpc_num);
		if (add_ctxsw_buffer_map_entries_pmgpc(g, map,
					&g->gr.ctx_vars.ctxsw_regs.pm_gpc,
					count, offset, max_cnt, base,
					(gpc_stride - 1))) {
			return -EINVAL;
		}

		/* Unicast GPC registers on the XBAR/MXBAR GNIC stride. */
		base = NV_XBAR_MXBAR_PRI_GPC_GNIC_STRIDE * gpc_num;
		if (add_ctxsw_buffer_map_entries(map,
					&g->gr.ctx_vars.ctxsw_regs.pm_ucgpc,
					count, offset, max_cnt, base, ~0)) {
			return -EINVAL;
		}

		/* Perf-monitor GPC registers on the per-chiplet stride. */
		base = (g->ops.gr.get_pmm_per_chiplet_offset() * gpc_num);
		if (add_ctxsw_buffer_map_entries(map,
					&g->gr.ctx_vars.ctxsw_regs.perf_gpc,
					count, offset, max_cnt, base, ~0)) {
			return -EINVAL;
		}

		/* GPC perf-router registers. */
		base = (NV_PERF_PMMGPCROUTER_STRIDE * gpc_num);
		if (add_ctxsw_buffer_map_entries(map,
					&g->gr.ctx_vars.ctxsw_regs.gpc_router,
					count, offset, max_cnt, base, ~0)) {
			return -EINVAL;
		}

		/* Counter Aggregation Unit, if available */
		if (g->gr.ctx_vars.ctxsw_regs.pm_cau.count) {
			base = gpc_base + (gpc_stride * gpc_num)
					+ tpc_in_gpc_base;
			if (add_ctxsw_buffer_map_entries_subunits(map,
					&g->gr.ctx_vars.ctxsw_regs.pm_cau,
					count, offset, max_cnt, base, num_tpcs,
					tpc_in_gpc_stride,
					(tpc_in_gpc_stride - 1))) {
				return -EINVAL;
			}
		}

		/* Each GPC region in the buffer is 256-byte aligned. */
		*offset = ALIGN(*offset, 256);
	}
	return 0;
}

/* gk20a hal hook: PMA perf registers use the plain flat-list layout. */
int gr_gk20a_add_ctxsw_reg_perf_pma(struct ctxsw_buf_offset_map_entry *map,
	struct aiv_list_gk20a *regs,
	u32 *count, u32 *offset,
	u32 max_cnt, u32 base, u32 mask)
{
	return add_ctxsw_buffer_map_entries(map, regs,
			count, offset, max_cnt, base, mask);
}
*|---------------------------------------------| 7927 *| | 7928 *| LIST_compressed_nv_perf_ctx_reg_sysrouter|Space allocated: numRegs words 7929 *|---------------------------------------------| 7930 *| | 7931 *| LIST_compressed_nv_perf_ctx_reg_PMA |Space allocated: numRegs words 7932 *|---------------------------------------------| 7933 *| PADDING for 256 byte alignment | 7934 *|---------------------------------------------|<----256 byte aligned 7935 *| LIST_compressed_nv_perf_fbp_ctx_regs | 7936 *| |Space allocated: numRegs * n words (for n FB units) 7937 *|---------------------------------------------| 7938 *| LIST_compressed_nv_perf_fbprouter_ctx_regs | 7939 *| |Space allocated: numRegs * n words (for n FB units) 7940 *|---------------------------------------------| 7941 *| LIST_compressed_pm_fbpa_ctx_regs | 7942 *| |Space allocated: numRegs * n words (for n FB units) 7943 *|---------------------------------------------| 7944 *| LIST_compressed_pm_rop_ctx_regs | 7945 *|---------------------------------------------| 7946 *| LIST_compressed_pm_ltc_ctx_regs | 7947 *| LTC0 LTS0 | 7948 *| LTC1 LTS0 |Space allocated: numRegs * n words (for n LTC units) 7949 *| LTCn LTS0 | 7950 *| LTC0 LTS1 | 7951 *| LTC1 LTS1 | 7952 *| LTCn LTS1 | 7953 *| LTC0 LTSn | 7954 *| LTC1 LTSn | 7955 *| LTCn LTSn | 7956 *|---------------------------------------------| 7957 *| PADDING for 256 byte alignment | 7958 *|---------------------------------------------|<----256 byte aligned 7959 *| GPC0 REG0 TPC0 |Each GPC has space allocated to accommodate 7960 *| REG0 TPC1 | all the GPC/TPC register lists 7961 *| Lists in each GPC region: REG0 TPCn |Per GPC allocated space is always 256 byte aligned 7962 *| LIST_pm_ctx_reg_TPC REG1 TPC0 | 7963 *| * numTpcs REG1 TPC1 | 7964 *| LIST_pm_ctx_reg_PPC REG1 TPCn | 7965 *| * numPpcs REGn TPC0 | 7966 *| LIST_pm_ctx_reg_GPC REGn TPC1 | 7967 *| List_pm_ctx_reg_uc_GPC REGn TPCn | 7968 *| LIST_nv_perf_ctx_reg_GPC | 7969 *| LIST_nv_perf_gpcrouter_ctx_reg | 7970 *| 
/*
 * Build the register-address -> PM-ctxsw-buffer-offset map for this GPU.
 * Entry order mirrors the PM CTXSW BUFFER LAYOUT documented above: SYS,
 * perf SYS, SYS router, PMA, then (256-aligned) FBP/FBP-router/FBPA/ROP/LTC,
 * then (256-aligned) the per-GPC regions.  The finished map is sorted by
 * address (for bsearch) and stored in g->gr.ctx_vars.
 * Returns 0 on success; -EINVAL on layout/overflow failure, -ENOMEM on
 * allocation failure.
 */
static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
{
	u32 hwpm_ctxsw_buffer_size = g->gr.ctx_vars.pm_ctxsw_image_size;
	u32 hwpm_ctxsw_reg_count_max;
	u32 map_size;
	u32 i, count = 0;
	u32 offset = 0;
	struct ctxsw_buf_offset_map_entry *map;
	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
	u32 num_fbpas = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
	u32 fbpa_stride = nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE);
	u32 num_ltc = g->ops.gr.get_max_ltc_per_fbp(g) * g->gr.num_fbps;

	if (hwpm_ctxsw_buffer_size == 0) {
		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"no PM Ctxsw buffer memory in context buffer");
		return -EINVAL;
	}

	/* Worst case: every 4-byte word of the buffer is a mapped register. */
	hwpm_ctxsw_reg_count_max = hwpm_ctxsw_buffer_size >> 2;
	map_size = hwpm_ctxsw_reg_count_max * sizeof(*map);

	map = nvgpu_big_zalloc(g, map_size);
	if (map == NULL) {
		return -ENOMEM;
	}

	/* Add entries from _LIST_pm_ctx_reg_SYS */
	if (add_ctxsw_buffer_map_entries_pmsys(map, &g->gr.ctx_vars.ctxsw_regs.pm_sys,
				&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_nv_perf_ctx_reg_SYS */
	if (add_ctxsw_buffer_map_entries(map, &g->gr.ctx_vars.ctxsw_regs.perf_sys,
				&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_nv_perf_sysrouter_ctx_reg*/
	if (add_ctxsw_buffer_map_entries(map, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router,
				&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_nv_perf_pma_ctx_reg*/
	if (g->ops.gr.add_ctxsw_reg_perf_pma(map, &g->gr.ctx_vars.ctxsw_regs.perf_pma,
				&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
		goto cleanup;
	}

	/* FB-side lists start at the next 256-byte boundary. */
	offset = ALIGN(offset, 256);

	/* Add entries from _LIST_nv_perf_fbp_ctx_regs */
	if (add_ctxsw_buffer_map_entries_subunits(map,
					&g->gr.ctx_vars.ctxsw_regs.fbp,
					&count, &offset,
					hwpm_ctxsw_reg_count_max, 0,
					g->gr.num_fbps,
					g->ops.gr.get_pmm_per_chiplet_offset(),
					~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_nv_perf_fbprouter_ctx_regs */
	if (add_ctxsw_buffer_map_entries_subunits(map,
					&g->gr.ctx_vars.ctxsw_regs.fbp_router,
					&count, &offset,
					hwpm_ctxsw_reg_count_max, 0, g->gr.num_fbps,
					NV_PERF_PMM_FBP_ROUTER_STRIDE, ~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_nv_pm_fbpa_ctx_regs */
	if (g->ops.gr.add_ctxsw_reg_pm_fbpa(g, map,
					&g->gr.ctx_vars.ctxsw_regs.pm_fbpa,
					&count, &offset,
					hwpm_ctxsw_reg_count_max, 0,
					num_fbpas, fbpa_stride, ~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_nv_pm_rop_ctx_regs */
	if (add_ctxsw_buffer_map_entries(map,
					&g->gr.ctx_vars.ctxsw_regs.pm_rop,
					&count, &offset,
					hwpm_ctxsw_reg_count_max, 0, ~0)) {
		goto cleanup;
	}

	/* Add entries from _LIST_compressed_nv_pm_ltc_ctx_regs */
	if (add_ctxsw_buffer_map_entries_subunits(map,
					&g->gr.ctx_vars.ctxsw_regs.pm_ltc,
					&count, &offset,
					hwpm_ctxsw_reg_count_max, 0,
					num_ltc, ltc_stride, ~0)) {
		goto cleanup;
	}

	/* GPC regions start at the next 256-byte boundary. */
	offset = ALIGN(offset, 256);

	/* Add GPC entries */
	if (add_ctxsw_buffer_map_entries_gpcs(g, map, &count, &offset,
					hwpm_ctxsw_reg_count_max)) {
		goto cleanup;
	}

	if (offset > hwpm_ctxsw_buffer_size) {
		nvgpu_err(g, "offset > buffer size");
		goto cleanup;
	}

	/* Sort by address so lookups can use bsearch() with map_cmp(). */
	sort(map, count, sizeof(*map), map_cmp, NULL);

	g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map = map;
	g->gr.ctx_vars.hwpm_ctxsw_buffer_offset_map_count = count;

	nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");

	for (i = 0; i < count; i++) {
		nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
	}

	return 0;
cleanup:
	nvgpu_err(g, "Failed to create HWPM buffer offset map");
	nvgpu_big_free(g, map);
	return -EINVAL;
}
/*
 * This function will return the 32 bit offset for a priv register if it is
 * present in the PM context buffer.
 *
 * The address map is built lazily on first use and then looked up with a
 * binary search (map is sorted by address).  Returns 0 and fills
 * *priv_offset on success, -EINVAL if the address is not mapped.
 */
static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g,
					          u32 addr,
					          u32 *priv_offset)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	u32 count;
	struct ctxsw_buf_offset_map_entry *map, *result, map_key;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);

	/* Create map of pri address and pm offset if necessary */
	if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) {
		err = gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(g);
		if (err != 0) {
			return err;
		}
	}

	*priv_offset = 0;

	map = gr->ctx_vars.hwpm_ctxsw_buffer_offset_map;
	count = gr->ctx_vars.hwpm_ctxsw_buffer_offset_map_count;

	map_key.addr = addr;
	result = bsearch(&map_key, map, count, sizeof(*map), map_cmp);

	if (result) {
		*priv_offset = result->offset;
	} else {
		nvgpu_err(g, "Lookup failed for address 0x%x", addr);
		err = -EINVAL;
	}
	return err;
}
So the 8164 * valid bit must be checked to be absolutely certain that a 8165 * valid context is currently resident. 8166 */ 8167 if (gr_fecs_current_ctx_valid_v(curr_gr_ctx) == 0U) { 8168 return NULL; 8169 } 8170 8171 curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx, 8172 &curr_gr_tsgid); 8173 8174 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, 8175 "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" 8176 " ch->chid=%d", 8177 (curr_ch != NULL) ? curr_ch->chid : U32_MAX, 8178 curr_gr_tsgid, 8179 ch->tsgid, 8180 ch->chid); 8181 8182 if (curr_ch == NULL) { 8183 return false; 8184 } 8185 8186 if (ch->chid == curr_ch->chid) { 8187 ret = true; 8188 } 8189 8190 tsg = tsg_gk20a_from_ch(ch); 8191 if ((tsg != NULL) && (tsg->tsgid == curr_gr_tsgid)) { 8192 ret = true; 8193 } 8194 8195 gk20a_channel_put(curr_ch); 8196 return ret; 8197} 8198 8199int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, 8200 struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops, 8201 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops, 8202 bool ch_is_curr_ctx) 8203{ 8204 struct gk20a *g = ch->g; 8205 struct tsg_gk20a *tsg; 8206 struct nvgpu_gr_ctx *gr_ctx; 8207 bool gr_ctx_ready = false; 8208 bool pm_ctx_ready = false; 8209 struct nvgpu_mem *current_mem = NULL; 8210 u32 i, j, offset, v; 8211 struct gr_gk20a *gr = &g->gr; 8212 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 8213 u32 max_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count * 8214 sm_per_tpc; 8215 u32 *offsets = NULL; 8216 u32 *offset_addrs = NULL; 8217 u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops}; 8218 int err = 0, pass; 8219 8220 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d", 8221 num_ctx_wr_ops, num_ctx_rd_ops); 8222 8223 tsg = tsg_gk20a_from_ch(ch); 8224 if (tsg == NULL) { 8225 return -EINVAL; 8226 } 8227 8228 gr_ctx = &tsg->gr_ctx; 8229 8230 if (ch_is_curr_ctx) { 8231 for (pass = 0; pass < 2; pass++) { 8232 ctx_op_nr = 0; 8233 for (i = 0; (ctx_op_nr < num_ctx_ops[pass]) && (i < num_ops); ++i) 
/*
 * Execute a batch of debug register read/write ops against a channel's
 * graphics context.
 *
 * If the channel's context is currently resident (ch_is_curr_ctx), the ops
 * are applied directly to the hardware registers.  Otherwise the matching
 * offsets are located in the (saved) context image / PM context image and
 * the ops are applied to that memory.
 *
 * Ops are processed in two passes: pass 0 performs all writes, pass 1 all
 * reads.  Returns 0 on success or a negative errno; individual ops that
 * target unknown offsets are marked REGOP(STATUS_INVALID_OFFSET) and
 * skipped rather than failing the batch.
 */
int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
			    struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops,
			    u32 num_ctx_wr_ops, u32 num_ctx_rd_ops,
			    bool ch_is_curr_ctx)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx;
	bool gr_ctx_ready = false;
	bool pm_ctx_ready = false;
	struct nvgpu_mem *current_mem = NULL;
	u32 i, j, offset, v;
	struct gr_gk20a *gr = &g->gr;
	u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	/* Worst-case number of buffer offsets one register can map to:
	 * one per SM across all GPCs/TPCs. */
	u32 max_offsets = gr->max_gpc_count * gr->max_tpc_per_gpc_count *
			  sm_per_tpc;
	u32 *offsets = NULL;
	u32 *offset_addrs = NULL;
	u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops};
	int err = 0, pass;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
		   num_ctx_wr_ops, num_ctx_rd_ops);

	tsg = tsg_gk20a_from_ch(ch);
	if (tsg == NULL) {
		return -EINVAL;
	}

	gr_ctx = &tsg->gr_ctx;

	if (ch_is_curr_ctx) {
		/* Context is resident: operate on live registers. */
		for (pass = 0; pass < 2; pass++) {
			ctx_op_nr = 0;
			for (i = 0; (ctx_op_nr < num_ctx_ops[pass]) && (i < num_ops); ++i) {
				/* only do ctx ops and only on the right pass */
				if ((ctx_ops[i].type == REGOP(TYPE_GLOBAL)) ||
				    (((pass == 0) && reg_op_is_read(ctx_ops[i].op)) ||
				     ((pass == 1) && !reg_op_is_read(ctx_ops[i].op)))) {
					continue;
				}

				/* if this is a quad access, setup for special access*/
				if ((ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD))
					&& (g->ops.gr.access_smpc_reg != NULL)) {
					g->ops.gr.access_smpc_reg(g,
							ctx_ops[i].quad,
							ctx_ops[i].offset);
				}
				offset = ctx_ops[i].offset;

				if (pass == 0) { /* write pass */
					/* Read-modify-write with the op's mask. */
					v = gk20a_readl(g, offset);
					v &= ~ctx_ops[i].and_n_mask_lo;
					v |= ctx_ops[i].value_lo;
					gk20a_writel(g, offset, v);

					nvgpu_log(g, gpu_dbg_gpu_dbg,
						   "direct wr: offset=0x%x v=0x%x",
						   offset, v);

					if (ctx_ops[i].op == REGOP(WRITE_64)) {
						v = gk20a_readl(g, offset + 4);
						v &= ~ctx_ops[i].and_n_mask_hi;
						v |= ctx_ops[i].value_hi;
						gk20a_writel(g, offset + 4, v);

						nvgpu_log(g, gpu_dbg_gpu_dbg,
							   "direct wr: offset=0x%x v=0x%x",
							   offset + 4, v);
					}

				} else { /* read pass */
					ctx_ops[i].value_lo =
						gk20a_readl(g, offset);

					nvgpu_log(g, gpu_dbg_gpu_dbg,
						   "direct rd: offset=0x%x v=0x%x",
						   offset, ctx_ops[i].value_lo);

					if (ctx_ops[i].op == REGOP(READ_64)) {
						ctx_ops[i].value_hi =
							gk20a_readl(g, offset + 4);

						/* NOTE(review): this log prints
						 * offset/value_lo; the high-word
						 * read likely intended
						 * offset + 4 / value_hi. */
						nvgpu_log(g, gpu_dbg_gpu_dbg,
							   "direct rd: offset=0x%x v=0x%x",
							   offset, ctx_ops[i].value_lo);
					} else {
						ctx_ops[i].value_hi = 0;
					}
				}
				ctx_op_nr++;
			}
		}
		goto cleanup;
	}

	/* they're the same size, so just use one alloc for both */
	offsets = nvgpu_kzalloc(g, 2 * sizeof(u32) * max_offsets);
	if (offsets == NULL) {
		err = -ENOMEM;
		goto cleanup;
	}
	offset_addrs = offsets + max_offsets;

	err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
	if (err != 0) {
		goto cleanup;
	}

	/* Flush L2 so the CPU view of the context image is coherent. */
	g->ops.mm.l2_flush(g, true);

	/* write to appropriate place in context image,
	 * first have to figure out where that really is */

	/* first pass is writes, second reads */
	for (pass = 0; pass < 2; pass++) {
		ctx_op_nr = 0;
		for (i = 0; (ctx_op_nr < num_ctx_ops[pass]) && (i < num_ops); ++i) {
			u32 num_offsets;

			/* only do ctx ops and only on the right pass */
			if ((ctx_ops[i].type == REGOP(TYPE_GLOBAL)) ||
			    (((pass == 0) && reg_op_is_read(ctx_ops[i].op)) ||
			     ((pass == 1) && !reg_op_is_read(ctx_ops[i].op)))) {
				continue;
			}

			/* Try the graphics context image first, then fall
			 * back to the PM context image. */
			err = gr_gk20a_get_ctx_buffer_offsets(g,
						ctx_ops[i].offset,
						max_offsets,
						offsets, offset_addrs,
						&num_offsets,
						ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD),
						ctx_ops[i].quad);
			if (err == 0) {
				if (!gr_ctx_ready) {
					gr_ctx_ready = true;
				}
				current_mem = &gr_ctx->mem;
			} else {
				err = gr_gk20a_get_pm_ctx_buffer_offsets(g,
							ctx_ops[i].offset,
							max_offsets,
							offsets, offset_addrs,
							&num_offsets);
				if (err != 0) {
					nvgpu_log(g, gpu_dbg_gpu_dbg,
					   "ctx op invalid offset: offset=0x%x",
					   ctx_ops[i].offset);
					ctx_ops[i].status =
						REGOP(STATUS_INVALID_OFFSET);
					continue;
				}
				if (!pm_ctx_ready) {
					/* Make sure ctx buffer was initialized */
					if (!nvgpu_mem_is_valid(&gr_ctx->pm_ctx.mem)) {
						nvgpu_err(g,
							"Invalid ctx buffer");
						err = -EINVAL;
						goto cleanup;
					}
					pm_ctx_ready = true;
				}
				current_mem = &gr_ctx->pm_ctx.mem;
			}

			/* if this is a quad access, setup for special access*/
			if ((ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD)) &&
				(g->ops.gr.access_smpc_reg != NULL)) {
				g->ops.gr.access_smpc_reg(g, ctx_ops[i].quad,
							 ctx_ops[i].offset);
			}

			for (j = 0; j < num_offsets; j++) {
				/* sanity check gr ctxt offsets,
				 * don't write outside, worst case
				 */
				if ((current_mem == &gr_ctx->mem) &&
					(offsets[j] >= g->gr.ctx_vars.golden_image_size)) {
					continue;
				}
				if (pass == 0) { /* write pass */
					v = nvgpu_mem_rd(g, current_mem, offsets[j]);
					v &= ~ctx_ops[i].and_n_mask_lo;
					v |= ctx_ops[i].value_lo;
					nvgpu_mem_wr(g, current_mem, offsets[j], v);

					nvgpu_log(g, gpu_dbg_gpu_dbg,
						   "context wr: offset=0x%x v=0x%x",
						   offsets[j], v);

					if (ctx_ops[i].op == REGOP(WRITE_64)) {
						v = nvgpu_mem_rd(g, current_mem, offsets[j] + 4);
						v &= ~ctx_ops[i].and_n_mask_hi;
						v |= ctx_ops[i].value_hi;
						nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v);

						nvgpu_log(g, gpu_dbg_gpu_dbg,
							   "context wr: offset=0x%x v=0x%x",
							   offsets[j] + 4, v);
					}

					/* check to see if we need to add a special WAR
					   for some of the SMPC perf regs */
					gr_gk20a_ctx_patch_smpc(g, ch, offset_addrs[j],
							v, current_mem);

				} else { /* read pass */
					/* Reads only use the first mapped
					 * offset (all copies hold the same
					 * value). */
					ctx_ops[i].value_lo =
						nvgpu_mem_rd(g, current_mem, offsets[0]);

					nvgpu_log(g, gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
						   offsets[0], ctx_ops[i].value_lo);

					if (ctx_ops[i].op == REGOP(READ_64)) {
						ctx_ops[i].value_hi =
							nvgpu_mem_rd(g, current_mem, offsets[0] + 4);

						nvgpu_log(g, gpu_dbg_gpu_dbg,
							   "context rd: offset=0x%x v=0x%x",
							   offsets[0] + 4, ctx_ops[i].value_hi);
					} else {
						ctx_ops[i].value_hi = 0;
					}
				}
			}
			ctx_op_nr++;
		}
	}

 cleanup:
	if (offsets) {
		nvgpu_kfree(g, offsets);
	}

	if (gr_ctx->patch_ctx.mem.cpu_va) {
		gr_gk20a_ctx_patch_write_end(g, gr_ctx, gr_ctx_ready);
	}

	return err;
}
/*
 * Public wrapper around __gr_gk20a_exec_ctx_ops(): disables context
 * switching, determines whether ch's context is resident, runs the ops,
 * then re-enables context switching.  Optionally reports residency via
 * *is_curr_ctx.  Returns 0 on success or a negative errno.
 */
int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
			  struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops,
			  u32 num_ctx_wr_ops, u32 num_ctx_rd_ops,
			  bool *is_curr_ctx)
{
	struct gk20a *g = ch->g;
	int err, tmp_err;
	bool ch_is_curr_ctx;

	/* disable channel switching.
	 * at that point the hardware state can be inspected to
	 * determine if the context we're interested in is current.
	 */
	err = gr_gk20a_disable_ctxsw(g);
	if (err != 0) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		/* this should probably be ctx-fatal... */
		return err;
	}

	ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch);
	if (is_curr_ctx != NULL) {
		*is_curr_ctx = ch_is_curr_ctx;
	}
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d",
		  ch_is_curr_ctx);

	err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops,
				      num_ctx_rd_ops, ch_is_curr_ctx);

	/* Always try to restart ctxsw; a failure here overrides the op
	 * result since the GPU is left in a bad state. */
	tmp_err = gr_gk20a_enable_ctxsw(g);
	if (tmp_err) {
		nvgpu_err(g, "unable to restart ctxsw!");
		err = tmp_err;
	}

	return err;
}
/*
 * Program the global pagepool base address and size into the graphics
 * context: SCC, GCC (per-GPC) and PD views of the same pool.  When 'patch'
 * is true the writes go through the patch context instead of directly.
 */
void gr_gk20a_commit_global_pagepool(struct gk20a *g,
					    struct nvgpu_gr_ctx *gr_ctx,
					    u64 addr, u32 size, bool patch)
{
	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_base_r(),
		gr_scc_pagepool_base_addr_39_8_f(addr), patch);

	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_r(),
		gr_scc_pagepool_total_pages_f(size) |
		gr_scc_pagepool_valid_true_f(), patch);

	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_base_r(),
		gr_gpcs_gcc_pagepool_base_addr_39_8_f(addr), patch);

	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_r(),
		gr_gpcs_gcc_pagepool_total_pages_f(size), patch);

	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_pagepool_r(),
		gr_pd_pagepool_total_pages_f(size) |
		gr_pd_pagepool_valid_true_f(), patch);
}

/* One-time init of gr state owned by this file: the init wait queue. */
void gk20a_init_gr(struct gk20a *g)
{
	nvgpu_cond_init(&g->gr.init_wq);
}
/*
 * Poll until the given SM reports locked-down (stopped for the debugger),
 * or until the gr idle timeout expires.
 *
 * Returns 0 when the SM locks down, or when check_errors is set and no
 * unmasked error is pending; -EFAULT if an MMU fault makes lock-down
 * impossible; -ETIMEDOUT on timeout (with diagnostic register dump).
 */
int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
		u32 global_esr_mask, bool check_errors)
{
	bool locked_down;
	bool no_error_pending;
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	bool mmu_debug_mode_enabled = g->ops.fb.is_debug_mode_enabled(g);
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
	u32 dbgr_status0 = 0, dbgr_control0 = 0;
	u64 warps_valid = 0, warps_paused = 0, warps_trapped = 0;
	struct nvgpu_timeout timeout;
	u32 warp_esr;

	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
		"GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);

	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);

	/* wait for the sm to lock down */
	do {
		u32 global_esr = g->ops.gr.get_sm_hww_global_esr(g,
						gpc, tpc, sm);
		dbgr_status0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm_dbgr_status0_r() + offset);

		warp_esr = g->ops.gr.get_sm_hww_warp_esr(g, gpc, tpc, sm);

		locked_down =
		    (gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(dbgr_status0) ==
		     gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v());
		/* true only when error checking is requested and neither a
		 * warp error nor an unmasked global error is pending. */
		no_error_pending =
			check_errors &&
			(gr_gpc0_tpc0_sm_hww_warp_esr_error_v(warp_esr) ==
			 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v()) &&
			((global_esr & ~global_esr_mask) == 0);

		if (locked_down || no_error_pending) {
			nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
				"GPC%d TPC%d SM%d: locked down SM",
					gpc, tpc, sm);
			return 0;
		}

		/* if an mmu fault is pending and mmu debug mode is not
		 * enabled, the sm will never lock down. */
		if (!mmu_debug_mode_enabled &&
		     (g->ops.mm.mmu_fault_pending(g))) {
			nvgpu_err(g,
				"GPC%d TPC%d: mmu fault pending,"
				" SM%d will never lock down!", gpc, tpc, sm);
			return -EFAULT;
		}

		/* Exponential backoff capped at GR_IDLE_CHECK_MAX. */
		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (nvgpu_timeout_expired(&timeout) == 0);

	/* Timed out: gather warp state for the error report. */
	dbgr_control0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);

	/* 64 bit read */
	warps_valid = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_1_r() + offset) << 32;
	warps_valid |= gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_r() + offset);

	/* 64 bit read */
	warps_paused = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_1_r() + offset) << 32;
	warps_paused |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r() + offset);

	/* 64 bit read */
	warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r() + offset) << 32;
	warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + offset);

	nvgpu_err(g,
		"GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc);
	nvgpu_err(g,
		"STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx",
		gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0,
		warps_valid, warps_paused, warps_trapped);

	return -ETIMEDOUT;
}
/*
 * Suspend one SM for the attached debugger: assert its stop trigger and
 * wait for lock-down.  No-op (with a warning) if no SM debugger is
 * attached; errors from lock-down are logged but not propagated (void).
 */
void gk20a_gr_suspend_single_sm(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm,
		u32 global_esr_mask, bool check_errors)
{
	int err;
	u32 dbgr_control0;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);

	/* if an SM debugger isn't attached, skip suspend */
	if (!g->ops.gr.sm_debugger_attached(g)) {
		nvgpu_err(g,
			"SM debugger not attached, skipping suspend!");
		return;
	}

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
		"suspending gpc:%d, tpc:%d, sm%d", gpc, tpc, sm);

	/* assert stop trigger. */
	dbgr_control0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);
	dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f();
	gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset,
			dbgr_control0);

	err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm,
			global_esr_mask, check_errors);
	if (err != 0) {
		nvgpu_err(g,
			"SuspendSm failed");
		return;
	}
}
/*
 * Suspend every SM on the GPU: broadcast the stop trigger to all TPCs,
 * then wait for each SM to report lock-down.  No-op (with a warning) if
 * no SM debugger is attached; aborts on the first SM that fails.
 */
void gk20a_gr_suspend_all_sms(struct gk20a *g,
		u32 global_esr_mask, bool check_errors)
{
	struct gr_gk20a *gr = &g->gr;
	u32 gpc, tpc, sm;
	int err;
	u32 dbgr_control0;
	u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);

	/* if an SM debugger isn't attached, skip suspend */
	if (!g->ops.gr.sm_debugger_attached(g)) {
		nvgpu_err(g,
			"SM debugger not attached, skipping suspend!");
		return;
	}

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "suspending all sms");
	/* assert stop trigger. uniformity assumption: all SMs will have
	 * the same state in dbg_control0.
	 */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r());
	dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f();

	/* broadcast write */
	gk20a_writel(g,
		gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0);

	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
		for (tpc = 0; tpc < gr_gk20a_get_tpc_count(gr, gpc); tpc++) {
			for (sm = 0; sm < sm_per_tpc; sm++) {
				err = g->ops.gr.wait_for_sm_lock_down(g,
					gpc, tpc, sm,
					global_esr_mask, check_errors);
				if (err != 0) {
					nvgpu_err(g, "SuspendAllSms failed");
					return;
				}
			}
		}
	}
}
8644 */ 8645 dbgr_control0 = 8646 gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r()); 8647 dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(); 8648 8649 /* broadcast write */ 8650 gk20a_writel(g, 8651 gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0); 8652 8653 for (gpc = 0; gpc < gr->gpc_count; gpc++) { 8654 for (tpc = 0; tpc < gr_gk20a_get_tpc_count(gr, gpc); tpc++) { 8655 for (sm = 0; sm < sm_per_tpc; sm++) { 8656 err = g->ops.gr.wait_for_sm_lock_down(g, 8657 gpc, tpc, sm, 8658 global_esr_mask, check_errors); 8659 if (err != 0) { 8660 nvgpu_err(g, "SuspendAllSms failed"); 8661 return; 8662 } 8663 } 8664 } 8665 } 8666} 8667 8668void gk20a_gr_resume_single_sm(struct gk20a *g, 8669 u32 gpc, u32 tpc, u32 sm) 8670{ 8671 u32 dbgr_control0; 8672 u32 offset; 8673 /* 8674 * The following requires some clarification. Despite the fact that both 8675 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their 8676 * names, only one is actually a trigger, and that is the STOP_TRIGGER. 8677 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to 8678 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0 8679 * (_DISABLE) as well. 8680 8681 * Advice from the arch group: Disable the stop trigger first, as a 8682 * separate operation, in order to ensure that the trigger has taken 8683 * effect, before enabling the run trigger. 
8684 */ 8685 8686 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 8687 8688 /*De-assert stop trigger */ 8689 dbgr_control0 = 8690 gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); 8691 dbgr_control0 = set_field(dbgr_control0, 8692 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_m(), 8693 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_disable_f()); 8694 gk20a_writel(g, 8695 gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); 8696 8697 /* Run trigger */ 8698 dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_task_f(); 8699 gk20a_writel(g, 8700 gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); 8701} 8702 8703void gk20a_gr_resume_all_sms(struct gk20a *g) 8704{ 8705 u32 dbgr_control0; 8706 /* 8707 * The following requires some clarification. Despite the fact that both 8708 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their 8709 * names, only one is actually a trigger, and that is the STOP_TRIGGER. 8710 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to 8711 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0 8712 * (_DISABLE) as well. 8713 8714 * Advice from the arch group: Disable the stop trigger first, as a 8715 * separate operation, in order to ensure that the trigger has taken 8716 * effect, before enabling the run trigger. 
8717 */ 8718 8719 /*De-assert stop trigger */ 8720 dbgr_control0 = 8721 gk20a_readl(g, gr_gpcs_tpcs_sm_dbgr_control0_r()); 8722 dbgr_control0 &= ~gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(); 8723 gk20a_writel(g, 8724 gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0); 8725 8726 /* Run trigger */ 8727 dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_task_f(); 8728 gk20a_writel(g, 8729 gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0); 8730} 8731 8732int gr_gk20a_set_sm_debug_mode(struct gk20a *g, 8733 struct channel_gk20a *ch, u64 sms, bool enable) 8734{ 8735 struct nvgpu_dbg_reg_op *ops; 8736 unsigned int i = 0, sm_id; 8737 int err; 8738 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 8739 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 8740 8741 ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops)); 8742 if (ops == NULL) { 8743 return -ENOMEM; 8744 } 8745 for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) { 8746 int gpc, tpc; 8747 u32 tpc_offset, gpc_offset, reg_offset, reg_mask, reg_val; 8748 8749 if ((sms & BIT64(sm_id)) == 0ULL) { 8750 continue; 8751 } 8752 8753 gpc = g->gr.sm_to_cluster[sm_id].gpc_index; 8754 tpc = g->gr.sm_to_cluster[sm_id].tpc_index; 8755 8756 tpc_offset = tpc_in_gpc_stride * tpc; 8757 gpc_offset = gpc_stride * gpc; 8758 reg_offset = tpc_offset + gpc_offset; 8759 8760 ops[i].op = REGOP(WRITE_32); 8761 ops[i].type = REGOP(TYPE_GR_CTX); 8762 ops[i].offset = gr_gpc0_tpc0_sm_dbgr_control0_r() + reg_offset; 8763 8764 reg_mask = 0; 8765 reg_val = 0; 8766 if (enable) { 8767 reg_mask |= gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_m(); 8768 reg_val |= gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_f(); 8769 reg_mask |= gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_m(); 8770 reg_val |= gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_disable_f(); 8771 reg_mask |= gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_m(); 8772 reg_val |= gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_disable_f(); 8773 } 
else { 8774 reg_mask |= gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_m(); 8775 reg_val |= gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_f(); 8776 } 8777 8778 ops[i].and_n_mask_lo = reg_mask; 8779 ops[i].value_lo = reg_val; 8780 i++; 8781 } 8782 8783 err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0, NULL); 8784 if (err != 0) { 8785 nvgpu_err(g, "Failed to access register"); 8786 } 8787 nvgpu_kfree(g, ops); 8788 return err; 8789} 8790 8791/* 8792 * gr_gk20a_suspend_context() 8793 * This API should be called with dbg_session lock held 8794 * and ctxsw disabled 8795 * Returns bool value indicating if context was resident 8796 * or not 8797 */ 8798bool gr_gk20a_suspend_context(struct channel_gk20a *ch) 8799{ 8800 struct gk20a *g = ch->g; 8801 bool ctx_resident = false; 8802 8803 if (gk20a_is_channel_ctx_resident(ch)) { 8804 g->ops.gr.suspend_all_sms(g, 0, false); 8805 ctx_resident = true; 8806 } else { 8807 gk20a_disable_channel_tsg(g, ch); 8808 } 8809 8810 return ctx_resident; 8811} 8812 8813bool gr_gk20a_resume_context(struct channel_gk20a *ch) 8814{ 8815 struct gk20a *g = ch->g; 8816 bool ctx_resident = false; 8817 8818 if (gk20a_is_channel_ctx_resident(ch)) { 8819 g->ops.gr.resume_all_sms(g); 8820 ctx_resident = true; 8821 } else { 8822 gk20a_enable_channel_tsg(g, ch); 8823 } 8824 8825 return ctx_resident; 8826} 8827 8828int gr_gk20a_suspend_contexts(struct gk20a *g, 8829 struct dbg_session_gk20a *dbg_s, 8830 int *ctx_resident_ch_fd) 8831{ 8832 int local_ctx_resident_ch_fd = -1; 8833 bool ctx_resident; 8834 struct channel_gk20a *ch; 8835 struct dbg_session_channel_data *ch_data; 8836 int err = 0; 8837 8838 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 8839 8840 err = gr_gk20a_disable_ctxsw(g); 8841 if (err != 0) { 8842 nvgpu_err(g, "unable to stop gr ctxsw"); 8843 goto clean_up; 8844 } 8845 8846 nvgpu_mutex_acquire(&dbg_s->ch_list_lock); 8847 8848 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, 8849 dbg_session_channel_data, ch_entry) { 8850 ch = g->fifo.channel + 
ch_data->chid; 8851 8852 ctx_resident = gr_gk20a_suspend_context(ch); 8853 if (ctx_resident) { 8854 local_ctx_resident_ch_fd = ch_data->channel_fd; 8855 } 8856 } 8857 8858 nvgpu_mutex_release(&dbg_s->ch_list_lock); 8859 8860 err = gr_gk20a_enable_ctxsw(g); 8861 if (err != 0) { 8862 nvgpu_err(g, "unable to restart ctxsw!"); 8863 } 8864 8865 *ctx_resident_ch_fd = local_ctx_resident_ch_fd; 8866 8867clean_up: 8868 nvgpu_mutex_release(&g->dbg_sessions_lock); 8869 8870 return err; 8871} 8872 8873int gr_gk20a_resume_contexts(struct gk20a *g, 8874 struct dbg_session_gk20a *dbg_s, 8875 int *ctx_resident_ch_fd) 8876{ 8877 int local_ctx_resident_ch_fd = -1; 8878 bool ctx_resident; 8879 struct channel_gk20a *ch; 8880 int err = 0; 8881 struct dbg_session_channel_data *ch_data; 8882 8883 nvgpu_mutex_acquire(&g->dbg_sessions_lock); 8884 8885 err = gr_gk20a_disable_ctxsw(g); 8886 if (err != 0) { 8887 nvgpu_err(g, "unable to stop gr ctxsw"); 8888 goto clean_up; 8889 } 8890 8891 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, 8892 dbg_session_channel_data, ch_entry) { 8893 ch = g->fifo.channel + ch_data->chid; 8894 8895 ctx_resident = gr_gk20a_resume_context(ch); 8896 if (ctx_resident) { 8897 local_ctx_resident_ch_fd = ch_data->channel_fd; 8898 } 8899 } 8900 8901 err = gr_gk20a_enable_ctxsw(g); 8902 if (err != 0) { 8903 nvgpu_err(g, "unable to restart ctxsw!"); 8904 } 8905 8906 *ctx_resident_ch_fd = local_ctx_resident_ch_fd; 8907 8908clean_up: 8909 nvgpu_mutex_release(&g->dbg_sessions_lock); 8910 8911 return err; 8912} 8913 8914int gr_gk20a_trigger_suspend(struct gk20a *g) 8915{ 8916 int err = 0; 8917 u32 dbgr_control0; 8918 8919 /* assert stop trigger. uniformity assumption: all SMs will have 8920 * the same state in dbg_control0. 
*/ 8921 dbgr_control0 = 8922 gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r()); 8923 dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(); 8924 8925 /* broadcast write */ 8926 gk20a_writel(g, 8927 gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0); 8928 8929 return err; 8930} 8931 8932int gr_gk20a_wait_for_pause(struct gk20a *g, struct nvgpu_warpstate *w_state) 8933{ 8934 int err = 0; 8935 struct gr_gk20a *gr = &g->gr; 8936 u32 gpc, tpc, sm, sm_id; 8937 u32 global_mask; 8938 8939 if (!g->ops.gr.get_sm_no_lock_down_hww_global_esr_mask || 8940 !g->ops.gr.lock_down_sm || !g->ops.gr.bpt_reg_info) 8941 return -EINVAL; 8942 8943 /* Wait for the SMs to reach full stop. This condition is: 8944 * 1) All SMs with valid warps must be in the trap handler (SM_IN_TRAP_MODE) 8945 * 2) All SMs in the trap handler must have equivalent VALID and PAUSED warp 8946 * masks. 8947 */ 8948 global_mask = g->ops.gr.get_sm_no_lock_down_hww_global_esr_mask(g); 8949 8950 /* Lock down all SMs */ 8951 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { 8952 8953 gpc = g->gr.sm_to_cluster[sm_id].gpc_index; 8954 tpc = g->gr.sm_to_cluster[sm_id].tpc_index; 8955 sm = g->gr.sm_to_cluster[sm_id].sm_index; 8956 8957 err = g->ops.gr.lock_down_sm(g, gpc, tpc, sm, 8958 global_mask, false); 8959 if (err != 0) { 8960 nvgpu_err(g, "sm did not lock down!"); 8961 return err; 8962 } 8963 } 8964 8965 /* Read the warp status */ 8966 g->ops.gr.bpt_reg_info(g, w_state); 8967 8968 return 0; 8969} 8970 8971int gr_gk20a_resume_from_pause(struct gk20a *g) 8972{ 8973 int err = 0; 8974 u32 reg_val; 8975 8976 /* Clear the pause mask to tell the GPU we want to resume everyone */ 8977 gk20a_writel(g, 8978 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_r(), 0); 8979 8980 /* explicitly re-enable forwarding of SM interrupts upon any resume */ 8981 reg_val = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r()); 8982 reg_val |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(); 8983 gk20a_writel(g, 
gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), reg_val); 8984 8985 /* Now resume all sms, write a 0 to the stop trigger 8986 * then a 1 to the run trigger */ 8987 g->ops.gr.resume_all_sms(g); 8988 8989 return err; 8990} 8991 8992int gr_gk20a_clear_sm_errors(struct gk20a *g) 8993{ 8994 int ret = 0; 8995 u32 gpc, tpc, sm; 8996 struct gr_gk20a *gr = &g->gr; 8997 u32 global_esr; 8998 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); 8999 9000 if (!g->ops.gr.get_sm_hww_global_esr || !g->ops.gr.clear_sm_hww) 9001 return -EINVAL; 9002 9003 for (gpc = 0; gpc < gr->gpc_count; gpc++) { 9004 9005 /* check if any tpc has an exception */ 9006 for (tpc = 0; tpc < gr->gpc_tpc_count[gpc]; tpc++) { 9007 9008 for (sm = 0; sm < sm_per_tpc; sm++) { 9009 global_esr = g->ops.gr.get_sm_hww_global_esr(g, 9010 gpc, tpc, sm); 9011 9012 /* clearing hwws, also causes tpc and gpc 9013 * exceptions to be cleared 9014 */ 9015 g->ops.gr.clear_sm_hww(g, 9016 gpc, tpc, sm, global_esr); 9017 } 9018 } 9019 } 9020 9021 return ret; 9022} 9023 9024u32 gr_gk20a_tpc_enabled_exceptions(struct gk20a *g) 9025{ 9026 struct gr_gk20a *gr = &g->gr; 9027 u32 sm_id, tpc_exception_en = 0; 9028 u32 offset, regval, tpc_offset, gpc_offset; 9029 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 9030 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 9031 9032 for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { 9033 9034 tpc_offset = tpc_in_gpc_stride * g->gr.sm_to_cluster[sm_id].tpc_index; 9035 gpc_offset = gpc_stride * g->gr.sm_to_cluster[sm_id].gpc_index; 9036 offset = tpc_offset + gpc_offset; 9037 9038 regval = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + 9039 offset); 9040 /* Each bit represents corresponding enablement state, bit 0 corrsponds to SM0 */ 9041 tpc_exception_en |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval) << sm_id; 9042 } 9043 9044 return tpc_exception_en; 9045} 9046 9047u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 
tpc, u32 sm) 9048{ 9049 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 9050 u32 hww_warp_esr = gk20a_readl(g, 9051 gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset); 9052 return hww_warp_esr; 9053} 9054 9055u32 gk20a_gr_get_sm_hww_global_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm) 9056{ 9057 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc); 9058 9059 u32 hww_global_esr = gk20a_readl(g, 9060 gr_gpc0_tpc0_sm_hww_global_esr_r() + offset); 9061 9062 return hww_global_esr; 9063} 9064 9065u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g) 9066{ 9067 /* 9068 * These three interrupts don't require locking down the SM. They can 9069 * be handled by usermode clients as they aren't fatal. Additionally, 9070 * usermode clients may wish to allow some warps to execute while others 9071 * are at breakpoints, as opposed to fatal errors where all warps should 9072 * halt. 9073 */ 9074 u32 global_esr_mask = 9075 gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f() | 9076 gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f() | 9077 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(); 9078 9079 return global_esr_mask; 9080} 9081 9082/* invalidate channel lookup tlb */ 9083void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr) 9084{ 9085 nvgpu_spinlock_acquire(&gr->ch_tlb_lock); 9086 memset(gr->chid_tlb, 0, 9087 sizeof(struct gr_channel_map_tlb_entry) * 9088 GR_CHANNEL_MAP_TLB_SIZE); 9089 nvgpu_spinlock_release(&gr->ch_tlb_lock); 9090}
diff --git a/include/gk20a/gr_gk20a.h b/include/gk20a/gr_gk20a.h
deleted file mode 100644
index 2cd6a4f..0000000
--- a/include/gk20a/gr_gk20a.h
+++ /dev/null
@@ -1,852 +0,0 @@ 1/* 2 * GK20A Graphics Engine 3 * 4 * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef GR_GK20A_H 25#define GR_GK20A_H 26 27#include <nvgpu/types.h> 28 29#include "gr_ctx_gk20a.h" 30#include "mm_gk20a.h" 31#include <nvgpu/power_features/pg.h> 32 33#include <nvgpu/comptags.h> 34#include <nvgpu/cond.h> 35 36#define GR_IDLE_CHECK_DEFAULT 10 /* usec */ 37#define GR_IDLE_CHECK_MAX 200 /* usec */ 38#define GR_FECS_POLL_INTERVAL 5 /* usec */ 39 40#define INVALID_SCREEN_TILE_ROW_OFFSET 0xFFFFFFFF 41#define INVALID_MAX_WAYS 0xFFFFFFFF 42 43#define GK20A_FECS_UCODE_IMAGE "fecs.bin" 44#define GK20A_GPCCS_UCODE_IMAGE "gpccs.bin" 45 46#define GK20A_GR_MAX_PES_PER_GPC 3 47 48#define GK20A_TIMEOUT_FPGA 100000 /* 100 sec */ 49 50/* Flags to be passed to g->ops.gr.alloc_obj_ctx() */ 51#define NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP (1 << 1) 52#define NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP (1 << 2) 53 54/* 55 * allocate a minimum of 1 page (4KB) worth of patch space, this is 512 entries 56 * of address and data pairs 57 */ 58#define PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY 2 59#define PATCH_CTX_SLOTS_PER_PAGE \ 60 (PAGE_SIZE/(PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY * sizeof(u32))) 61#define PATCH_CTX_ENTRIES_FROM_SIZE(size) (size/sizeof(u32)) 62 63#define NVGPU_PREEMPTION_MODE_GRAPHICS_WFI (1 << 0) 64#define NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP (1 << 1) 65 66#define NVGPU_PREEMPTION_MODE_COMPUTE_WFI (1 << 0) 67#define NVGPU_PREEMPTION_MODE_COMPUTE_CTA (1 << 1) 68#define NVGPU_PREEMPTION_MODE_COMPUTE_CILP (1 << 2) 69 70#define CTXSW_INTR0 BIT32(0) 71#define CTXSW_INTR1 BIT32(1) 72 73#define MAILBOX_VALUE_TIMESTAMP_BUFFER_FULL 0x26 74 75struct tsg_gk20a; 76struct channel_gk20a; 77struct nvgpu_warpstate; 78 79enum ctxsw_addr_type; 80 81enum /* global_ctx_buffer */ { 82 CIRCULAR = 0, 83 PAGEPOOL = 1, 84 ATTRIBUTE = 2, 85 CIRCULAR_VPR = 3, 86 PAGEPOOL_VPR = 4, 87 ATTRIBUTE_VPR = 5, 88 GOLDEN_CTX = 6, 89 PRIV_ACCESS_MAP = 7, 90 /* #8 is reserved */ 91 FECS_TRACE_BUFFER = 9, 92 NR_GLOBAL_CTX_BUF = 10 93}; 94 95/* either ATTRIBUTE or ATTRIBUTE_VPR maps to ATTRIBUTE_VA */ 96enum 
/*global_ctx_buffer_va */ { 97 CIRCULAR_VA = 0, 98 PAGEPOOL_VA = 1, 99 ATTRIBUTE_VA = 2, 100 GOLDEN_CTX_VA = 3, 101 PRIV_ACCESS_MAP_VA = 4, 102 /* #5 is reserved */ 103 FECS_TRACE_BUFFER_VA = 6, 104 NR_GLOBAL_CTX_BUF_VA = 7 105}; 106 107enum { 108 WAIT_UCODE_LOOP, 109 WAIT_UCODE_TIMEOUT, 110 WAIT_UCODE_ERROR, 111 WAIT_UCODE_OK 112}; 113 114enum { 115 GR_IS_UCODE_OP_EQUAL, 116 GR_IS_UCODE_OP_NOT_EQUAL, 117 GR_IS_UCODE_OP_AND, 118 GR_IS_UCODE_OP_LESSER, 119 GR_IS_UCODE_OP_LESSER_EQUAL, 120 GR_IS_UCODE_OP_SKIP 121}; 122 123enum { 124 eUcodeHandshakeInitComplete = 1, 125 eUcodeHandshakeMethodFinished 126}; 127 128enum { 129 ELCG_MODE = (1 << 0), 130 BLCG_MODE = (1 << 1), 131 INVALID_MODE = (1 << 2) 132}; 133 134enum { 135 NVGPU_EVENT_ID_BPT_INT = 0, 136 NVGPU_EVENT_ID_BPT_PAUSE, 137 NVGPU_EVENT_ID_BLOCKING_SYNC, 138 NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED, 139 NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE, 140 NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN, 141 NVGPU_EVENT_ID_MAX, 142}; 143 144#ifndef GR_GO_IDLE_BUNDLE 145#define GR_GO_IDLE_BUNDLE 0x0000e100 /* --V-B */ 146#endif 147 148struct gr_channel_map_tlb_entry { 149 u32 curr_ctx; 150 u32 chid; 151 u32 tsgid; 152}; 153 154struct gr_zcull_gk20a { 155 u32 aliquot_width; 156 u32 aliquot_height; 157 u32 aliquot_size; 158 u32 total_aliquots; 159 160 u32 width_align_pixels; 161 u32 height_align_pixels; 162 u32 pixel_squares_by_aliquots; 163}; 164 165struct gr_zcull_info { 166 u32 width_align_pixels; 167 u32 height_align_pixels; 168 u32 pixel_squares_by_aliquots; 169 u32 aliquot_total; 170 u32 region_byte_multiplier; 171 u32 region_header_size; 172 u32 subregion_header_size; 173 u32 subregion_width_align_pixels; 174 u32 subregion_height_align_pixels; 175 u32 subregion_count; 176}; 177 178#define GK20A_ZBC_COLOR_VALUE_SIZE 4 /* RGBA */ 179 180#define GK20A_STARTOF_ZBC_TABLE 1U /* index zero reserved to indicate "not ZBCd" */ 181#define GK20A_SIZEOF_ZBC_TABLE 16 /* match ltcs_ltss_dstg_zbc_index_address width (4) */ 182#define 
GK20A_ZBC_TABLE_SIZE (16 - 1) 183 184#define GK20A_ZBC_TYPE_INVALID 0 185#define GK20A_ZBC_TYPE_COLOR 1 186#define GK20A_ZBC_TYPE_DEPTH 2 187#define T19X_ZBC 3 188 189struct zbc_color_table { 190 u32 color_ds[GK20A_ZBC_COLOR_VALUE_SIZE]; 191 u32 color_l2[GK20A_ZBC_COLOR_VALUE_SIZE]; 192 u32 format; 193 u32 ref_cnt; 194}; 195 196struct zbc_depth_table { 197 u32 depth; 198 u32 format; 199 u32 ref_cnt; 200}; 201 202struct zbc_s_table { 203 u32 stencil; 204 u32 format; 205 u32 ref_cnt; 206}; 207 208struct zbc_entry { 209 u32 color_ds[GK20A_ZBC_COLOR_VALUE_SIZE]; 210 u32 color_l2[GK20A_ZBC_COLOR_VALUE_SIZE]; 211 u32 depth; 212 u32 type; /* color or depth */ 213 u32 format; 214}; 215 216struct zbc_query_params { 217 u32 color_ds[GK20A_ZBC_COLOR_VALUE_SIZE]; 218 u32 color_l2[GK20A_ZBC_COLOR_VALUE_SIZE]; 219 u32 depth; 220 u32 ref_cnt; 221 u32 format; 222 u32 type; /* color or depth */ 223 u32 index_size; /* [out] size, [in] index */ 224}; 225 226struct sm_info { 227 u32 gpc_index; 228 u32 tpc_index; 229 u32 sm_index; 230 u32 global_tpc_index; 231}; 232 233#if defined(CONFIG_GK20A_CYCLE_STATS) 234struct gk20a_cs_snapshot_client; 235struct gk20a_cs_snapshot; 236#endif 237 238struct gr_gk20a_isr_data { 239 u32 addr; 240 u32 data_lo; 241 u32 data_hi; 242 u32 curr_ctx; 243 struct channel_gk20a *ch; 244 u32 offset; 245 u32 sub_chan; 246 u32 class_num; 247}; 248 249struct gr_ctx_buffer_desc { 250 void (*destroy)(struct gk20a *, struct gr_ctx_buffer_desc *); 251 struct nvgpu_mem mem; 252 void *priv; 253}; 254 255struct nvgpu_preemption_modes_rec { 256 u32 graphics_preemption_mode_flags; /* supported preemption modes */ 257 u32 compute_preemption_mode_flags; /* supported preemption modes */ 258 259 u32 default_graphics_preempt_mode; /* default mode */ 260 u32 default_compute_preempt_mode; /* default mode */ 261}; 262 263struct gr_gk20a { 264 struct gk20a *g; 265 struct { 266 bool dynamic; 267 268 u32 buffer_size; 269 u32 buffer_total_size; 270 271 bool golden_image_initialized; 
272 u32 golden_image_size; 273 u32 *local_golden_image; 274 275 u32 hwpm_ctxsw_buffer_offset_map_count; 276 struct ctxsw_buf_offset_map_entry *hwpm_ctxsw_buffer_offset_map; 277 278 u32 zcull_ctxsw_image_size; 279 280 u32 pm_ctxsw_image_size; 281 282 u32 buffer_header_size; 283 284 u32 priv_access_map_size; 285 286 u32 fecs_trace_buffer_size; 287 288 struct gr_ucode_gk20a ucode; 289 290 struct av_list_gk20a sw_bundle_init; 291 struct av_list_gk20a sw_method_init; 292 struct aiv_list_gk20a sw_ctx_load; 293 struct av_list_gk20a sw_non_ctx_load; 294 struct av_list_gk20a sw_veid_bundle_init; 295 struct av64_list_gk20a sw_bundle64_init; 296 struct { 297 struct aiv_list_gk20a sys; 298 struct aiv_list_gk20a gpc; 299 struct aiv_list_gk20a tpc; 300 struct aiv_list_gk20a zcull_gpc; 301 struct aiv_list_gk20a ppc; 302 struct aiv_list_gk20a pm_sys; 303 struct aiv_list_gk20a pm_gpc; 304 struct aiv_list_gk20a pm_tpc; 305 struct aiv_list_gk20a pm_ppc; 306 struct aiv_list_gk20a perf_sys; 307 struct aiv_list_gk20a perf_gpc; 308 struct aiv_list_gk20a fbp; 309 struct aiv_list_gk20a fbp_router; 310 struct aiv_list_gk20a gpc_router; 311 struct aiv_list_gk20a pm_ltc; 312 struct aiv_list_gk20a pm_fbpa; 313 struct aiv_list_gk20a perf_sys_router; 314 struct aiv_list_gk20a perf_pma; 315 struct aiv_list_gk20a pm_rop; 316 struct aiv_list_gk20a pm_ucgpc; 317 struct aiv_list_gk20a etpc; 318 struct aiv_list_gk20a pm_cau; 319 } ctxsw_regs; 320 u32 regs_base_index; 321 bool valid; 322 323 u32 preempt_image_size; 324 bool force_preemption_gfxp; 325 bool force_preemption_cilp; 326 bool dump_ctxsw_stats_on_channel_close; 327 } ctx_vars; 328 329 struct nvgpu_mutex ctx_mutex; /* protect golden ctx init */ 330 struct nvgpu_mutex fecs_mutex; /* protect fecs method */ 331 332#define GR_NETLIST_DYNAMIC -1 333#define GR_NETLIST_STATIC_A 'A' 334 int netlist; 335 336 struct nvgpu_cond init_wq; 337 int initialized; 338 339 u32 num_fbps; 340 341 u32 max_comptag_lines; 342 u32 compbit_backing_size; 343 u32 
comptags_per_cacheline; 344 u32 slices_per_ltc; 345 u32 cacheline_size; 346 u32 gobs_per_comptagline_per_slice; 347 348 u32 max_gpc_count; 349 u32 max_fbps_count; 350 u32 max_tpc_per_gpc_count; 351 u32 max_zcull_per_gpc_count; 352 u32 max_tpc_count; 353 354 u32 sys_count; 355 u32 gpc_count; 356 u32 pe_count_per_gpc; 357 u32 ppc_count; 358 u32 *gpc_ppc_count; 359 u32 tpc_count; 360 u32 *gpc_tpc_count; 361 u32 *gpc_tpc_mask; 362 u32 zcb_count; 363 u32 *gpc_zcb_count; 364 u32 *pes_tpc_count[GK20A_GR_MAX_PES_PER_GPC]; 365 u32 *pes_tpc_mask[GK20A_GR_MAX_PES_PER_GPC]; 366 u32 *gpc_skip_mask; 367 368 u32 bundle_cb_default_size; 369 u32 min_gpm_fifo_depth; 370 u32 bundle_cb_token_limit; 371 u32 attrib_cb_default_size; 372 u32 attrib_cb_size; 373 u32 attrib_cb_gfxp_default_size; 374 u32 attrib_cb_gfxp_size; 375 u32 alpha_cb_default_size; 376 u32 alpha_cb_size; 377 u32 timeslice_mode; 378 u32 czf_bypass; 379 u32 pd_max_batches; 380 u32 gfxp_wfi_timeout_count; 381 u32 gfxp_wfi_timeout_unit; 382 383 /* 384 * The deductible memory size for max_comptag_mem (in MBytes) 385 * Usually close to memory size that running system is taking 386 */ 387 u32 comptag_mem_deduct; 388 389 struct gr_ctx_buffer_desc global_ctx_buffer[NR_GLOBAL_CTX_BUF]; 390 391 u8 *map_tiles; 392 u32 map_tile_count; 393 u32 map_row_offset; 394 395 u32 max_comptag_mem; /* max memory size (MB) for comptag */ 396 struct compbit_store_desc compbit_store; 397 struct gk20a_comptag_allocator comp_tags; 398 399 struct gr_zcull_gk20a zcull; 400 401 struct nvgpu_mutex zbc_lock; 402 struct zbc_color_table zbc_col_tbl[GK20A_ZBC_TABLE_SIZE]; 403 struct zbc_depth_table zbc_dep_tbl[GK20A_ZBC_TABLE_SIZE]; 404 struct zbc_s_table zbc_s_tbl[GK20A_ZBC_TABLE_SIZE]; 405 s32 max_default_color_index; 406 s32 max_default_depth_index; 407 s32 max_default_s_index; 408 409 u32 max_used_color_index; 410 u32 max_used_depth_index; 411 u32 max_used_s_index; 412 413#define GR_CHANNEL_MAP_TLB_SIZE 2 /* must of power of 2 */ 414 struct 
gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE]; 415 u32 channel_tlb_flush_index; 416 struct nvgpu_spinlock ch_tlb_lock; 417 418 void (*remove_support)(struct gr_gk20a *gr); 419 bool sw_ready; 420 bool skip_ucode_init; 421 422 struct nvgpu_preemption_modes_rec preemption_mode_rec; 423 424 u32 fecs_feature_override_ecc_val; 425 426 int cilp_preempt_pending_chid; 427 428 u32 fbp_en_mask; 429 u32 *fbp_rop_l2_en_mask; 430 u32 no_of_sm; 431 struct sm_info *sm_to_cluster; 432 433#if defined(CONFIG_GK20A_CYCLE_STATS) 434 struct nvgpu_mutex cs_lock; 435 struct gk20a_cs_snapshot *cs_data; 436#endif 437 u32 max_css_buffer_size; 438}; 439 440void gk20a_fecs_dump_falcon_stats(struct gk20a *g); 441void gk20a_gpccs_dump_falcon_stats(struct gk20a *g); 442 443/* contexts associated with a TSG */ 444struct nvgpu_gr_ctx { 445 struct nvgpu_mem mem; 446 447 u32 graphics_preempt_mode; 448 u32 compute_preempt_mode; 449 450 struct nvgpu_mem preempt_ctxsw_buffer; 451 struct nvgpu_mem spill_ctxsw_buffer; 452 struct nvgpu_mem betacb_ctxsw_buffer; 453 struct nvgpu_mem pagepool_ctxsw_buffer; 454 u32 ctx_id; 455 bool ctx_id_valid; 456 bool cilp_preempt_pending; 457 bool boosted_ctx; 458 bool golden_img_loaded; 459 460#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 461 u64 virt_ctx; 462#endif 463 464 struct patch_desc patch_ctx; 465 struct zcull_ctx_desc zcull_ctx; 466 struct pm_ctx_desc pm_ctx; 467 u64 global_ctx_buffer_va[NR_GLOBAL_CTX_BUF_VA]; 468 u64 global_ctx_buffer_size[NR_GLOBAL_CTX_BUF_VA]; 469 int global_ctx_buffer_index[NR_GLOBAL_CTX_BUF_VA]; 470 bool global_ctx_buffer_mapped; 471 472 u32 tsgid; 473}; 474 475struct gk20a_ctxsw_ucode_segment { 476 u32 offset; 477 u32 size; 478}; 479 480struct gk20a_ctxsw_ucode_segments { 481 u32 boot_entry; 482 u32 boot_imem_offset; 483 u32 boot_signature; 484 struct gk20a_ctxsw_ucode_segment boot; 485 struct gk20a_ctxsw_ucode_segment code; 486 struct gk20a_ctxsw_ucode_segment data; 487}; 488 489/* sums over the ucode files as sequences of u32, 
computed to the 490 * boot_signature field in the structure above */ 491 492/* T18X FECS remains same as T21X, 493 * so FALCON_UCODE_SIG_T21X_FECS_WITH_RESERVED used 494 * for T18X*/ 495#define FALCON_UCODE_SIG_T18X_GPCCS_WITH_RESERVED 0x68edab34 496#define FALCON_UCODE_SIG_T21X_FECS_WITH_DMEM_SIZE 0x9121ab5c 497#define FALCON_UCODE_SIG_T21X_FECS_WITH_RESERVED 0x9125ab5c 498#define FALCON_UCODE_SIG_T12X_FECS_WITH_RESERVED 0x8a621f78 499#define FALCON_UCODE_SIG_T12X_FECS_WITHOUT_RESERVED 0x67e5344b 500#define FALCON_UCODE_SIG_T12X_FECS_OLDER 0x56da09f 501 502#define FALCON_UCODE_SIG_T21X_GPCCS_WITH_RESERVED 0x3d3d65e2 503#define FALCON_UCODE_SIG_T12X_GPCCS_WITH_RESERVED 0x303465d5 504#define FALCON_UCODE_SIG_T12X_GPCCS_WITHOUT_RESERVED 0x3fdd33d3 505#define FALCON_UCODE_SIG_T12X_GPCCS_OLDER 0x53d7877 506 507#define FALCON_UCODE_SIG_T21X_FECS_WITHOUT_RESERVED 0x93671b7d 508#define FALCON_UCODE_SIG_T21X_FECS_WITHOUT_RESERVED2 0x4d6cbc10 509 510#define FALCON_UCODE_SIG_T21X_GPCCS_WITHOUT_RESERVED 0x393161da 511 512struct gk20a_ctxsw_ucode_info { 513 u64 *p_va; 514 struct nvgpu_mem inst_blk_desc; 515 struct nvgpu_mem surface_desc; 516 struct gk20a_ctxsw_ucode_segments fecs; 517 struct gk20a_ctxsw_ucode_segments gpccs; 518}; 519 520struct gk20a_ctxsw_bootloader_desc { 521 u32 start_offset; 522 u32 size; 523 u32 imem_offset; 524 u32 entry_point; 525}; 526 527struct fecs_method_op_gk20a { 528 struct { 529 u32 addr; 530 u32 data; 531 } method; 532 533 struct { 534 u32 id; 535 u32 data; 536 u32 clr; 537 u32 *ret; 538 u32 ok; 539 u32 fail; 540 } mailbox; 541 542 struct { 543 u32 ok; 544 u32 fail; 545 } cond; 546 547}; 548 549struct nvgpu_warpstate { 550 u64 valid_warps[2]; 551 u64 trapped_warps[2]; 552 u64 paused_warps[2]; 553}; 554 555struct gpu_ops; 556int gr_gk20a_load_golden_ctx_image(struct gk20a *g, 557 struct channel_gk20a *c); 558void gk20a_init_gr(struct gk20a *g); 559int gk20a_init_gr_support(struct gk20a *g); 560int gk20a_enable_gr_hw(struct gk20a *g); 561int 
gk20a_gr_reset(struct gk20a *g); 562void gk20a_gr_wait_initialized(struct gk20a *g); 563 564int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a); 565 566int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags); 567 568int gk20a_gr_isr(struct gk20a *g); 569u32 gk20a_gr_nonstall_isr(struct gk20a *g); 570 571/* zcull */ 572u32 gr_gk20a_get_ctxsw_zcull_size(struct gk20a *g, struct gr_gk20a *gr); 573int gr_gk20a_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr, 574 struct channel_gk20a *c, u64 zcull_va, u32 mode); 575int gr_gk20a_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr, 576 struct gr_zcull_info *zcull_params); 577void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, 578 u32 *zcull_map_tiles); 579/* zbc */ 580int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr, 581 struct zbc_entry *zbc_val); 582int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr, 583 struct zbc_query_params *query_params); 584int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, 585 struct zbc_entry *zbc_val); 586int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr); 587 588/* pmu */ 589int gr_gk20a_fecs_get_reglist_img_size(struct gk20a *g, u32 *size); 590int gr_gk20a_fecs_set_reglist_bind_inst(struct gk20a *g, 591 struct nvgpu_mem *inst_block); 592int gr_gk20a_fecs_set_reglist_virtual_addr(struct gk20a *g, u64 pmu_va); 593 594void gr_gk20a_init_cg_mode(struct gk20a *g, u32 cgmode, u32 mode_config); 595 596/* sm */ 597bool gk20a_gr_sm_debugger_attached(struct gk20a *g); 598u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g); 599 600#define gr_gk20a_elpg_protected_call(g, func) \ 601 ({ \ 602 int err = 0; \ 603 if (g->support_pmu) {\ 604 err = nvgpu_pg_elpg_disable(g);\ 605 if (err != 0) {\ 606 (void)nvgpu_pg_elpg_enable(g); \ 607 } \ 608 } \ 609 if (err == 0) { \ 610 err = func; \ 611 if (g->support_pmu) {\ 612 (void)nvgpu_pg_elpg_enable(g); \ 613 } \ 614 } \ 615 err; \ 616 }) 
617 618int gk20a_gr_suspend(struct gk20a *g); 619 620struct nvgpu_dbg_reg_op; 621int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, 622 struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops, 623 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops, 624 bool *is_curr_ctx); 625int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, 626 struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops, 627 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops, 628 bool ch_is_curr_ctx); 629int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, 630 u32 addr, 631 u32 max_offsets, 632 u32 *offsets, u32 *offset_addrs, 633 u32 *num_offsets, 634 bool is_quad, u32 quad); 635int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, 636 u32 addr, 637 u32 max_offsets, 638 u32 *offsets, u32 *offset_addrs, 639 u32 *num_offsets); 640int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, 641 struct channel_gk20a *c, 642 bool enable_smpc_ctxsw); 643int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, 644 struct channel_gk20a *c, 645 u64 gpu_va, 646 u32 mode); 647 648struct nvgpu_gr_ctx; 649void gr_gk20a_ctx_patch_write(struct gk20a *g, struct nvgpu_gr_ctx *ch_ctx, 650 u32 addr, u32 data, bool patch); 651int gr_gk20a_ctx_patch_write_begin(struct gk20a *g, 652 struct nvgpu_gr_ctx *ch_ctx, 653 bool update_patch_count); 654void gr_gk20a_ctx_patch_write_end(struct gk20a *g, 655 struct nvgpu_gr_ctx *ch_ctx, 656 bool update_patch_count); 657void gr_gk20a_commit_global_pagepool(struct gk20a *g, 658 struct nvgpu_gr_ctx *ch_ctx, 659 u64 addr, u32 size, bool patch); 660void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data); 661void gr_gk20a_enable_hww_exceptions(struct gk20a *g); 662int gr_gk20a_init_fs_state(struct gk20a *g); 663int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr); 664int gr_gk20a_init_ctxsw_ucode(struct gk20a *g); 665int gr_gk20a_load_ctxsw_ucode(struct gk20a *g); 666void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g); 667void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base, 668 struct 
gk20a_ctxsw_ucode_segments *segments, u32 reg_offset); 669void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base, 670 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset); 671 672 673void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *c); 674int gr_gk20a_disable_ctxsw(struct gk20a *g); 675int gr_gk20a_enable_ctxsw(struct gk20a *g); 676void gk20a_gr_resume_single_sm(struct gk20a *g, 677 u32 gpc, u32 tpc, u32 sm); 678void gk20a_gr_resume_all_sms(struct gk20a *g); 679void gk20a_gr_suspend_single_sm(struct gk20a *g, 680 u32 gpc, u32 tpc, u32 sm, 681 u32 global_esr_mask, bool check_errors); 682void gk20a_gr_suspend_all_sms(struct gk20a *g, 683 u32 global_esr_mask, bool check_errors); 684u32 gr_gk20a_get_tpc_count(struct gr_gk20a *gr, u32 gpc_index); 685int gr_gk20a_set_sm_debug_mode(struct gk20a *g, 686 struct channel_gk20a *ch, u64 sms, bool enable); 687bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch); 688int gr_gk20a_add_zbc_color(struct gk20a *g, struct gr_gk20a *gr, 689 struct zbc_entry *color_val, u32 index); 690int gr_gk20a_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr, 691 struct zbc_entry *depth_val, u32 index); 692int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, 693 struct zbc_entry *zbc_val); 694void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries); 695int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, 696 u32 expect_delay); 697int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, 698 bool *post_event, struct channel_gk20a *fault_ch, 699 u32 *hww_global_esr); 700int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, 701 bool *post_event); 702int gr_gk20a_init_ctx_state(struct gk20a *g); 703int gr_gk20a_submit_fecs_method_op(struct gk20a *g, 704 struct fecs_method_op_gk20a op, 705 bool sleepduringwait); 706int gr_gk20a_submit_fecs_method_op_locked(struct gk20a *g, 707 struct fecs_method_op_gk20a op, 708 bool sleepduringwait); 709int 
gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g, 710 struct fecs_method_op_gk20a op); 711int gr_gk20a_alloc_gr_ctx(struct gk20a *g, 712 struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm, 713 u32 class, u32 padding); 714void gr_gk20a_free_gr_ctx(struct gk20a *g, 715 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx); 716int gr_gk20a_halt_pipe(struct gk20a *g); 717 718#if defined(CONFIG_GK20A_CYCLE_STATS) 719int gr_gk20a_css_attach(struct channel_gk20a *ch, /* in - main hw structure */ 720 u32 perfmon_id_count, /* in - number of perfmons*/ 721 u32 *perfmon_id_start, /* out- index of first pm */ 722 /* in/out - pointer to client data used in later */ 723 struct gk20a_cs_snapshot_client *css_client); 724 725int gr_gk20a_css_detach(struct channel_gk20a *ch, 726 struct gk20a_cs_snapshot_client *css_client); 727int gr_gk20a_css_flush(struct channel_gk20a *ch, 728 struct gk20a_cs_snapshot_client *css_client); 729 730void gr_gk20a_free_cyclestats_snapshot_data(struct gk20a *g); 731 732#else 733/* fake empty cleanup function if no cyclestats snapshots enabled */ 734static inline void gr_gk20a_free_cyclestats_snapshot_data(struct gk20a *g) 735{ 736 (void)g; 737} 738#endif 739 740void gr_gk20a_fecs_host_int_enable(struct gk20a *g); 741int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch, 742 struct gr_gk20a_isr_data *isr_data); 743int gk20a_gr_lock_down_sm(struct gk20a *g, 744 u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask, 745 bool check_errors); 746int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, 747 u32 global_esr_mask, bool check_errors); 748int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, 749 u32 *mailbox_ret, u32 opc_success, 750 u32 mailbox_ok, u32 opc_fail, 751 u32 mailbox_fail, bool sleepduringwait); 752 753int gr_gk20a_get_ctx_id(struct gk20a *g, 754 struct channel_gk20a *c, 755 u32 *ctx_id); 756 757u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm); 758u32 
gk20a_gr_get_sm_hww_global_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm); 759 760int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, 761 u32 expect_delay); 762 763struct dbg_session_gk20a; 764 765bool gr_gk20a_suspend_context(struct channel_gk20a *ch); 766bool gr_gk20a_resume_context(struct channel_gk20a *ch); 767int gr_gk20a_suspend_contexts(struct gk20a *g, 768 struct dbg_session_gk20a *dbg_s, 769 int *ctx_resident_ch_fd); 770int gr_gk20a_resume_contexts(struct gk20a *g, 771 struct dbg_session_gk20a *dbg_s, 772 int *ctx_resident_ch_fd); 773void gk20a_gr_enable_gpc_exceptions(struct gk20a *g); 774void gk20a_gr_enable_exceptions(struct gk20a *g); 775int gr_gk20a_trigger_suspend(struct gk20a *g); 776int gr_gk20a_wait_for_pause(struct gk20a *g, struct nvgpu_warpstate *w_state); 777int gr_gk20a_resume_from_pause(struct gk20a *g); 778int gr_gk20a_clear_sm_errors(struct gk20a *g); 779u32 gr_gk20a_tpc_enabled_exceptions(struct gk20a *g); 780 781int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c); 782 783int gr_gk20a_init_sm_id_table(struct gk20a *g); 784 785int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va); 786 787void gr_gk20a_write_zcull_ptr(struct gk20a *g, 788 struct nvgpu_mem *mem, u64 gpu_va); 789 790void gr_gk20a_write_pm_ptr(struct gk20a *g, 791 struct nvgpu_mem *mem, u64 gpu_va); 792 793u32 gk20a_gr_gpc_offset(struct gk20a *g, u32 gpc); 794u32 gk20a_gr_tpc_offset(struct gk20a *g, u32 tpc); 795void gk20a_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc, 796 u32 *esr_sm_sel); 797void gk20a_gr_init_ovr_sm_dsm_perf(void); 798void gk20a_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs, 799 u32 **ovr_perf_regs); 800void gk20a_gr_init_ctxsw_hdr_data(struct gk20a *g, 801 struct nvgpu_mem *mem); 802u32 gr_gk20a_get_patch_slots(struct gk20a *g); 803int gk20a_gr_handle_notify_pending(struct gk20a *g, 804 struct gr_gk20a_isr_data *isr_data); 805 806int gr_gk20a_alloc_global_ctx_buffers(struct gk20a 
*g); 807int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, 808 struct channel_gk20a *c); 809int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g, 810 struct channel_gk20a *c, bool patch); 811 812int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, 813 struct channel_gk20a *c); 814u32 gk20a_init_sw_bundle(struct gk20a *g); 815int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type); 816int gk20a_gr_handle_semaphore_pending(struct gk20a *g, 817 struct gr_gk20a_isr_data *isr_data); 818int gr_gk20a_add_ctxsw_reg_pm_fbpa(struct gk20a *g, 819 struct ctxsw_buf_offset_map_entry *map, 820 struct aiv_list_gk20a *regs, 821 u32 *count, u32 *offset, 822 u32 max_cnt, u32 base, 823 u32 num_fbpas, u32 stride, u32 mask); 824int gr_gk20a_add_ctxsw_reg_perf_pma(struct ctxsw_buf_offset_map_entry *map, 825 struct aiv_list_gk20a *regs, 826 u32 *count, u32 *offset, 827 u32 max_cnt, u32 base, u32 mask); 828int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, 829 enum ctxsw_addr_type *addr_type, 830 u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num, 831 u32 *broadcast_flags); 832int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr, 833 u32 gpc_num, 834 u32 *priv_addr_table, u32 *t); 835int gr_gk20a_create_priv_addr_table(struct gk20a *g, 836 u32 addr, 837 u32 *priv_addr_table, 838 u32 *num_registers); 839void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr, 840 u32 num_fbpas, 841 u32 *priv_addr_table, u32 *t); 842int gr_gk20a_get_offset_in_gpccs_segment(struct gk20a *g, 843 enum ctxsw_addr_type addr_type, u32 num_tpcs, u32 num_ppcs, 844 u32 reg_list_ppc_count, u32 *__offset_in_segment); 845 846void gk20a_gr_destroy_ctx_buffer(struct gk20a *g, 847 struct gr_ctx_buffer_desc *desc); 848int gk20a_gr_alloc_ctx_buffer(struct gk20a *g, 849 struct gr_ctx_buffer_desc *desc, size_t size); 850void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr); 851int gr_gk20a_set_fecs_watchdog_timeout(struct gk20a *g); 852#endif /*__GR_GK20A_H__*/
diff --git a/include/gk20a/gr_pri_gk20a.h b/include/gk20a/gr_pri_gk20a.h
deleted file mode 100644
index d832d90..0000000
--- a/include/gk20a/gr_pri_gk20a.h
+++ /dev/null
@@ -1,261 +0,0 @@ 1/* 2 * GK20A Graphics Context Pri Register Addressing 3 * 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24#ifndef GR_PRI_GK20A_H 25#define GR_PRI_GK20A_H 26 27/* 28 * These convenience macros are generally for use in the management/modificaiton 29 * of the context state store for gr/compute contexts. 
30 */ 31 32/* 33 * GPC pri addressing 34 */ 35static inline u32 pri_gpccs_addr_width(void) 36{ 37 return 15; /*from where?*/ 38} 39static inline u32 pri_gpccs_addr_mask(u32 addr) 40{ 41 return addr & ((1 << pri_gpccs_addr_width()) - 1); 42} 43static inline u32 pri_gpc_addr(struct gk20a *g, u32 addr, u32 gpc) 44{ 45 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE); 46 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 47 return gpc_base + (gpc * gpc_stride) + addr; 48} 49static inline bool pri_is_gpc_addr_shared(struct gk20a *g, u32 addr) 50{ 51 u32 gpc_shared_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_SHARED_BASE); 52 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 53 return (addr >= gpc_shared_base) && 54 (addr < gpc_shared_base + gpc_stride); 55} 56static inline bool pri_is_gpc_addr(struct gk20a *g, u32 addr) 57{ 58 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE); 59 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 60 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); 61 return ((addr >= gpc_base) && 62 (addr < gpc_base + num_gpcs * gpc_stride)) || 63 pri_is_gpc_addr_shared(g, addr); 64} 65static inline u32 pri_get_gpc_num(struct gk20a *g, u32 addr) 66{ 67 u32 i, start; 68 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); 69 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE); 70 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 71 for (i = 0; i < num_gpcs; i++) { 72 start = gpc_base + (i * gpc_stride); 73 if ((addr >= start) && (addr < (start + gpc_stride))) 74 return i; 75 } 76 return 0; 77} 78 79/* 80 * PPC pri addressing 81 */ 82static inline bool pri_is_ppc_addr_shared(struct gk20a *g, u32 addr) 83{ 84 u32 ppc_in_gpc_shared_base = nvgpu_get_litter_value(g, 85 GPU_LIT_PPC_IN_GPC_SHARED_BASE); 86 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, 87 GPU_LIT_PPC_IN_GPC_STRIDE); 88 89 return ((addr >= ppc_in_gpc_shared_base) && 90 (addr < 
(ppc_in_gpc_shared_base + ppc_in_gpc_stride))); 91} 92 93static inline bool pri_is_ppc_addr(struct gk20a *g, u32 addr) 94{ 95 u32 ppc_in_gpc_base = nvgpu_get_litter_value(g, 96 GPU_LIT_PPC_IN_GPC_BASE); 97 u32 num_pes_per_gpc = nvgpu_get_litter_value(g, 98 GPU_LIT_NUM_PES_PER_GPC); 99 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, 100 GPU_LIT_PPC_IN_GPC_STRIDE); 101 102 return ((addr >= ppc_in_gpc_base) && 103 (addr < ppc_in_gpc_base + num_pes_per_gpc * ppc_in_gpc_stride)) 104 || pri_is_ppc_addr_shared(g, addr); 105} 106 107/* 108 * TPC pri addressing 109 */ 110static inline u32 pri_tpccs_addr_width(void) 111{ 112 return 11; /* from where? */ 113} 114static inline u32 pri_tpccs_addr_mask(u32 addr) 115{ 116 return addr & ((1 << pri_tpccs_addr_width()) - 1); 117} 118static inline u32 pri_fbpa_addr_mask(struct gk20a *g, u32 addr) 119{ 120 return addr & (nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE) - 1); 121} 122static inline u32 pri_tpc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 tpc) 123{ 124 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE); 125 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 126 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE); 127 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 128 return gpc_base + (gpc * gpc_stride) + 129 tpc_in_gpc_base + (tpc * tpc_in_gpc_stride) + 130 addr; 131} 132static inline bool pri_is_tpc_addr_shared(struct gk20a *g, u32 addr) 133{ 134 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); 135 u32 tpc_in_gpc_shared_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_SHARED_BASE); 136 return (addr >= tpc_in_gpc_shared_base) && 137 (addr < (tpc_in_gpc_shared_base + 138 tpc_in_gpc_stride)); 139} 140static inline u32 pri_fbpa_addr(struct gk20a *g, u32 addr, u32 fbpa) 141{ 142 return (nvgpu_get_litter_value(g, GPU_LIT_FBPA_BASE) + addr + 143 (fbpa * nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE))); 144} 145static 
inline bool pri_is_fbpa_addr_shared(struct gk20a *g, u32 addr) 146{ 147 u32 fbpa_shared_base = nvgpu_get_litter_value(g, GPU_LIT_FBPA_SHARED_BASE); 148 u32 fbpa_stride = nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE); 149 return ((addr >= fbpa_shared_base) && 150 (addr < (fbpa_shared_base + fbpa_stride))); 151} 152static inline bool pri_is_fbpa_addr(struct gk20a *g, u32 addr) 153{ 154 u32 fbpa_base = nvgpu_get_litter_value(g, GPU_LIT_FBPA_BASE); 155 u32 fbpa_stride = nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE); 156 u32 num_fbpas = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS); 157 return (((addr >= fbpa_base) && 158 (addr < (fbpa_base + num_fbpas * fbpa_stride))) 159 || pri_is_fbpa_addr_shared(g, addr)); 160} 161/* 162 * BE pri addressing 163 */ 164static inline u32 pri_becs_addr_width(void) 165{ 166 return 10;/* from where? */ 167} 168static inline u32 pri_becs_addr_mask(u32 addr) 169{ 170 return addr & ((1 << pri_becs_addr_width()) - 1); 171} 172static inline bool pri_is_be_addr_shared(struct gk20a *g, u32 addr) 173{ 174 u32 rop_shared_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_SHARED_BASE); 175 u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE); 176 return (addr >= rop_shared_base) && 177 (addr < rop_shared_base + rop_stride); 178} 179static inline u32 pri_be_shared_addr(struct gk20a *g, u32 addr) 180{ 181 u32 rop_shared_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_SHARED_BASE); 182 return rop_shared_base + pri_becs_addr_mask(addr); 183} 184static inline bool pri_is_be_addr(struct gk20a *g, u32 addr) 185{ 186 u32 rop_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_BASE); 187 u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE); 188 return ((addr >= rop_base) && 189 (addr < rop_base + g->ltc_count * rop_stride)) || 190 pri_is_be_addr_shared(g, addr); 191} 192 193static inline u32 pri_get_be_num(struct gk20a *g, u32 addr) 194{ 195 u32 i, start; 196 u32 num_fbps = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPS); 197 u32 rop_base = 
nvgpu_get_litter_value(g, GPU_LIT_ROP_BASE); 198 u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE); 199 for (i = 0; i < num_fbps; i++) { 200 start = rop_base + (i * rop_stride); 201 if ((addr >= start) && (addr < (start + rop_stride))) 202 return i; 203 } 204 return 0; 205} 206 207/* 208 * PPC pri addressing 209 */ 210static inline u32 pri_ppccs_addr_width(void) 211{ 212 return 9; /* from where? */ 213} 214static inline u32 pri_ppccs_addr_mask(u32 addr) 215{ 216 return addr & ((1 << pri_ppccs_addr_width()) - 1); 217} 218static inline u32 pri_ppc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 ppc) 219{ 220 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE); 221 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); 222 u32 ppc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_BASE); 223 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); 224 return gpc_base + (gpc * gpc_stride) + 225 ppc_in_gpc_base + (ppc * ppc_in_gpc_stride) + addr; 226} 227 228enum ctxsw_addr_type { 229 CTXSW_ADDR_TYPE_SYS = 0, 230 CTXSW_ADDR_TYPE_GPC = 1, 231 CTXSW_ADDR_TYPE_TPC = 2, 232 CTXSW_ADDR_TYPE_BE = 3, 233 CTXSW_ADDR_TYPE_PPC = 4, 234 CTXSW_ADDR_TYPE_LTCS = 5, 235 CTXSW_ADDR_TYPE_FBPA = 6, 236 CTXSW_ADDR_TYPE_EGPC = 7, 237 CTXSW_ADDR_TYPE_ETPC = 8, 238 CTXSW_ADDR_TYPE_ROP = 9, 239 CTXSW_ADDR_TYPE_FBP = 10, 240}; 241 242#define PRI_BROADCAST_FLAGS_NONE 0U 243#define PRI_BROADCAST_FLAGS_GPC BIT32(0) 244#define PRI_BROADCAST_FLAGS_TPC BIT32(1) 245#define PRI_BROADCAST_FLAGS_BE BIT32(2) 246#define PRI_BROADCAST_FLAGS_PPC BIT32(3) 247#define PRI_BROADCAST_FLAGS_LTCS BIT32(4) 248#define PRI_BROADCAST_FLAGS_LTSS BIT32(5) 249#define PRI_BROADCAST_FLAGS_FBPA BIT32(6) 250#define PRI_BROADCAST_FLAGS_EGPC BIT32(7) 251#define PRI_BROADCAST_FLAGS_ETPC BIT32(8) 252#define PRI_BROADCAST_FLAGS_PMMGPC BIT32(9) 253#define PRI_BROADCAST_FLAGS_PMM_GPCS BIT32(10) 254#define PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCA BIT32(11) 255#define 
PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCB BIT32(12) 256#define PRI_BROADCAST_FLAGS_PMMFBP BIT32(13) 257#define PRI_BROADCAST_FLAGS_PMM_FBPS BIT32(14) 258#define PRI_BROADCAST_FLAGS_PMM_FBPGS_LTC BIT32(15) 259#define PRI_BROADCAST_FLAGS_PMM_FBPGS_ROP BIT32(16) 260 261#endif /* GR_PRI_GK20A_H */
diff --git a/include/gk20a/mm_gk20a.c b/include/gk20a/mm_gk20a.c
deleted file mode 100644
index 10ca84d..0000000
--- a/include/gk20a/mm_gk20a.c
+++ /dev/null
@@ -1,654 +0,0 @@ 1/* 2 * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <trace/events/gk20a.h> 24 25#include <nvgpu/mm.h> 26#include <nvgpu/vm.h> 27#include <nvgpu/vm_area.h> 28#include <nvgpu/dma.h> 29#include <nvgpu/kmem.h> 30#include <nvgpu/timers.h> 31#include <nvgpu/pramin.h> 32#include <nvgpu/list.h> 33#include <nvgpu/nvgpu_mem.h> 34#include <nvgpu/allocator.h> 35#include <nvgpu/semaphore.h> 36#include <nvgpu/page_allocator.h> 37#include <nvgpu/log.h> 38#include <nvgpu/bug.h> 39#include <nvgpu/log2.h> 40#include <nvgpu/enabled.h> 41#include <nvgpu/vidmem.h> 42#include <nvgpu/sizes.h> 43#include <nvgpu/io.h> 44#include <nvgpu/utils.h> 45#include <nvgpu/channel.h> 46 47#include "gk20a.h" 48#include "mm_gk20a.h" 49#include "fence_gk20a.h" 50 51#include <nvgpu/hw/gk20a/hw_gmmu_gk20a.h> 52#include <nvgpu/hw/gk20a/hw_ram_gk20a.h> 53#include <nvgpu/hw/gk20a/hw_pram_gk20a.h> 54#include <nvgpu/hw/gk20a/hw_flush_gk20a.h> 55 56/* 57 * GPU mapping life cycle 58 * ====================== 59 * 60 * Kernel mappings 61 * --------------- 62 * 63 * Kernel mappings are created through vm.map(..., false): 64 * 65 * - Mappings to the same allocations are reused and refcounted. 66 * - This path does not support deferred unmapping (i.e. kernel must wait for 67 * all hw operations on the buffer to complete before unmapping). 68 * - References to dmabuf are owned and managed by the (kernel) clients of 69 * the gk20a_vm layer. 70 * 71 * 72 * User space mappings 73 * ------------------- 74 * 75 * User space mappings are created through as.map_buffer -> vm.map(..., true): 76 * 77 * - Mappings to the same allocations are reused and refcounted. 78 * - This path supports deferred unmapping (i.e. we delay the actual unmapping 79 * until all hw operations have completed). 80 * - References to dmabuf are owned and managed by the vm_gk20a 81 * layer itself. vm.map acquires these refs, and sets 82 * mapped_buffer->own_mem_ref to record that we must release the refs when we 83 * actually unmap. 
84 * 85 */ 86 87/* make sure gk20a_init_mm_support is called before */ 88int gk20a_init_mm_setup_hw(struct gk20a *g) 89{ 90 struct mm_gk20a *mm = &g->mm; 91 int err; 92 93 nvgpu_log_fn(g, " "); 94 95 if (g->ops.fb.set_mmu_page_size) { 96 g->ops.fb.set_mmu_page_size(g); 97 } 98 99 if (g->ops.fb.set_use_full_comp_tag_line) { 100 mm->use_full_comp_tag_line = 101 g->ops.fb.set_use_full_comp_tag_line(g); 102 } 103 104 g->ops.fb.init_hw(g); 105 106 if (g->ops.bus.bar1_bind) { 107 g->ops.bus.bar1_bind(g, &mm->bar1.inst_block); 108 } 109 110 if (g->ops.bus.bar2_bind) { 111 err = g->ops.bus.bar2_bind(g, &mm->bar2.inst_block); 112 if (err) { 113 return err; 114 } 115 } 116 117 if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) { 118 return -EBUSY; 119 } 120 121 nvgpu_log_fn(g, "done"); 122 return 0; 123} 124 125/* for gk20a the "video memory" apertures here are misnomers. */ 126static inline u32 big_valid_pde0_bits(struct gk20a *g, 127 struct nvgpu_gmmu_pd *pd, u64 addr) 128{ 129 u32 pde0_bits = 130 nvgpu_aperture_mask(g, pd->mem, 131 gmmu_pde_aperture_big_sys_mem_ncoh_f(), 132 gmmu_pde_aperture_big_sys_mem_coh_f(), 133 gmmu_pde_aperture_big_video_memory_f()) | 134 gmmu_pde_address_big_sys_f( 135 (u32)(addr >> gmmu_pde_address_shift_v())); 136 137 return pde0_bits; 138} 139 140static inline u32 small_valid_pde1_bits(struct gk20a *g, 141 struct nvgpu_gmmu_pd *pd, u64 addr) 142{ 143 u32 pde1_bits = 144 nvgpu_aperture_mask(g, pd->mem, 145 gmmu_pde_aperture_small_sys_mem_ncoh_f(), 146 gmmu_pde_aperture_small_sys_mem_coh_f(), 147 gmmu_pde_aperture_small_video_memory_f()) | 148 gmmu_pde_vol_small_true_f() | /* tbd: why? 
*/ 149 gmmu_pde_address_small_sys_f( 150 (u32)(addr >> gmmu_pde_address_shift_v())); 151 152 return pde1_bits; 153} 154 155static void update_gmmu_pde_locked(struct vm_gk20a *vm, 156 const struct gk20a_mmu_level *l, 157 struct nvgpu_gmmu_pd *pd, 158 u32 pd_idx, 159 u64 virt_addr, 160 u64 phys_addr, 161 struct nvgpu_gmmu_attrs *attrs) 162{ 163 struct gk20a *g = gk20a_from_vm(vm); 164 bool small_valid, big_valid; 165 u32 pd_offset = pd_offset_from_index(l, pd_idx); 166 u32 pde_v[2] = {0, 0}; 167 168 small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL; 169 big_valid = attrs->pgsz == GMMU_PAGE_SIZE_BIG; 170 171 pde_v[0] = gmmu_pde_size_full_f(); 172 pde_v[0] |= big_valid ? 173 big_valid_pde0_bits(g, pd, phys_addr) : 174 gmmu_pde_aperture_big_invalid_f(); 175 176 pde_v[1] |= (small_valid ? small_valid_pde1_bits(g, pd, phys_addr) : 177 (gmmu_pde_aperture_small_invalid_f() | 178 gmmu_pde_vol_small_false_f())) 179 | 180 (big_valid ? (gmmu_pde_vol_big_true_f()) : 181 gmmu_pde_vol_big_false_f()); 182 183 pte_dbg(g, attrs, 184 "PDE: i=%-4u size=%-2u offs=%-4u pgsz: %c%c | " 185 "GPU %#-12llx phys %#-12llx " 186 "[0x%08x, 0x%08x]", 187 pd_idx, l->entry_size, pd_offset, 188 small_valid ? 'S' : '-', 189 big_valid ? 'B' : '-', 190 virt_addr, phys_addr, 191 pde_v[1], pde_v[0]); 192 193 pd_write(g, &vm->pdb, pd_offset + 0, pde_v[0]); 194 pd_write(g, &vm->pdb, pd_offset + 1, pde_v[1]); 195} 196 197static void __update_pte_sparse(u32 *pte_w) 198{ 199 pte_w[0] = gmmu_pte_valid_false_f(); 200 pte_w[1] |= gmmu_pte_vol_true_f(); 201} 202 203static void __update_pte(struct vm_gk20a *vm, 204 u32 *pte_w, 205 u64 phys_addr, 206 struct nvgpu_gmmu_attrs *attrs) 207{ 208 struct gk20a *g = gk20a_from_vm(vm); 209 u32 page_size = vm->gmmu_page_sizes[attrs->pgsz]; 210 u32 pte_valid = attrs->valid ? 211 gmmu_pte_valid_true_f() : 212 gmmu_pte_valid_false_f(); 213 u32 phys_shifted = phys_addr >> gmmu_pte_address_shift_v(); 214 u32 addr = attrs->aperture == APERTURE_SYSMEM ? 
215 gmmu_pte_address_sys_f(phys_shifted) : 216 gmmu_pte_address_vid_f(phys_shifted); 217 int ctag_shift = ilog2(g->ops.fb.compression_page_size(g)); 218 219 pte_w[0] = pte_valid | addr; 220 221 if (attrs->priv) { 222 pte_w[0] |= gmmu_pte_privilege_true_f(); 223 } 224 225 pte_w[1] = nvgpu_aperture_mask_raw(g, attrs->aperture, 226 gmmu_pte_aperture_sys_mem_ncoh_f(), 227 gmmu_pte_aperture_sys_mem_coh_f(), 228 gmmu_pte_aperture_video_memory_f()) | 229 gmmu_pte_kind_f(attrs->kind_v) | 230 gmmu_pte_comptagline_f((u32)(attrs->ctag >> ctag_shift)); 231 232 if (attrs->ctag && vm->mm->use_full_comp_tag_line && 233 phys_addr & 0x10000) { 234 pte_w[1] |= gmmu_pte_comptagline_f( 235 1 << (gmmu_pte_comptagline_s() - 1)); 236 } 237 238 if (attrs->rw_flag == gk20a_mem_flag_read_only) { 239 pte_w[0] |= gmmu_pte_read_only_true_f(); 240 pte_w[1] |= gmmu_pte_write_disable_true_f(); 241 } else if (attrs->rw_flag == gk20a_mem_flag_write_only) { 242 pte_w[1] |= gmmu_pte_read_disable_true_f(); 243 } 244 245 if (!attrs->cacheable) { 246 pte_w[1] |= gmmu_pte_vol_true_f(); 247 } 248 249 if (attrs->ctag) { 250 attrs->ctag += page_size; 251 } 252} 253 254static void update_gmmu_pte_locked(struct vm_gk20a *vm, 255 const struct gk20a_mmu_level *l, 256 struct nvgpu_gmmu_pd *pd, 257 u32 pd_idx, 258 u64 virt_addr, 259 u64 phys_addr, 260 struct nvgpu_gmmu_attrs *attrs) 261{ 262 struct gk20a *g = gk20a_from_vm(vm); 263 u32 page_size = vm->gmmu_page_sizes[attrs->pgsz]; 264 u32 pd_offset = pd_offset_from_index(l, pd_idx); 265 u32 pte_w[2] = {0, 0}; 266 int ctag_shift = ilog2(g->ops.fb.compression_page_size(g)); 267 268 if (phys_addr) { 269 __update_pte(vm, pte_w, phys_addr, attrs); 270 } else if (attrs->sparse) { 271 __update_pte_sparse(pte_w); 272 } 273 274 pte_dbg(g, attrs, 275 "PTE: i=%-4u size=%-2u offs=%-4u | " 276 "GPU %#-12llx phys %#-12llx " 277 "pgsz: %3dkb perm=%-2s kind=%#02x APT=%-6s %c%c%c%c " 278 "ctag=0x%08x " 279 "[0x%08x, 0x%08x]", 280 pd_idx, l->entry_size, pd_offset, 281 virt_addr, 
phys_addr, 282 page_size >> 10, 283 nvgpu_gmmu_perm_str(attrs->rw_flag), 284 attrs->kind_v, 285 nvgpu_aperture_str(g, attrs->aperture), 286 attrs->cacheable ? 'C' : '-', 287 attrs->sparse ? 'S' : '-', 288 attrs->priv ? 'P' : '-', 289 attrs->valid ? 'V' : '-', 290 (u32)attrs->ctag >> ctag_shift, 291 pte_w[1], pte_w[0]); 292 293 pd_write(g, pd, pd_offset + 0, pte_w[0]); 294 pd_write(g, pd, pd_offset + 1, pte_w[1]); 295} 296 297u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l, 298 struct nvgpu_gmmu_pd *pd, u32 pd_idx) 299{ 300 /* 301 * big and small page sizes are the same 302 */ 303 return GMMU_PAGE_SIZE_SMALL; 304} 305 306u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l, 307 struct nvgpu_gmmu_pd *pd, u32 pd_idx) 308{ 309 /* 310 * return invalid 311 */ 312 return GMMU_NR_PAGE_SIZES; 313} 314 315const struct gk20a_mmu_level gk20a_mm_levels_64k[] = { 316 {.hi_bit = {NV_GMMU_VA_RANGE-1, NV_GMMU_VA_RANGE-1}, 317 .lo_bit = {26, 26}, 318 .update_entry = update_gmmu_pde_locked, 319 .entry_size = 8, 320 .get_pgsz = gk20a_get_pde_pgsz}, 321 {.hi_bit = {25, 25}, 322 .lo_bit = {12, 16}, 323 .update_entry = update_gmmu_pte_locked, 324 .entry_size = 8, 325 .get_pgsz = gk20a_get_pte_pgsz}, 326 {.update_entry = NULL} 327}; 328 329const struct gk20a_mmu_level gk20a_mm_levels_128k[] = { 330 {.hi_bit = {NV_GMMU_VA_RANGE-1, NV_GMMU_VA_RANGE-1}, 331 .lo_bit = {27, 27}, 332 .update_entry = update_gmmu_pde_locked, 333 .entry_size = 8, 334 .get_pgsz = gk20a_get_pde_pgsz}, 335 {.hi_bit = {26, 26}, 336 .lo_bit = {12, 17}, 337 .update_entry = update_gmmu_pte_locked, 338 .entry_size = 8, 339 .get_pgsz = gk20a_get_pte_pgsz}, 340 {.update_entry = NULL} 341}; 342 343int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch) 344{ 345 int err = 0; 346 347 nvgpu_log_fn(ch->g, " "); 348 349 nvgpu_vm_get(vm); 350 ch->vm = vm; 351 err = channel_gk20a_commit_va(ch); 352 if (err) { 353 ch->vm = NULL; 354 } 355 356 nvgpu_log(gk20a_from_vm(vm), 
gpu_dbg_map, "Binding ch=%d -> VM:%s", 357 ch->chid, vm->name); 358 359 return err; 360} 361 362void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, 363 struct vm_gk20a *vm) 364{ 365 u64 pdb_addr = nvgpu_mem_get_addr(g, vm->pdb.mem); 366 u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); 367 u32 pdb_addr_hi = u64_hi32(pdb_addr); 368 369 nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); 370 371 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), 372 nvgpu_aperture_mask(g, vm->pdb.mem, 373 ram_in_page_dir_base_target_sys_mem_ncoh_f(), 374 ram_in_page_dir_base_target_sys_mem_coh_f(), 375 ram_in_page_dir_base_target_vid_mem_f()) | 376 ram_in_page_dir_base_vol_true_f() | 377 ram_in_page_dir_base_lo_f(pdb_addr_lo)); 378 379 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(), 380 ram_in_page_dir_base_hi_f(pdb_addr_hi)); 381} 382 383void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm, 384 u32 big_page_size) 385{ 386 struct gk20a *g = gk20a_from_vm(vm); 387 388 nvgpu_log_info(g, "inst block phys = 0x%llx, kv = 0x%p", 389 nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); 390 391 g->ops.mm.init_pdb(g, inst_block, vm); 392 393 nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(), 394 u64_lo32(vm->va_limit - 1) & ~0xfff); 395 396 nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(), 397 ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1))); 398 399 if (big_page_size && g->ops.mm.set_big_page_size) { 400 g->ops.mm.set_big_page_size(g, inst_block, big_page_size); 401 } 402} 403 404int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) 405{ 406 int err; 407 408 nvgpu_log_fn(g, " "); 409 410 err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); 411 if (err) { 412 nvgpu_err(g, "%s: memory allocation failed", __func__); 413 return err; 414 } 415 416 nvgpu_log_fn(g, "done"); 417 return 0; 418} 419 420int gk20a_mm_fb_flush(struct gk20a *g) 421{ 422 struct mm_gk20a *mm = &g->mm; 423 u32 
data; 424 int ret = 0; 425 struct nvgpu_timeout timeout; 426 u32 retries; 427 428 nvgpu_log_fn(g, " "); 429 430 gk20a_busy_noresume(g); 431 if (!g->power_on) { 432 gk20a_idle_nosuspend(g); 433 return 0; 434 } 435 436 retries = 100; 437 438 if (g->ops.mm.get_flush_retries) { 439 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB); 440 } 441 442 nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); 443 444 nvgpu_mutex_acquire(&mm->l2_op_lock); 445 446 /* Make sure all previous writes are committed to the L2. There's no 447 guarantee that writes are to DRAM. This will be a sysmembar internal 448 to the L2. */ 449 450 trace_gk20a_mm_fb_flush(g->name); 451 452 gk20a_writel(g, flush_fb_flush_r(), 453 flush_fb_flush_pending_busy_f()); 454 455 do { 456 data = gk20a_readl(g, flush_fb_flush_r()); 457 458 if (flush_fb_flush_outstanding_v(data) == 459 flush_fb_flush_outstanding_true_v() || 460 flush_fb_flush_pending_v(data) == 461 flush_fb_flush_pending_busy_v()) { 462 nvgpu_log_info(g, "fb_flush 0x%x", data); 463 nvgpu_udelay(5); 464 } else { 465 break; 466 } 467 } while (!nvgpu_timeout_expired(&timeout)); 468 469 if (nvgpu_timeout_peek_expired(&timeout)) { 470 if (g->ops.fb.dump_vpr_info) { 471 g->ops.fb.dump_vpr_info(g); 472 } 473 if (g->ops.fb.dump_wpr_info) { 474 g->ops.fb.dump_wpr_info(g); 475 } 476 ret = -EBUSY; 477 } 478 479 trace_gk20a_mm_fb_flush_done(g->name); 480 481 nvgpu_mutex_release(&mm->l2_op_lock); 482 483 gk20a_idle_nosuspend(g); 484 485 return ret; 486} 487 488static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) 489{ 490 u32 data; 491 struct nvgpu_timeout timeout; 492 u32 retries = 200; 493 494 trace_gk20a_mm_l2_invalidate(g->name); 495 496 if (g->ops.mm.get_flush_retries) { 497 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV); 498 } 499 500 nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); 501 502 /* Invalidate any clean lines from the L2 so subsequent reads go to 503 DRAM. 
Dirty lines are not affected by this operation. */ 504 gk20a_writel(g, flush_l2_system_invalidate_r(), 505 flush_l2_system_invalidate_pending_busy_f()); 506 507 do { 508 data = gk20a_readl(g, flush_l2_system_invalidate_r()); 509 510 if (flush_l2_system_invalidate_outstanding_v(data) == 511 flush_l2_system_invalidate_outstanding_true_v() || 512 flush_l2_system_invalidate_pending_v(data) == 513 flush_l2_system_invalidate_pending_busy_v()) { 514 nvgpu_log_info(g, "l2_system_invalidate 0x%x", 515 data); 516 nvgpu_udelay(5); 517 } else { 518 break; 519 } 520 } while (!nvgpu_timeout_expired(&timeout)); 521 522 if (nvgpu_timeout_peek_expired(&timeout)) { 523 nvgpu_warn(g, "l2_system_invalidate too many retries"); 524 } 525 526 trace_gk20a_mm_l2_invalidate_done(g->name); 527} 528 529void gk20a_mm_l2_invalidate(struct gk20a *g) 530{ 531 struct mm_gk20a *mm = &g->mm; 532 gk20a_busy_noresume(g); 533 if (g->power_on) { 534 nvgpu_mutex_acquire(&mm->l2_op_lock); 535 gk20a_mm_l2_invalidate_locked(g); 536 nvgpu_mutex_release(&mm->l2_op_lock); 537 } 538 gk20a_idle_nosuspend(g); 539} 540 541void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) 542{ 543 struct mm_gk20a *mm = &g->mm; 544 u32 data; 545 struct nvgpu_timeout timeout; 546 u32 retries = 2000; 547 548 nvgpu_log_fn(g, " "); 549 550 gk20a_busy_noresume(g); 551 if (!g->power_on) { 552 goto hw_was_off; 553 } 554 555 if (g->ops.mm.get_flush_retries) { 556 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH); 557 } 558 559 nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); 560 561 nvgpu_mutex_acquire(&mm->l2_op_lock); 562 563 trace_gk20a_mm_l2_flush(g->name); 564 565 /* Flush all dirty lines from the L2 to DRAM. Lines are left in the L2 566 as clean, so subsequent reads might hit in the L2. 
*/ 567 gk20a_writel(g, flush_l2_flush_dirty_r(), 568 flush_l2_flush_dirty_pending_busy_f()); 569 570 do { 571 data = gk20a_readl(g, flush_l2_flush_dirty_r()); 572 573 if (flush_l2_flush_dirty_outstanding_v(data) == 574 flush_l2_flush_dirty_outstanding_true_v() || 575 flush_l2_flush_dirty_pending_v(data) == 576 flush_l2_flush_dirty_pending_busy_v()) { 577 nvgpu_log_info(g, "l2_flush_dirty 0x%x", data); 578 nvgpu_udelay(5); 579 } else { 580 break; 581 } 582 } while (!nvgpu_timeout_expired_msg(&timeout, 583 "l2_flush_dirty too many retries")); 584 585 trace_gk20a_mm_l2_flush_done(g->name); 586 587 if (invalidate) { 588 gk20a_mm_l2_invalidate_locked(g); 589 } 590 591 nvgpu_mutex_release(&mm->l2_op_lock); 592 593hw_was_off: 594 gk20a_idle_nosuspend(g); 595} 596 597void gk20a_mm_cbc_clean(struct gk20a *g) 598{ 599 struct mm_gk20a *mm = &g->mm; 600 u32 data; 601 struct nvgpu_timeout timeout; 602 u32 retries = 200; 603 604 nvgpu_log_fn(g, " "); 605 606 gk20a_busy_noresume(g); 607 if (!g->power_on) { 608 goto hw_was_off; 609 } 610 611 if (g->ops.mm.get_flush_retries) { 612 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN); 613 } 614 615 nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER); 616 617 nvgpu_mutex_acquire(&mm->l2_op_lock); 618 619 /* Flush all dirty lines from the CBC to L2 */ 620 gk20a_writel(g, flush_l2_clean_comptags_r(), 621 flush_l2_clean_comptags_pending_busy_f()); 622 623 do { 624 data = gk20a_readl(g, flush_l2_clean_comptags_r()); 625 626 if (flush_l2_clean_comptags_outstanding_v(data) == 627 flush_l2_clean_comptags_outstanding_true_v() || 628 flush_l2_clean_comptags_pending_v(data) == 629 flush_l2_clean_comptags_pending_busy_v()) { 630 nvgpu_log_info(g, "l2_clean_comptags 0x%x", data); 631 nvgpu_udelay(5); 632 } else { 633 break; 634 } 635 } while (!nvgpu_timeout_expired_msg(&timeout, 636 "l2_clean_comptags too many retries")); 637 638 nvgpu_mutex_release(&mm->l2_op_lock); 639 640hw_was_off: 641 gk20a_idle_nosuspend(g); 642} 
643 644u32 gk20a_mm_get_iommu_bit(struct gk20a *g) 645{ 646 return 34; 647} 648 649const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, 650 u32 big_page_size) 651{ 652 return (big_page_size == SZ_64K) ? 653 gk20a_mm_levels_64k : gk20a_mm_levels_128k; 654}
diff --git a/include/gk20a/mm_gk20a.h b/include/gk20a/mm_gk20a.h
deleted file mode 100644
index 76a1621..0000000
--- a/include/gk20a/mm_gk20a.h
+++ /dev/null
@@ -1,155 +0,0 @@ 1/* 2 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef MM_GK20A_H 24#define MM_GK20A_H 25 26#include <nvgpu/nvgpu_mem.h> 27#include <nvgpu/allocator.h> 28#include <nvgpu/vm.h> 29#include <nvgpu/list.h> 30#include <nvgpu/rbtree.h> 31#include <nvgpu/kref.h> 32 33enum gk20a_mem_rw_flag; 34 35struct patch_desc { 36 struct nvgpu_mem mem; 37 u32 data_count; 38}; 39 40struct zcull_ctx_desc { 41 u64 gpu_va; 42 u32 ctx_attr; 43 u32 ctx_sw_mode; 44}; 45 46struct pm_ctx_desc { 47 struct nvgpu_mem mem; 48 u32 pm_mode; 49}; 50 51struct compbit_store_desc { 52 struct nvgpu_mem mem; 53 54 /* The value that is written to the hardware. This depends on 55 * on the number of ltcs and is not an address. 
*/ 56 u64 base_hw; 57}; 58 59struct gk20a_buffer_state { 60 struct nvgpu_list_node list; 61 62 /* The valid compbits and the fence must be changed atomically. */ 63 struct nvgpu_mutex lock; 64 65 /* Offset of the surface within the dma-buf whose state is 66 * described by this struct (one dma-buf can contain multiple 67 * surfaces with different states). */ 68 size_t offset; 69 70 /* A bitmask of valid sets of compbits (0 = uncompressed). */ 71 u32 valid_compbits; 72 73 /* The ZBC color used on this buffer. */ 74 u32 zbc_color; 75 76 /* This struct reflects the state of the buffer when this 77 * fence signals. */ 78 struct gk20a_fence *fence; 79}; 80 81static inline struct gk20a_buffer_state * 82gk20a_buffer_state_from_list(struct nvgpu_list_node *node) 83{ 84 return (struct gk20a_buffer_state *) 85 ((uintptr_t)node - offsetof(struct gk20a_buffer_state, list)); 86}; 87 88struct gk20a; 89struct channel_gk20a; 90 91int gk20a_mm_fb_flush(struct gk20a *g); 92void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate); 93void gk20a_mm_cbc_clean(struct gk20a *g); 94void gk20a_mm_l2_invalidate(struct gk20a *g); 95 96#define dev_from_vm(vm) dev_from_gk20a(vm->mm->g) 97 98void gk20a_mm_ltc_isr(struct gk20a *g); 99 100bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g); 101 102int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block); 103void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm, 104 u32 big_page_size); 105int gk20a_init_mm_setup_hw(struct gk20a *g); 106 107u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, 108 u64 map_offset, 109 struct nvgpu_sgt *sgt, 110 u64 buffer_offset, 111 u64 size, 112 u32 pgsz_idx, 113 u8 kind_v, 114 u32 ctag_offset, 115 u32 flags, 116 enum gk20a_mem_rw_flag rw_flag, 117 bool clear_ctags, 118 bool sparse, 119 bool priv, 120 struct vm_gk20a_mapping_batch *batch, 121 enum nvgpu_aperture aperture); 122 123void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm, 124 u64 vaddr, 125 u64 size, 126 u32 pgsz_idx, 127 
bool va_allocated, 128 enum gk20a_mem_rw_flag rw_flag, 129 bool sparse, 130 struct vm_gk20a_mapping_batch *batch); 131 132/* vm-as interface */ 133struct nvgpu_as_alloc_space_args; 134struct nvgpu_as_free_space_args; 135int gk20a_vm_release_share(struct gk20a_as_share *as_share); 136int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch); 137 138void pde_range_from_vaddr_range(struct vm_gk20a *vm, 139 u64 addr_lo, u64 addr_hi, 140 u32 *pde_lo, u32 *pde_hi); 141u32 gk20a_mm_get_iommu_bit(struct gk20a *g); 142 143const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, 144 u32 big_page_size); 145void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem, 146 struct vm_gk20a *vm); 147 148extern const struct gk20a_mmu_level gk20a_mm_levels_64k[]; 149extern const struct gk20a_mmu_level gk20a_mm_levels_128k[]; 150 151u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l, 152 struct nvgpu_gmmu_pd *pd, u32 pd_idx); 153u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l, 154 struct nvgpu_gmmu_pd *pd, u32 pd_idx); 155#endif /* MM_GK20A_H */
diff --git a/include/gk20a/pmu_gk20a.c b/include/gk20a/pmu_gk20a.c
deleted file mode 100644
index 63a32f0..0000000
--- a/include/gk20a/pmu_gk20a.c
+++ /dev/null
@@ -1,879 +0,0 @@ 1/* 2 * GK20A PMU (aka. gPMU outside gk20a context) 3 * 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24 25#include <nvgpu/nvgpu_common.h> 26#include <nvgpu/timers.h> 27#include <nvgpu/kmem.h> 28#include <nvgpu/dma.h> 29#include <nvgpu/log.h> 30#include <nvgpu/bug.h> 31#include <nvgpu/firmware.h> 32#include <nvgpu/falcon.h> 33#include <nvgpu/mm.h> 34#include <nvgpu/io.h> 35#include <nvgpu/clk_arb.h> 36#include <nvgpu/utils.h> 37#include <nvgpu/unit.h> 38 39#include "gk20a.h" 40#include "gr_gk20a.h" 41#include "pmu_gk20a.h" 42 43#include <nvgpu/hw/gk20a/hw_mc_gk20a.h> 44#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h> 45#include <nvgpu/hw/gk20a/hw_top_gk20a.h> 46 47#define gk20a_dbg_pmu(g, fmt, arg...) 
\ 48 nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) 49 50bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) 51{ 52 u32 i = 0, j = strlen(strings); 53 54 for (; i < j; i++) { 55 if (strings[i] == '%') { 56 if (strings[i + 1] == 'x' || strings[i + 1] == 'X') { 57 *hex_pos = i; 58 return true; 59 } 60 } 61 } 62 *hex_pos = -1; 63 return false; 64} 65 66static void print_pmu_trace(struct nvgpu_pmu *pmu) 67{ 68 struct gk20a *g = pmu->g; 69 u32 i = 0, j = 0, k, l, m, count; 70 char part_str[40], buf[0x40]; 71 void *tracebuffer; 72 char *trace; 73 u32 *trace1; 74 75 /* allocate system memory to copy pmu trace buffer */ 76 tracebuffer = nvgpu_kzalloc(g, GK20A_PMU_TRACE_BUFSIZE); 77 if (tracebuffer == NULL) { 78 return; 79 } 80 81 /* read pmu traces into system memory buffer */ 82 nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, tracebuffer, 83 GK20A_PMU_TRACE_BUFSIZE); 84 85 trace = (char *)tracebuffer; 86 trace1 = (u32 *)tracebuffer; 87 88 nvgpu_err(g, "dump PMU trace buffer"); 89 for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) { 90 for (j = 0; j < 0x40; j++) { 91 if (trace1[(i / 4) + j]) { 92 break; 93 } 94 } 95 if (j == 0x40) { 96 break; 97 } 98 count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]); 99 l = 0; 100 m = 0; 101 while (nvgpu_find_hex_in_string((trace+i+20+m), g, &k)) { 102 if (k >= 40) { 103 break; 104 } 105 strncpy(part_str, (trace+i+20+m), k); 106 part_str[k] = '\0'; 107 count += scnprintf((buf + count), 0x40, "%s0x%x", 108 part_str, trace1[(i / 4) + 1 + l]); 109 l++; 110 m += k + 2; 111 } 112 113 scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); 114 nvgpu_err(g, "%s", buf); 115 } 116 117 nvgpu_kfree(g, tracebuffer); 118} 119 120u32 gk20a_pmu_get_irqdest(struct gk20a *g) 121{ 122 u32 intr_dest; 123 124 /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */ 125 intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) | 126 pwr_falcon_irqdest_host_wdtmr_f(1) | 127 pwr_falcon_irqdest_host_mthd_f(0) | 128 pwr_falcon_irqdest_host_ctxsw_f(0) | 129 
pwr_falcon_irqdest_host_halt_f(1) | 130 pwr_falcon_irqdest_host_exterr_f(0) | 131 pwr_falcon_irqdest_host_swgen0_f(1) | 132 pwr_falcon_irqdest_host_swgen1_f(0) | 133 pwr_falcon_irqdest_host_ext_f(0xff) | 134 pwr_falcon_irqdest_target_gptmr_f(1) | 135 pwr_falcon_irqdest_target_wdtmr_f(0) | 136 pwr_falcon_irqdest_target_mthd_f(0) | 137 pwr_falcon_irqdest_target_ctxsw_f(0) | 138 pwr_falcon_irqdest_target_halt_f(0) | 139 pwr_falcon_irqdest_target_exterr_f(0) | 140 pwr_falcon_irqdest_target_swgen0_f(0) | 141 pwr_falcon_irqdest_target_swgen1_f(0) | 142 pwr_falcon_irqdest_target_ext_f(0xff); 143 144 return intr_dest; 145} 146 147void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) 148{ 149 struct gk20a *g = gk20a_from_pmu(pmu); 150 u32 intr_mask; 151 u32 intr_dest; 152 153 nvgpu_log_fn(g, " "); 154 155 g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true, 156 mc_intr_mask_0_pmu_enabled_f()); 157 g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, false, 158 mc_intr_mask_1_pmu_enabled_f()); 159 160 nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0); 161 162 if (enable) { 163 intr_dest = g->ops.pmu.get_irqdest(g); 164 /* 0=disable, 1=enable */ 165 intr_mask = pwr_falcon_irqmset_gptmr_f(1) | 166 pwr_falcon_irqmset_wdtmr_f(1) | 167 pwr_falcon_irqmset_mthd_f(0) | 168 pwr_falcon_irqmset_ctxsw_f(0) | 169 pwr_falcon_irqmset_halt_f(1) | 170 pwr_falcon_irqmset_exterr_f(1) | 171 pwr_falcon_irqmset_swgen0_f(1) | 172 pwr_falcon_irqmset_swgen1_f(1); 173 174 nvgpu_flcn_set_irq(pmu->flcn, true, intr_mask, intr_dest); 175 176 g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_ENABLE, true, 177 mc_intr_mask_0_pmu_enabled_f()); 178 } 179 180 nvgpu_log_fn(g, "done"); 181} 182 183 184 185int pmu_bootstrap(struct nvgpu_pmu *pmu) 186{ 187 struct gk20a *g = gk20a_from_pmu(pmu); 188 struct mm_gk20a *mm = &g->mm; 189 struct pmu_ucode_desc *desc = pmu->desc; 190 u64 addr_code, addr_data, addr_load; 191 u32 i, blocks, addr_args; 192 193 nvgpu_log_fn(g, " "); 194 195 gk20a_writel(g, 
pwr_falcon_itfen_r(), 196 gk20a_readl(g, pwr_falcon_itfen_r()) | 197 pwr_falcon_itfen_ctxen_enable_f()); 198 gk20a_writel(g, pwr_pmu_new_instblk_r(), 199 pwr_pmu_new_instblk_ptr_f( 200 nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) | 201 pwr_pmu_new_instblk_valid_f(1) | 202 pwr_pmu_new_instblk_target_sys_coh_f()); 203 204 /* TBD: load all other surfaces */ 205 g->ops.pmu_ver.set_pmu_cmdline_args_trace_size( 206 pmu, GK20A_PMU_TRACE_BUFSIZE); 207 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu); 208 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx( 209 pmu, GK20A_PMU_DMAIDX_VIRT); 210 211 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, 212 g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK)); 213 214 addr_args = (pwr_falcon_hwcfg_dmem_size_v( 215 gk20a_readl(g, pwr_falcon_hwcfg_r())) 216 << GK20A_PMU_DMEM_BLKSIZE2) - 217 g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); 218 219 nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args, 220 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)), 221 g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0); 222 223 gk20a_writel(g, pwr_falcon_dmemc_r(0), 224 pwr_falcon_dmemc_offs_f(0) | 225 pwr_falcon_dmemc_blk_f(0) | 226 pwr_falcon_dmemc_aincw_f(1)); 227 228 addr_code = u64_lo32((pmu->ucode.gpu_va + 229 desc->app_start_offset + 230 desc->app_resident_code_offset) >> 8) ; 231 addr_data = u64_lo32((pmu->ucode.gpu_va + 232 desc->app_start_offset + 233 desc->app_resident_data_offset) >> 8); 234 addr_load = u64_lo32((pmu->ucode.gpu_va + 235 desc->bootloader_start_offset) >> 8); 236 237 gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE); 238 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code); 239 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_size); 240 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size); 241 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry); 242 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data); 243 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size); 244 
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code); 245 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1); 246 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args); 247 248 g->ops.pmu.write_dmatrfbase(g, 249 addr_load - (desc->bootloader_imem_offset >> 8)); 250 251 blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8; 252 253 for (i = 0; i < blocks; i++) { 254 gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(), 255 desc->bootloader_imem_offset + (i << 8)); 256 gk20a_writel(g, pwr_falcon_dmatrffboffs_r(), 257 desc->bootloader_imem_offset + (i << 8)); 258 gk20a_writel(g, pwr_falcon_dmatrfcmd_r(), 259 pwr_falcon_dmatrfcmd_imem_f(1) | 260 pwr_falcon_dmatrfcmd_write_f(0) | 261 pwr_falcon_dmatrfcmd_size_f(6) | 262 pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE)); 263 } 264 265 nvgpu_flcn_bootstrap(g->pmu.flcn, desc->bootloader_entry_point); 266 267 gk20a_writel(g, pwr_falcon_os_r(), desc->app_version); 268 269 return 0; 270} 271 272void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id) 273{ 274 gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id), 275 PMU_PG_IDLE_THRESHOLD); 276 gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id), 277 PMU_PG_POST_POWERUP_IDLE_THRESHOLD); 278} 279 280int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) 281{ 282 struct gk20a *g = gk20a_from_pmu(pmu); 283 struct pmu_mutex *mutex; 284 u32 data, owner, max_retry; 285 286 if (!pmu->initialized) { 287 return -EINVAL; 288 } 289 290 BUG_ON(!token); 291 BUG_ON(!PMU_MUTEX_ID_IS_VALID(id)); 292 BUG_ON(id > pmu->mutex_cnt); 293 294 mutex = &pmu->mutex[id]; 295 296 owner = pwr_pmu_mutex_value_v( 297 gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); 298 299 if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) { 300 BUG_ON(mutex->ref_cnt == 0); 301 gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token); 302 mutex->ref_cnt++; 303 return 0; 304 } 305 306 max_retry = 40; 307 do { 308 data = pwr_pmu_mutex_id_value_v( 309 gk20a_readl(g, pwr_pmu_mutex_id_r())); 310 
if (data == pwr_pmu_mutex_id_value_init_v() || 311 data == pwr_pmu_mutex_id_value_not_avail_v()) { 312 nvgpu_warn(g, 313 "fail to generate mutex token: val 0x%08x", 314 owner); 315 nvgpu_usleep_range(20, 40); 316 continue; 317 } 318 319 owner = data; 320 gk20a_writel(g, pwr_pmu_mutex_r(mutex->index), 321 pwr_pmu_mutex_value_f(owner)); 322 323 data = pwr_pmu_mutex_value_v( 324 gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); 325 326 if (owner == data) { 327 mutex->ref_cnt = 1; 328 gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x", 329 mutex->index, *token); 330 *token = owner; 331 return 0; 332 } else { 333 nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x", 334 mutex->index); 335 336 data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); 337 data = set_field(data, 338 pwr_pmu_mutex_id_release_value_m(), 339 pwr_pmu_mutex_id_release_value_f(owner)); 340 gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); 341 342 nvgpu_usleep_range(20, 40); 343 continue; 344 } 345 } while (max_retry-- > 0); 346 347 return -EBUSY; 348} 349 350int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) 351{ 352 struct gk20a *g = gk20a_from_pmu(pmu); 353 struct pmu_mutex *mutex; 354 u32 owner, data; 355 356 if (!pmu->initialized) { 357 return -EINVAL; 358 } 359 360 BUG_ON(!token); 361 BUG_ON(!PMU_MUTEX_ID_IS_VALID(id)); 362 BUG_ON(id > pmu->mutex_cnt); 363 364 mutex = &pmu->mutex[id]; 365 366 owner = pwr_pmu_mutex_value_v( 367 gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); 368 369 if (*token != owner) { 370 nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x", 371 *token, owner); 372 return -EINVAL; 373 } 374 375 if (--mutex->ref_cnt > 0) { 376 return -EBUSY; 377 } 378 379 gk20a_writel(g, pwr_pmu_mutex_r(mutex->index), 380 pwr_pmu_mutex_value_initial_lock_f()); 381 382 data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); 383 data = set_field(data, pwr_pmu_mutex_id_release_value_m(), 384 pwr_pmu_mutex_id_release_value_f(owner)); 385 gk20a_writel(g, 
pwr_pmu_mutex_id_release_r(), data); 386 387 gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x", 388 mutex->index, *token); 389 390 return 0; 391} 392 393int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue, 394 u32 *head, bool set) 395{ 396 u32 queue_head_size = 0; 397 398 if (g->ops.pmu.pmu_get_queue_head_size) { 399 queue_head_size = g->ops.pmu.pmu_get_queue_head_size(); 400 } 401 402 BUG_ON(!head || !queue_head_size); 403 404 if (PMU_IS_COMMAND_QUEUE(queue->id)) { 405 406 if (queue->index >= queue_head_size) { 407 return -EINVAL; 408 } 409 410 if (!set) { 411 *head = pwr_pmu_queue_head_address_v( 412 gk20a_readl(g, 413 g->ops.pmu.pmu_get_queue_head(queue->index))); 414 } else { 415 gk20a_writel(g, 416 g->ops.pmu.pmu_get_queue_head(queue->index), 417 pwr_pmu_queue_head_address_f(*head)); 418 } 419 } else { 420 if (!set) { 421 *head = pwr_pmu_msgq_head_val_v( 422 gk20a_readl(g, pwr_pmu_msgq_head_r())); 423 } else { 424 gk20a_writel(g, 425 pwr_pmu_msgq_head_r(), 426 pwr_pmu_msgq_head_val_f(*head)); 427 } 428 } 429 430 return 0; 431} 432 433int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue, 434 u32 *tail, bool set) 435{ 436 u32 queue_tail_size = 0; 437 438 if (g->ops.pmu.pmu_get_queue_tail_size) { 439 queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size(); 440 } 441 442 BUG_ON(!tail || !queue_tail_size); 443 444 if (PMU_IS_COMMAND_QUEUE(queue->id)) { 445 446 if (queue->index >= queue_tail_size) { 447 return -EINVAL; 448 } 449 450 if (!set) { 451 *tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g, 452 g->ops.pmu.pmu_get_queue_tail(queue->index))); 453 } else { 454 gk20a_writel(g, 455 g->ops.pmu.pmu_get_queue_tail(queue->index), 456 pwr_pmu_queue_tail_address_f(*tail)); 457 } 458 459 } else { 460 if (!set) { 461 *tail = pwr_pmu_msgq_tail_val_v( 462 gk20a_readl(g, pwr_pmu_msgq_tail_r())); 463 } else { 464 gk20a_writel(g, 465 pwr_pmu_msgq_tail_r(), 466 pwr_pmu_msgq_tail_val_f(*tail)); 467 } 468 } 469 470 return 0; 471} 
472 473void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set) 474{ 475 struct gk20a *g = gk20a_from_pmu(pmu); 476 u32 queue_tail_size = 0; 477 478 if (g->ops.pmu.pmu_get_queue_tail_size) { 479 queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size(); 480 } 481 482 BUG_ON(!tail || !queue_tail_size); 483 484 if (!set) { 485 *tail = pwr_pmu_msgq_tail_val_v( 486 gk20a_readl(g, pwr_pmu_msgq_tail_r())); 487 } else { 488 gk20a_writel(g, 489 pwr_pmu_msgq_tail_r(), 490 pwr_pmu_msgq_tail_val_f(*tail)); 491 } 492} 493 494void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr) 495{ 496 gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr); 497} 498 499bool gk20a_pmu_is_engine_in_reset(struct gk20a *g) 500{ 501 bool status = false; 502 503 status = g->ops.mc.is_enabled(g, NVGPU_UNIT_PWR); 504 505 return status; 506} 507 508int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset) 509{ 510 u32 reset_mask = g->ops.mc.reset_mask(g, NVGPU_UNIT_PWR); 511 512 if (do_reset) { 513 g->ops.mc.enable(g, reset_mask); 514 } else { 515 g->ops.mc.disable(g, reset_mask); 516 } 517 518 return 0; 519} 520 521bool gk20a_is_pmu_supported(struct gk20a *g) 522{ 523 return true; 524} 525 526u32 gk20a_pmu_pg_engines_list(struct gk20a *g) 527{ 528 return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS); 529} 530 531u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) 532{ 533 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { 534 return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING; 535 } 536 537 return 0; 538} 539 540static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, 541 void *param, u32 handle, u32 status) 542{ 543 struct nvgpu_pmu *pmu = param; 544 gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE"); 545 pmu->zbc_save_done = 1; 546} 547 548void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) 549{ 550 struct nvgpu_pmu *pmu = &g->pmu; 551 struct pmu_cmd cmd; 552 u32 seq; 553 554 if (!pmu->pmu_ready || !entries || !pmu->zbc_ready) { 555 return; 556 } 557 558 memset(&cmd, 0, sizeof(struct 
pmu_cmd)); 559 cmd.hdr.unit_id = PMU_UNIT_PG; 560 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd); 561 cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update; 562 cmd.cmd.zbc.entry_mask = ZBC_MASK(entries); 563 564 pmu->zbc_save_done = 0; 565 566 gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE"); 567 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 568 pmu_handle_zbc_msg, pmu, &seq, ~0); 569 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), 570 &pmu->zbc_save_done, 1); 571 if (!pmu->zbc_save_done) { 572 nvgpu_err(g, "ZBC save timeout"); 573 } 574} 575 576int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu, 577 struct nv_pmu_therm_msg *msg) 578{ 579 struct gk20a *g = gk20a_from_pmu(pmu); 580 581 nvgpu_log_fn(g, " "); 582 583 switch (msg->msg_type) { 584 case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION: 585 if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) { 586 nvgpu_clk_arb_send_thermal_alarm(pmu->g); 587 } else { 588 gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d", 589 msg->hw_slct_msg.mask); 590 } 591 break; 592 default: 593 gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type); 594 break; 595 } 596 597 return 0; 598} 599 600void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) 601{ 602 struct gk20a *g = gk20a_from_pmu(pmu); 603 604 gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x", 605 gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3))); 606 gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x", 607 gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3))); 608 gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x", 609 gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3))); 610 gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x", 611 gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0))); 612 gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x", 613 gk20a_readl(g, pwr_pmu_pg_intren_r(0))); 614 615 gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x", 616 gk20a_readl(g, pwr_pmu_idle_count_r(3))); 617 
gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x", 618 gk20a_readl(g, pwr_pmu_idle_count_r(4))); 619 gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x", 620 gk20a_readl(g, pwr_pmu_idle_count_r(7))); 621} 622 623void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) 624{ 625 struct gk20a *g = gk20a_from_pmu(pmu); 626 unsigned int i; 627 628 for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++) { 629 nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x", 630 i, gk20a_readl(g, pwr_pmu_mailbox_r(i))); 631 } 632 633 for (i = 0; i < pwr_pmu_debug__size_1_v(); i++) { 634 nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x", 635 i, gk20a_readl(g, pwr_pmu_debug_r(i))); 636 } 637 638 i = gk20a_readl(g, pwr_pmu_bar0_error_status_r()); 639 nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i); 640 if (i != 0) { 641 nvgpu_err(g, "pwr_pmu_bar0_addr_r : 0x%x", 642 gk20a_readl(g, pwr_pmu_bar0_addr_r())); 643 nvgpu_err(g, "pwr_pmu_bar0_data_r : 0x%x", 644 gk20a_readl(g, pwr_pmu_bar0_data_r())); 645 nvgpu_err(g, "pwr_pmu_bar0_timeout_r : 0x%x", 646 gk20a_readl(g, pwr_pmu_bar0_timeout_r())); 647 nvgpu_err(g, "pwr_pmu_bar0_ctl_r : 0x%x", 648 gk20a_readl(g, pwr_pmu_bar0_ctl_r())); 649 } 650 651 i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r()); 652 nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i); 653 654 i = gk20a_readl(g, pwr_falcon_exterrstat_r()); 655 nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i); 656 if (pwr_falcon_exterrstat_valid_v(i) == 657 pwr_falcon_exterrstat_valid_true_v()) { 658 nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x", 659 gk20a_readl(g, pwr_falcon_exterraddr_r())); 660 } 661 662 /* Print PMU F/W debug prints */ 663 print_pmu_trace(pmu); 664} 665 666bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu) 667{ 668 struct gk20a *g = gk20a_from_pmu(pmu); 669 u32 servicedpmuint; 670 671 servicedpmuint = pwr_falcon_irqstat_halt_true_f() | 672 pwr_falcon_irqstat_exterr_true_f() | 673 pwr_falcon_irqstat_swgen0_true_f(); 674 675 if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) { 676 
return true; 677 } 678 679 return false; 680} 681 682void gk20a_pmu_isr(struct gk20a *g) 683{ 684 struct nvgpu_pmu *pmu = &g->pmu; 685 struct nvgpu_falcon_queue *queue; 686 u32 intr, mask; 687 bool recheck = false; 688 689 nvgpu_log_fn(g, " "); 690 691 nvgpu_mutex_acquire(&pmu->isr_mutex); 692 if (!pmu->isr_enabled) { 693 nvgpu_mutex_release(&pmu->isr_mutex); 694 return; 695 } 696 697 mask = gk20a_readl(g, pwr_falcon_irqmask_r()) & 698 gk20a_readl(g, pwr_falcon_irqdest_r()); 699 700 intr = gk20a_readl(g, pwr_falcon_irqstat_r()); 701 702 gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr); 703 704 intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; 705 if (!intr || pmu->pmu_state == PMU_STATE_OFF) { 706 gk20a_writel(g, pwr_falcon_irqsclr_r(), intr); 707 nvgpu_mutex_release(&pmu->isr_mutex); 708 return; 709 } 710 711 if (intr & pwr_falcon_irqstat_halt_true_f()) { 712 nvgpu_err(g, "pmu halt intr not implemented"); 713 nvgpu_pmu_dump_falcon_stats(pmu); 714 if (gk20a_readl(g, pwr_pmu_mailbox_r 715 (PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) == 716 PMU_MODE_MISMATCH_STATUS_VAL) { 717 if (g->ops.pmu.dump_secure_fuses) { 718 g->ops.pmu.dump_secure_fuses(g); 719 } 720 } 721 } 722 if (intr & pwr_falcon_irqstat_exterr_true_f()) { 723 nvgpu_err(g, 724 "pmu exterr intr not implemented. 
Clearing interrupt."); 725 nvgpu_pmu_dump_falcon_stats(pmu); 726 727 gk20a_writel(g, pwr_falcon_exterrstat_r(), 728 gk20a_readl(g, pwr_falcon_exterrstat_r()) & 729 ~pwr_falcon_exterrstat_valid_m()); 730 } 731 732 if (g->ops.pmu.handle_ext_irq) { 733 g->ops.pmu.handle_ext_irq(g, intr); 734 } 735 736 if (intr & pwr_falcon_irqstat_swgen0_true_f()) { 737 nvgpu_pmu_process_message(pmu); 738 recheck = true; 739 } 740 741 gk20a_writel(g, pwr_falcon_irqsclr_r(), intr); 742 743 if (recheck) { 744 queue = &pmu->queue[PMU_MESSAGE_QUEUE]; 745 if (!nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) { 746 gk20a_writel(g, pwr_falcon_irqsset_r(), 747 pwr_falcon_irqsset_swgen0_set_f()); 748 } 749 } 750 751 nvgpu_mutex_release(&pmu->isr_mutex); 752} 753 754void gk20a_pmu_init_perfmon_counter(struct gk20a *g) 755{ 756 u32 data; 757 758 /* use counter #3 for GR && CE2 busy cycles */ 759 gk20a_writel(g, pwr_pmu_idle_mask_r(3), 760 pwr_pmu_idle_mask_gr_enabled_f() | 761 pwr_pmu_idle_mask_ce_2_enabled_f()); 762 763 /* assign same mask setting from GR ELPG to counter #3 */ 764 data = gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(0)); 765 gk20a_writel(g, pwr_pmu_idle_mask_1_r(3), data); 766 767 /* disable idle filtering for counters 3 and 6 */ 768 data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(3)); 769 data = set_field(data, pwr_pmu_idle_ctrl_value_m() | 770 pwr_pmu_idle_ctrl_filter_m(), 771 pwr_pmu_idle_ctrl_value_busy_f() | 772 pwr_pmu_idle_ctrl_filter_disabled_f()); 773 gk20a_writel(g, pwr_pmu_idle_ctrl_r(3), data); 774 775 /* use counter #6 for total cycles */ 776 data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(6)); 777 data = set_field(data, pwr_pmu_idle_ctrl_value_m() | 778 pwr_pmu_idle_ctrl_filter_m(), 779 pwr_pmu_idle_ctrl_value_always_f() | 780 pwr_pmu_idle_ctrl_filter_disabled_f()); 781 gk20a_writel(g, pwr_pmu_idle_ctrl_r(6), data); 782 783 /* 784 * We don't want to disturb counters #3 and #6, which are used by 785 * perfmon, so we add wiring also to counters #1 and #2 for 786 * exposing raw counter 
readings. 787 */ 788 gk20a_writel(g, pwr_pmu_idle_mask_r(1), 789 pwr_pmu_idle_mask_gr_enabled_f() | 790 pwr_pmu_idle_mask_ce_2_enabled_f()); 791 792 data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(1)); 793 data = set_field(data, pwr_pmu_idle_ctrl_value_m() | 794 pwr_pmu_idle_ctrl_filter_m(), 795 pwr_pmu_idle_ctrl_value_busy_f() | 796 pwr_pmu_idle_ctrl_filter_disabled_f()); 797 gk20a_writel(g, pwr_pmu_idle_ctrl_r(1), data); 798 799 data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(2)); 800 data = set_field(data, pwr_pmu_idle_ctrl_value_m() | 801 pwr_pmu_idle_ctrl_filter_m(), 802 pwr_pmu_idle_ctrl_value_always_f() | 803 pwr_pmu_idle_ctrl_filter_disabled_f()); 804 gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data); 805 806 /* 807 * use counters 4 and 0 for perfmon to log busy cycles and total cycles 808 * counter #0 overflow sets pmu idle intr status bit 809 */ 810 gk20a_writel(g, pwr_pmu_idle_intr_r(), 811 pwr_pmu_idle_intr_en_f(0)); 812 813 gk20a_writel(g, pwr_pmu_idle_threshold_r(0), 814 pwr_pmu_idle_threshold_value_f(0x7FFFFFFF)); 815 816 data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(0)); 817 data = set_field(data, pwr_pmu_idle_ctrl_value_m() | 818 pwr_pmu_idle_ctrl_filter_m(), 819 pwr_pmu_idle_ctrl_value_always_f() | 820 pwr_pmu_idle_ctrl_filter_disabled_f()); 821 gk20a_writel(g, pwr_pmu_idle_ctrl_r(0), data); 822 823 gk20a_writel(g, pwr_pmu_idle_mask_r(4), 824 pwr_pmu_idle_mask_gr_enabled_f() | 825 pwr_pmu_idle_mask_ce_2_enabled_f()); 826 827 data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(4)); 828 data = set_field(data, pwr_pmu_idle_ctrl_value_m() | 829 pwr_pmu_idle_ctrl_filter_m(), 830 pwr_pmu_idle_ctrl_value_busy_f() | 831 pwr_pmu_idle_ctrl_filter_disabled_f()); 832 gk20a_writel(g, pwr_pmu_idle_ctrl_r(4), data); 833 834 gk20a_writel(g, pwr_pmu_idle_count_r(0), pwr_pmu_idle_count_reset_f(1)); 835 gk20a_writel(g, pwr_pmu_idle_count_r(4), pwr_pmu_idle_count_reset_f(1)); 836 gk20a_writel(g, pwr_pmu_idle_intr_status_r(), 837 pwr_pmu_idle_intr_status_intr_f(1)); 838} 839 840u32 
gk20a_pmu_read_idle_counter(struct gk20a *g, u32 counter_id) 841{ 842 return pwr_pmu_idle_count_value_v( 843 gk20a_readl(g, pwr_pmu_idle_count_r(counter_id))); 844} 845 846void gk20a_pmu_reset_idle_counter(struct gk20a *g, u32 counter_id) 847{ 848 gk20a_writel(g, pwr_pmu_idle_count_r(counter_id), 849 pwr_pmu_idle_count_reset_f(1)); 850} 851 852u32 gk20a_pmu_read_idle_intr_status(struct gk20a *g) 853{ 854 return pwr_pmu_idle_intr_status_intr_v( 855 gk20a_readl(g, pwr_pmu_idle_intr_status_r())); 856} 857 858void gk20a_pmu_clear_idle_intr_status(struct gk20a *g) 859{ 860 gk20a_writel(g, pwr_pmu_idle_intr_status_r(), 861 pwr_pmu_idle_intr_status_intr_f(1)); 862} 863 864void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, 865 struct pmu_pg_stats_data *pg_stat_data) 866{ 867 struct nvgpu_pmu *pmu = &g->pmu; 868 struct pmu_pg_stats stats; 869 870 nvgpu_flcn_copy_from_dmem(pmu->flcn, 871 pmu->stat_dmem_offset[pg_engine_id], 872 (u8 *)&stats, sizeof(struct pmu_pg_stats), 0); 873 874 pg_stat_data->ingating_time = stats.pg_ingating_time_us; 875 pg_stat_data->ungating_time = stats.pg_ungating_time_us; 876 pg_stat_data->gating_cnt = stats.pg_gating_cnt; 877 pg_stat_data->avg_entry_latency_us = stats.pg_avg_entry_time_us; 878 pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us; 879}
diff --git a/include/gk20a/pmu_gk20a.h b/include/gk20a/pmu_gk20a.h
deleted file mode 100644
index 65ffd63..0000000
--- a/include/gk20a/pmu_gk20a.h
+++ /dev/null
@@ -1,80 +0,0 @@ 1/* 2 * drivers/video/tegra/host/gk20a/pmu_gk20a.h 3 * 4 * GK20A PMU (aka. gPMU outside gk20a context) 5 * 6 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * DEALINGS IN THE SOFTWARE. 
25 */ 26#ifndef NVGPU_GK20A_PMU_GK20A_H 27#define NVGPU_GK20A_PMU_GK20A_H 28 29#include <nvgpu/flcnif_cmn.h> 30#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> 31#include <nvgpu/pmu.h> 32 33struct nvgpu_firmware; 34 35#define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) 36 37bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu); 38void gk20a_pmu_isr(struct gk20a *g); 39 40u32 gk20a_pmu_pg_engines_list(struct gk20a *g); 41u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id); 42 43void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries); 44 45void gk20a_pmu_init_perfmon_counter(struct gk20a *g); 46 47void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id); 48 49int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); 50int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); 51 52int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue, 53 u32 *head, bool set); 54int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue, 55 u32 *tail, bool set); 56void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set); 57 58u32 gk20a_pmu_read_idle_counter(struct gk20a *g, u32 counter_id); 59void gk20a_pmu_reset_idle_counter(struct gk20a *g, u32 counter_id); 60 61u32 gk20a_pmu_read_idle_intr_status(struct gk20a *g); 62void gk20a_pmu_clear_idle_intr_status(struct gk20a *g); 63 64void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr); 65bool gk20a_is_pmu_supported(struct gk20a *g); 66 67int pmu_bootstrap(struct nvgpu_pmu *pmu); 68 69void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu); 70void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); 71 72void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable); 73void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, 74 void *param, u32 handle, u32 status); 75void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, 76 struct pmu_pg_stats_data *pg_stat_data); 77bool gk20a_pmu_is_engine_in_reset(struct gk20a *g); 78int 
gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset); 79u32 gk20a_pmu_get_irqdest(struct gk20a *g); 80#endif /*NVGPU_GK20A_PMU_GK20A_H*/
diff --git a/include/gk20a/regops_gk20a.c b/include/gk20a/regops_gk20a.c
deleted file mode 100644
index 0aec4f8..0000000
--- a/include/gk20a/regops_gk20a.c
+++ /dev/null
@@ -1,472 +0,0 @@ 1/* 2 * Tegra GK20A GPU Debugger Driver Register Ops 3 * 4 * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#include "gk20a.h" 26#include "gr_gk20a.h" 27#include "dbg_gpu_gk20a.h" 28#include "regops_gk20a.h" 29 30#include <nvgpu/log.h> 31#include <nvgpu/bsearch.h> 32#include <nvgpu/bug.h> 33#include <nvgpu/io.h> 34 35static int regop_bsearch_range_cmp(const void *pkey, const void *pelem) 36{ 37 u32 key = *(u32 *)pkey; 38 struct regop_offset_range *prange = (struct regop_offset_range *)pelem; 39 if (key < prange->base) { 40 return -1; 41 } else if (prange->base <= key && key < (prange->base + 42 (prange->count * 4U))) { 43 return 0; 44 } 45 return 1; 46} 47 48static inline bool linear_search(u32 offset, const u32 *list, int size) 49{ 50 int i; 51 for (i = 0; i < size; i++) { 52 if (list[i] == offset) { 53 return true; 54 } 55 } 56 return false; 57} 58 59/* 60 * In order to perform a context relative op the context has 61 * to be created already... which would imply that the 62 * context switch mechanism has already been put in place. 63 * So by the time we perform such an opertation it should always 64 * be possible to query for the appropriate context offsets, etc. 65 * 66 * But note: while the dbg_gpu bind requires the a channel fd, 67 * it doesn't require an allocated gr/compute obj at that point... 
68 */ 69static bool gr_context_info_available(struct gr_gk20a *gr) 70{ 71 int err; 72 73 nvgpu_mutex_acquire(&gr->ctx_mutex); 74 err = !gr->ctx_vars.golden_image_initialized; 75 nvgpu_mutex_release(&gr->ctx_mutex); 76 if (err) { 77 return false; 78 } 79 80 return true; 81 82} 83 84static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, 85 u32 *ctx_rd_count, u32 *ctx_wr_count, 86 struct nvgpu_dbg_reg_op *ops, 87 u32 op_count); 88 89 90int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, 91 struct nvgpu_dbg_reg_op *ops, 92 u64 num_ops, 93 bool *is_current_ctx) 94{ 95 int err = 0; 96 unsigned int i; 97 struct channel_gk20a *ch = NULL; 98 struct gk20a *g = dbg_s->g; 99 /*struct gr_gk20a *gr = &g->gr;*/ 100 u32 data32_lo = 0, data32_hi = 0; 101 u32 ctx_rd_count = 0, ctx_wr_count = 0; 102 bool skip_read_lo, skip_read_hi; 103 bool ok; 104 105 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); 106 107 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 108 109 /* For vgpu, the regops routines need to be handled in the 110 * context of the server and support for that does not exist. 111 * 112 * The two users of the regops interface are the compute driver 113 * and tools. The compute driver will work without a functional 114 * regops implementation, so we return -ENOSYS. This will allow 115 * compute apps to run with vgpu. Tools will not work in this 116 * configuration and are not required to work at this time. 
*/ 117 if (g->is_virtual) { 118 return -ENOSYS; 119 } 120 121 ok = validate_reg_ops(dbg_s, 122 &ctx_rd_count, &ctx_wr_count, 123 ops, num_ops); 124 if (!ok) { 125 nvgpu_err(g, "invalid op(s)"); 126 err = -EINVAL; 127 /* each op has its own err/status */ 128 goto clean_up; 129 } 130 131 /* be sure that ctx info is in place if there are ctx ops */ 132 if (ctx_wr_count | ctx_rd_count) { 133 if (!gr_context_info_available(&g->gr)) { 134 nvgpu_err(g, "gr context data not available"); 135 return -ENODEV; 136 } 137 } 138 139 for (i = 0; i < num_ops; i++) { 140 /* if it isn't global then it is done in the ctx ops... */ 141 if (ops[i].type != REGOP(TYPE_GLOBAL)) { 142 continue; 143 } 144 145 switch (ops[i].op) { 146 147 case REGOP(READ_32): 148 ops[i].value_hi = 0; 149 ops[i].value_lo = gk20a_readl(g, ops[i].offset); 150 nvgpu_log(g, gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x", 151 ops[i].value_lo, ops[i].offset); 152 153 break; 154 155 case REGOP(READ_64): 156 ops[i].value_lo = gk20a_readl(g, ops[i].offset); 157 ops[i].value_hi = 158 gk20a_readl(g, ops[i].offset + 4); 159 160 nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x", 161 ops[i].value_hi, ops[i].value_lo, 162 ops[i].offset); 163 break; 164 165 case REGOP(WRITE_32): 166 case REGOP(WRITE_64): 167 /* some of this appears wonky/unnecessary but 168 we've kept it for compat with existing 169 debugger code. just in case... 
*/ 170 skip_read_lo = skip_read_hi = false; 171 if (ops[i].and_n_mask_lo == ~(u32)0) { 172 data32_lo = ops[i].value_lo; 173 skip_read_lo = true; 174 } 175 176 if ((ops[i].op == REGOP(WRITE_64)) && 177 (ops[i].and_n_mask_hi == ~(u32)0)) { 178 data32_hi = ops[i].value_hi; 179 skip_read_hi = true; 180 } 181 182 /* read first 32bits */ 183 if (skip_read_lo == false) { 184 data32_lo = gk20a_readl(g, ops[i].offset); 185 data32_lo &= ~ops[i].and_n_mask_lo; 186 data32_lo |= ops[i].value_lo; 187 } 188 189 /* if desired, read second 32bits */ 190 if ((ops[i].op == REGOP(WRITE_64)) && 191 !skip_read_hi) { 192 data32_hi = gk20a_readl(g, ops[i].offset + 4); 193 data32_hi &= ~ops[i].and_n_mask_hi; 194 data32_hi |= ops[i].value_hi; 195 } 196 197 /* now update first 32bits */ 198 gk20a_writel(g, ops[i].offset, data32_lo); 199 nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", 200 data32_lo, ops[i].offset); 201 /* if desired, update second 32bits */ 202 if (ops[i].op == REGOP(WRITE_64)) { 203 gk20a_writel(g, ops[i].offset + 4, data32_hi); 204 nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ", 205 data32_hi, ops[i].offset + 4); 206 207 } 208 209 210 break; 211 212 /* shouldn't happen as we've already screened */ 213 default: 214 BUG(); 215 err = -EINVAL; 216 goto clean_up; 217 break; 218 } 219 } 220 221 if (ctx_wr_count | ctx_rd_count) { 222 err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops, 223 ctx_wr_count, ctx_rd_count, 224 is_current_ctx); 225 if (err) { 226 nvgpu_warn(g, "failed to perform ctx ops\n"); 227 goto clean_up; 228 } 229 } 230 231 clean_up: 232 nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err); 233 return err; 234 235} 236 237 238static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s, 239 struct nvgpu_dbg_reg_op *op) 240{ 241 int err = 0; 242 243 op->status = REGOP(STATUS_SUCCESS); 244 245 switch (op->op) { 246 case REGOP(READ_32): 247 case REGOP(READ_64): 248 case REGOP(WRITE_32): 249 case REGOP(WRITE_64): 250 break; 251 default: 252 op->status |= 
REGOP(STATUS_UNSUPPORTED_OP); 253 err = -EINVAL; 254 break; 255 } 256 257 switch (op->type) { 258 case REGOP(TYPE_GLOBAL): 259 case REGOP(TYPE_GR_CTX): 260 case REGOP(TYPE_GR_CTX_TPC): 261 case REGOP(TYPE_GR_CTX_SM): 262 case REGOP(TYPE_GR_CTX_CROP): 263 case REGOP(TYPE_GR_CTX_ZROP): 264 case REGOP(TYPE_GR_CTX_QUAD): 265 break; 266 /* 267 case NVGPU_DBG_GPU_REG_OP_TYPE_FB: 268 */ 269 default: 270 op->status |= REGOP(STATUS_INVALID_TYPE); 271 err = -EINVAL; 272 break; 273 } 274 275 return err; 276} 277 278static bool check_whitelists(struct dbg_session_gk20a *dbg_s, 279 struct nvgpu_dbg_reg_op *op, u32 offset) 280{ 281 struct gk20a *g = dbg_s->g; 282 bool valid = false; 283 struct channel_gk20a *ch; 284 285 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 286 287 if (op->type == REGOP(TYPE_GLOBAL)) { 288 /* search global list */ 289 valid = g->ops.regops.get_global_whitelist_ranges && 290 !!bsearch(&offset, 291 g->ops.regops.get_global_whitelist_ranges(), 292 g->ops.regops.get_global_whitelist_ranges_count(), 293 sizeof(*g->ops.regops.get_global_whitelist_ranges()), 294 regop_bsearch_range_cmp); 295 296 /* if debug session and channel is bound search context list */ 297 if ((!valid) && (!dbg_s->is_profiler && ch)) { 298 /* binary search context list */ 299 valid = g->ops.regops.get_context_whitelist_ranges && 300 !!bsearch(&offset, 301 g->ops.regops.get_context_whitelist_ranges(), 302 g->ops.regops.get_context_whitelist_ranges_count(), 303 sizeof(*g->ops.regops.get_context_whitelist_ranges()), 304 regop_bsearch_range_cmp); 305 } 306 307 /* if debug session and channel is bound search runcontrol list */ 308 if ((!valid) && (!dbg_s->is_profiler && ch)) { 309 valid = g->ops.regops.get_runcontrol_whitelist && 310 linear_search(offset, 311 g->ops.regops.get_runcontrol_whitelist(), 312 g->ops.regops.get_runcontrol_whitelist_count()); 313 } 314 } else if (op->type == REGOP(TYPE_GR_CTX)) { 315 /* it's a context-relative op */ 316 if (!ch) { 317 nvgpu_err(dbg_s->g, "can't 
perform ctx regop unless bound"); 318 op->status = REGOP(STATUS_UNSUPPORTED_OP); 319 return valid; 320 } 321 322 /* binary search context list */ 323 valid = g->ops.regops.get_context_whitelist_ranges && 324 !!bsearch(&offset, 325 g->ops.regops.get_context_whitelist_ranges(), 326 g->ops.regops.get_context_whitelist_ranges_count(), 327 sizeof(*g->ops.regops.get_context_whitelist_ranges()), 328 regop_bsearch_range_cmp); 329 330 /* if debug session and channel is bound search runcontrol list */ 331 if ((!valid) && (!dbg_s->is_profiler && ch)) { 332 valid = g->ops.regops.get_runcontrol_whitelist && 333 linear_search(offset, 334 g->ops.regops.get_runcontrol_whitelist(), 335 g->ops.regops.get_runcontrol_whitelist_count()); 336 } 337 338 } else if (op->type == REGOP(TYPE_GR_CTX_QUAD)) { 339 valid = g->ops.regops.get_qctl_whitelist && 340 linear_search(offset, 341 g->ops.regops.get_qctl_whitelist(), 342 g->ops.regops.get_qctl_whitelist_count()); 343 } 344 345 return valid; 346} 347 348/* note: the op here has already been through validate_reg_op_info */ 349static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s, 350 struct nvgpu_dbg_reg_op *op) 351{ 352 int err; 353 u32 buf_offset_lo, buf_offset_addr, num_offsets, offset; 354 bool valid = false; 355 356 op->status = 0; 357 offset = op->offset; 358 359 /* support only 24-bit 4-byte aligned offsets */ 360 if (offset & 0xFF000003) { 361 nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset); 362 op->status |= REGOP(STATUS_INVALID_OFFSET); 363 return -EINVAL; 364 } 365 366 valid = check_whitelists(dbg_s, op, offset); 367 if ((op->op == REGOP(READ_64) || op->op == REGOP(WRITE_64)) && valid) { 368 valid = check_whitelists(dbg_s, op, offset + 4); 369 } 370 371 if (valid && (op->type != REGOP(TYPE_GLOBAL))) { 372 err = gr_gk20a_get_ctx_buffer_offsets(dbg_s->g, 373 op->offset, 374 1, 375 &buf_offset_lo, 376 &buf_offset_addr, 377 &num_offsets, 378 op->type == REGOP(TYPE_GR_CTX_QUAD), 379 op->quad); 380 if (err) { 381 
err = gr_gk20a_get_pm_ctx_buffer_offsets(dbg_s->g, 382 op->offset, 383 1, 384 &buf_offset_lo, 385 &buf_offset_addr, 386 &num_offsets); 387 388 if (err) { 389 op->status |= REGOP(STATUS_INVALID_OFFSET); 390 return -EINVAL; 391 } 392 } 393 if (!num_offsets) { 394 op->status |= REGOP(STATUS_INVALID_OFFSET); 395 return -EINVAL; 396 } 397 } 398 399 if (!valid) { 400 nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset); 401 op->status |= REGOP(STATUS_INVALID_OFFSET); 402 return -EINVAL; 403 } 404 405 return 0; 406} 407 408static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, 409 u32 *ctx_rd_count, u32 *ctx_wr_count, 410 struct nvgpu_dbg_reg_op *ops, 411 u32 op_count) 412{ 413 u32 i; 414 bool ok = true; 415 struct gk20a *g = dbg_s->g; 416 417 /* keep going until the end so every op can get 418 * a separate error code if needed */ 419 for (i = 0; i < op_count; i++) { 420 421 if (validate_reg_op_info(dbg_s, &ops[i]) != 0) { 422 ok = false; 423 } 424 425 if (reg_op_is_gr_ctx(ops[i].type)) { 426 if (reg_op_is_read(ops[i].op)) { 427 (*ctx_rd_count)++; 428 } else { 429 (*ctx_wr_count)++; 430 } 431 } 432 433 /* if "allow_all" flag enabled, dont validate offset */ 434 if (!g->allow_all) { 435 if (validate_reg_op_offset(dbg_s, &ops[i]) != 0) { 436 ok = false; 437 } 438 } 439 } 440 441 nvgpu_log(g, gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d", 442 *ctx_wr_count, *ctx_rd_count); 443 444 return ok; 445} 446 447/* exported for tools like cyclestats, etc */ 448bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset) 449{ 450 bool valid = !!bsearch(&offset, 451 g->ops.regops.get_global_whitelist_ranges(), 452 g->ops.regops.get_global_whitelist_ranges_count(), 453 sizeof(*g->ops.regops.get_global_whitelist_ranges()), 454 regop_bsearch_range_cmp); 455 return valid; 456} 457 458bool reg_op_is_gr_ctx(u8 type) 459{ 460 return type == REGOP(TYPE_GR_CTX) || 461 type == REGOP(TYPE_GR_CTX_TPC) || 462 type == REGOP(TYPE_GR_CTX_SM) || 463 type == REGOP(TYPE_GR_CTX_CROP) 
|| 464 type == REGOP(TYPE_GR_CTX_ZROP) || 465 type == REGOP(TYPE_GR_CTX_QUAD); 466} 467 468bool reg_op_is_read(u8 op) 469{ 470 return op == REGOP(READ_32) || 471 op == REGOP(READ_64); 472}
diff --git a/include/gk20a/regops_gk20a.h b/include/gk20a/regops_gk20a.h
deleted file mode 100644
index 9670587..0000000
--- a/include/gk20a/regops_gk20a.h
+++ /dev/null
@@ -1,90 +0,0 @@ 1/* 2 * Tegra GK20A GPU Debugger Driver Register Ops 3 * 4 * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef REGOPS_GK20A_H 25#define REGOPS_GK20A_H 26 27/* 28 * Register operations 29 * All operations are targeted towards first channel 30 * attached to debug session 31 */ 32/* valid op values */ 33#define NVGPU_DBG_REG_OP_READ_32 (0x00000000) 34#define NVGPU_DBG_REG_OP_WRITE_32 (0x00000001) 35#define NVGPU_DBG_REG_OP_READ_64 (0x00000002) 36#define NVGPU_DBG_REG_OP_WRITE_64 (0x00000003) 37/* note: 8b ops are unsupported */ 38#define NVGPU_DBG_REG_OP_READ_08 (0x00000004) 39#define NVGPU_DBG_REG_OP_WRITE_08 (0x00000005) 40 41/* valid type values */ 42#define NVGPU_DBG_REG_OP_TYPE_GLOBAL (0x00000000) 43#define NVGPU_DBG_REG_OP_TYPE_GR_CTX (0x00000001) 44#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_TPC (0x00000002) 45#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_SM (0x00000004) 46#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_CROP (0x00000008) 47#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_ZROP (0x00000010) 48/*#define NVGPU_DBG_REG_OP_TYPE_FB (0x00000020)*/ 49#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_QUAD (0x00000040) 50 51/* valid status values */ 52#define NVGPU_DBG_REG_OP_STATUS_SUCCESS (0x00000000) 53#define NVGPU_DBG_REG_OP_STATUS_INVALID_OP (0x00000001) 54#define NVGPU_DBG_REG_OP_STATUS_INVALID_TYPE (0x00000002) 55#define NVGPU_DBG_REG_OP_STATUS_INVALID_OFFSET (0x00000004) 56#define NVGPU_DBG_REG_OP_STATUS_UNSUPPORTED_OP (0x00000008) 57#define NVGPU_DBG_REG_OP_STATUS_INVALID_MASK (0x00000010) 58 59struct nvgpu_dbg_reg_op { 60 u8 op; 61 u8 type; 62 u8 status; 63 u8 quad; 64 u32 group_mask; 65 u32 sub_group_mask; 66 u32 offset; 67 u32 value_lo; 68 u32 value_hi; 69 u32 and_n_mask_lo; 70 u32 and_n_mask_hi; 71}; 72 73struct regop_offset_range { 74 u32 base:24; 75 u32 count:8; 76}; 77 78int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, 79 struct nvgpu_dbg_reg_op *ops, 80 u64 num_ops, 81 bool *is_current_ctx); 82 83/* turn seriously unwieldy names -> something shorter */ 84#define REGOP(x) NVGPU_DBG_REG_OP_##x 85 86bool reg_op_is_gr_ctx(u8 type); 87bool reg_op_is_read(u8 op); 88bool 
is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset); 89 90#endif /* REGOPS_GK20A_H */
diff --git a/include/lpwr/lpwr.c b/include/lpwr/lpwr.c
deleted file mode 100644
index c8cfb84..0000000
--- a/include/lpwr/lpwr.c
+++ /dev/null
@@ -1,448 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/bios.h> 24#include <nvgpu/pmu.h> 25#include <nvgpu/clk_arb.h> 26#include <nvgpu/gk20a.h> 27 28#include "gp106/bios_gp106.h" 29#include "pstate/pstate.h" 30#include "pmu_perf/pmu_perf.h" 31#include "lpwr.h" 32 33static int get_lpwr_idx_table(struct gk20a *g) 34{ 35 u32 *lpwr_idx_table_ptr; 36 u8 *entry_addr; 37 u32 idx; 38 struct nvgpu_lpwr_bios_idx_data *pidx_data = 39 &g->perf_pmu.lpwr.lwpr_bios_data.idx; 40 struct nvgpu_bios_lpwr_idx_table_1x_header header = { 0 }; 41 struct nvgpu_bios_lpwr_idx_table_1x_entry entry = { 0 }; 42 43 lpwr_idx_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g, 44 g->bios.perf_token, LOWPOWER_TABLE); 45 if (lpwr_idx_table_ptr == NULL) { 46 return -EINVAL; 47 } 48 49 memcpy(&header, lpwr_idx_table_ptr, 50 sizeof(struct nvgpu_bios_lpwr_idx_table_1x_header)); 51 52 if (header.entry_count >= LPWR_VBIOS_IDX_ENTRY_COUNT_MAX) { 53 return -EINVAL; 54 } 55 56 pidx_data->base_sampling_period = (u16)header.base_sampling_period; 57 58 /* Parse the LPWR Index Table entries.*/ 59 for (idx = 0; idx < header.entry_count; idx++) { 60 entry_addr = (u8 *)lpwr_idx_table_ptr + header.header_size + 61 (idx * header.entry_size); 62 63 memcpy(&entry, entry_addr, 64 sizeof(struct nvgpu_bios_lpwr_idx_table_1x_entry)); 65 66 pidx_data->entry[idx].pcie_idx = entry.pcie_idx; 67 pidx_data->entry[idx].gr_idx = entry.gr_idx; 68 pidx_data->entry[idx].ms_idx = entry.ms_idx; 69 pidx_data->entry[idx].di_idx = entry.di_idx; 70 pidx_data->entry[idx].gc6_idx = entry.gc6_idx; 71 72 } 73 74 return 0; 75} 76 77static int get_lpwr_gr_table(struct gk20a *g) 78{ 79 u32 *lpwr_gr_table_ptr; 80 u8 *entry_addr; 81 u32 idx; 82 struct nvgpu_lpwr_bios_gr_data *pgr_data = 83 &g->perf_pmu.lpwr.lwpr_bios_data.gr; 84 struct nvgpu_bios_lpwr_gr_table_1x_header header = { 0 }; 85 struct nvgpu_bios_lpwr_gr_table_1x_entry entry = { 0 }; 86 87 lpwr_gr_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g, 88 g->bios.perf_token, LOWPOWER_GR_TABLE); 89 if 
(lpwr_gr_table_ptr == NULL) { 90 return -EINVAL; 91 } 92 93 memcpy(&header, lpwr_gr_table_ptr, 94 sizeof(struct nvgpu_bios_lpwr_gr_table_1x_header)); 95 96 /* Parse the LPWR Index Table entries.*/ 97 for (idx = 0; idx < header.entry_count; idx++) { 98 entry_addr = (u8 *)lpwr_gr_table_ptr + header.header_size + 99 (idx * header.entry_size); 100 101 memcpy(&entry, entry_addr, 102 sizeof(struct nvgpu_bios_lpwr_gr_table_1x_entry)); 103 104 if (BIOS_GET_FIELD(entry.feautre_mask, 105 NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) { 106 pgr_data->entry[idx].gr_enabled = true; 107 108 pgr_data->entry[idx].feature_mask = 109 NVGPU_PMU_GR_FEATURE_MASK_ALL; 110 111 if (!BIOS_GET_FIELD(entry.feautre_mask, 112 NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG)) { 113 pgr_data->entry[idx].feature_mask &= 114 ~NVGPU_PMU_GR_FEATURE_MASK_RPPG; 115 } 116 } 117 118 } 119 120 return 0; 121} 122 123static int get_lpwr_ms_table(struct gk20a *g) 124{ 125 u32 *lpwr_ms_table_ptr; 126 u8 *entry_addr; 127 u32 idx; 128 struct nvgpu_lpwr_bios_ms_data *pms_data = 129 &g->perf_pmu.lpwr.lwpr_bios_data.ms; 130 struct nvgpu_bios_lpwr_ms_table_1x_header header = { 0 }; 131 struct nvgpu_bios_lpwr_ms_table_1x_entry entry = { 0 }; 132 133 lpwr_ms_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g, 134 g->bios.perf_token, LOWPOWER_MS_TABLE); 135 if (lpwr_ms_table_ptr == NULL) { 136 return -EINVAL; 137 } 138 139 memcpy(&header, lpwr_ms_table_ptr, 140 sizeof(struct nvgpu_bios_lpwr_ms_table_1x_header)); 141 142 if (header.entry_count >= LPWR_VBIOS_MS_ENTRY_COUNT_MAX) { 143 return -EINVAL; 144 } 145 146 pms_data->default_entry_idx = (u8)header.default_entry_idx; 147 148 pms_data->idle_threshold_us = (u32)(header.idle_threshold_us * 10); 149 150 /* Parse the LPWR MS Table entries.*/ 151 for (idx = 0; idx < header.entry_count; idx++) { 152 entry_addr = (u8 *)lpwr_ms_table_ptr + header.header_size + 153 (idx * header.entry_size); 154 155 memcpy(&entry, entry_addr, 156 sizeof(struct nvgpu_bios_lpwr_ms_table_1x_entry)); 157 158 if 
(BIOS_GET_FIELD(entry.feautre_mask, 159 NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) { 160 pms_data->entry[idx].ms_enabled = true; 161 162 pms_data->entry[idx].feature_mask = 163 NVGPU_PMU_MS_FEATURE_MASK_ALL; 164 165 if (!BIOS_GET_FIELD(entry.feautre_mask, 166 NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING)) { 167 pms_data->entry[idx].feature_mask &= 168 ~NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING; 169 } 170 171 if (!BIOS_GET_FIELD(entry.feautre_mask, 172 NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR)) { 173 pms_data->entry[idx].feature_mask &= 174 ~NVGPU_PMU_MS_FEATURE_MASK_SW_ASR; 175 } 176 177 if (!BIOS_GET_FIELD(entry.feautre_mask, 178 NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG)) { 179 pms_data->entry[idx].feature_mask &= 180 ~NVGPU_PMU_MS_FEATURE_MASK_RPPG; 181 } 182 } 183 184 pms_data->entry[idx].dynamic_current_logic = 185 entry.dynamic_current_logic; 186 187 pms_data->entry[idx].dynamic_current_sram = 188 entry.dynamic_current_sram; 189 } 190 191 return 0; 192} 193 194u32 nvgpu_lpwr_pg_setup(struct gk20a *g) 195{ 196 u32 err = 0; 197 198 nvgpu_log_fn(g, " "); 199 200 err = get_lpwr_gr_table(g); 201 if (err) { 202 return err; 203 } 204 205 err = get_lpwr_ms_table(g); 206 if (err) { 207 return err; 208 } 209 210 err = get_lpwr_idx_table(g); 211 212 return err; 213} 214 215static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g, 216 struct pmu_msg *msg, void *param, 217 u32 handle, u32 status) 218{ 219 u32 *ack_status = param; 220 221 nvgpu_log_fn(g, " "); 222 223 if (status != 0) { 224 nvgpu_err(g, "LWPR PARAM cmd aborted"); 225 return; 226 } 227 228 *ack_status = 1; 229 230 nvgpu_pmu_dbg(g, "lpwr-param is acknowledged from PMU %x", 231 msg->msg.pg.msg_type); 232} 233 234int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate) 235{ 236 struct pmu_cmd cmd; 237 u32 seq, status = 0; 238 u32 payload = NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED; 239 struct clk_set_info *pstate_info; 240 u32 ack_status = 0; 241 242 nvgpu_log_fn(g, " "); 243 244 pstate_info = pstate_get_clk_set_info(g, 
pstate, 245 clkwhich_mclk); 246 if (!pstate_info) { 247 return -EINVAL; 248 } 249 250 if (pstate_info->max_mhz > 251 MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ) { 252 payload |= 253 NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED; 254 } 255 256 if (payload != g->perf_pmu.lpwr.mclk_change_cache) { 257 g->perf_pmu.lpwr.mclk_change_cache = payload; 258 259 cmd.hdr.unit_id = PMU_UNIT_PG; 260 cmd.hdr.size = PMU_CMD_HDR_SIZE + 261 sizeof(struct pmu_pg_cmd_mclk_change); 262 cmd.cmd.pg.mclk_change.cmd_type = 263 PMU_PG_CMD_ID_PG_PARAM; 264 cmd.cmd.pg.mclk_change.cmd_id = 265 PMU_PG_PARAM_CMD_MCLK_CHANGE; 266 cmd.cmd.pg.mclk_change.data = payload; 267 268 nvgpu_pmu_dbg(g, "cmd post MS PMU_PG_PARAM_CMD_MCLK_CHANGE"); 269 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, 270 PMU_COMMAND_QUEUE_HPQ, 271 nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0); 272 273 pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g), 274 &ack_status, 1); 275 if (ack_status == 0) { 276 status = -EINVAL; 277 nvgpu_err(g, "MCLK-CHANGE ACK failed"); 278 } 279 } 280 281 return status; 282} 283 284u32 nvgpu_lpwr_post_init(struct gk20a *g) 285{ 286 struct pmu_cmd cmd; 287 u32 seq; 288 u32 status = 0; 289 u32 ack_status = 0; 290 291 memset(&cmd, 0, sizeof(struct pmu_cmd)); 292 cmd.hdr.unit_id = PMU_UNIT_PG; 293 cmd.hdr.size = PMU_CMD_HDR_SIZE + 294 sizeof(struct pmu_pg_cmd_post_init_param); 295 296 cmd.cmd.pg.post_init.cmd_type = 297 PMU_PG_CMD_ID_PG_PARAM; 298 cmd.cmd.pg.post_init.cmd_id = 299 PMU_PG_PARAM_CMD_POST_INIT; 300 301 nvgpu_pmu_dbg(g, "cmd post post-init PMU_PG_PARAM_CMD_POST_INIT"); 302 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, 303 PMU_COMMAND_QUEUE_LPQ, 304 nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0); 305 306 pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g), 307 &ack_status, 1); 308 if (ack_status == 0) { 309 status = -EINVAL; 310 nvgpu_err(g, "post-init ack failed"); 311 } 312 313 return status; 314} 315 316u32 
nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num) 317{ 318 struct nvgpu_lpwr_bios_ms_data *pms_data = 319 &g->perf_pmu.lpwr.lwpr_bios_data.ms; 320 struct nvgpu_lpwr_bios_idx_data *pidx_data = 321 &g->perf_pmu.lpwr.lwpr_bios_data.idx; 322 struct pstate *pstate = pstate_find(g, pstate_num); 323 u32 ms_idx; 324 325 nvgpu_log_fn(g, " "); 326 327 if (!pstate) { 328 return 0; 329 } 330 331 ms_idx = pidx_data->entry[pstate->lpwr_entry_idx].ms_idx; 332 if (pms_data->entry[ms_idx].ms_enabled) { 333 return 1; 334 } else { 335 return 0; 336 } 337} 338 339u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num) 340{ 341 struct nvgpu_lpwr_bios_gr_data *pgr_data = 342 &g->perf_pmu.lpwr.lwpr_bios_data.gr; 343 struct nvgpu_lpwr_bios_idx_data *pidx_data = 344 &g->perf_pmu.lpwr.lwpr_bios_data.idx; 345 struct pstate *pstate = pstate_find(g, pstate_num); 346 u32 idx; 347 348 nvgpu_log_fn(g, " "); 349 350 if (!pstate) { 351 return 0; 352 } 353 354 idx = pidx_data->entry[pstate->lpwr_entry_idx].gr_idx; 355 if (pgr_data->entry[idx].gr_enabled) { 356 return 1; 357 } else { 358 return 0; 359 } 360} 361 362 363int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock) 364{ 365 struct nvgpu_pmu *pmu = &g->pmu; 366 u32 status = 0; 367 u32 is_mscg_supported = 0; 368 u32 is_rppg_supported = 0; 369 u32 present_pstate = 0; 370 371 nvgpu_log_fn(g, " "); 372 373 if (pstate_lock) { 374 nvgpu_clk_arb_pstate_change_lock(g, true); 375 } 376 nvgpu_mutex_acquire(&pmu->pg_mutex); 377 378 present_pstate = nvgpu_clk_arb_get_current_pstate(g); 379 380 is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g, 381 present_pstate); 382 if (is_mscg_supported && g->mscg_enabled) { 383 if (!pmu->mscg_stat) { 384 pmu->mscg_stat = PMU_MSCG_ENABLED; 385 } 386 } 387 388 is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g, 389 present_pstate); 390 if (is_rppg_supported) { 391 if (g->support_pmu && g->can_elpg) { 392 status = nvgpu_pmu_enable_elpg(g); 393 } 394 } 395 396 
nvgpu_mutex_release(&pmu->pg_mutex); 397 if (pstate_lock) { 398 nvgpu_clk_arb_pstate_change_lock(g, false); 399 } 400 401 return status; 402} 403 404int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock) 405{ 406 struct nvgpu_pmu *pmu = &g->pmu; 407 int status = 0; 408 u32 is_mscg_supported = 0; 409 u32 is_rppg_supported = 0; 410 u32 present_pstate = 0; 411 412 nvgpu_log_fn(g, " "); 413 414 if (pstate_lock) { 415 nvgpu_clk_arb_pstate_change_lock(g, true); 416 } 417 nvgpu_mutex_acquire(&pmu->pg_mutex); 418 419 present_pstate = nvgpu_clk_arb_get_current_pstate(g); 420 421 is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g, 422 present_pstate); 423 if (is_rppg_supported) { 424 if (g->support_pmu && g->elpg_enabled) { 425 status = nvgpu_pmu_disable_elpg(g); 426 if (status) { 427 goto exit_unlock; 428 } 429 } 430 } 431 432 is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g, 433 present_pstate); 434 if (is_mscg_supported && g->mscg_enabled) { 435 if (pmu->mscg_stat) { 436 pmu->mscg_stat = PMU_MSCG_DISABLED; 437 } 438 } 439 440exit_unlock: 441 nvgpu_mutex_release(&pmu->pg_mutex); 442 if (pstate_lock) { 443 nvgpu_clk_arb_pstate_change_lock(g, false); 444 } 445 446 nvgpu_log_fn(g, "done"); 447 return status; 448}
diff --git a/include/lpwr/lpwr.h b/include/lpwr/lpwr.h
deleted file mode 100644
index c38ba62..0000000
--- a/include/lpwr/lpwr.h
+++ /dev/null
@@ -1,101 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22#ifndef NVGPU_LPWR_H 23#define NVGPU_LPWR_H 24 25#define MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ 540 26 27#define NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED BIT(0x1) 28#define NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED BIT(0x3) 29 30#define LPWR_ENTRY_COUNT_MAX 0x06 31 32#define LPWR_VBIOS_IDX_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX) 33 34#define LPWR_VBIOS_IDX_ENTRY_RSVD \ 35 (LPWR_VBIOS_IDX_ENTRY_COUNT_MAX - 1) 36 37#define LPWR_VBIOS_BASE_SAMPLING_PERIOD_DEFAULT (500) 38 39struct nvgpu_lpwr_bios_idx_entry { 40 u8 pcie_idx; 41 u8 gr_idx; 42 u8 ms_idx; 43 u8 di_idx; 44 u8 gc6_idx; 45}; 46 47struct nvgpu_lpwr_bios_idx_data { 48 u16 base_sampling_period; 49 struct nvgpu_lpwr_bios_idx_entry entry[LPWR_VBIOS_IDX_ENTRY_COUNT_MAX]; 50}; 51 52#define LPWR_VBIOS_MS_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX) 53 54struct nvgpu_lpwr_bios_ms_entry { 55 bool ms_enabled; 56 u32 feature_mask; 57 u32 asr_efficiency_thresholdl; 58 u16 dynamic_current_logic; 59 u16 dynamic_current_sram; 60}; 61 62struct nvgpu_lpwr_bios_ms_data { 63 u8 default_entry_idx; 64 u32 idle_threshold_us; 65 struct nvgpu_lpwr_bios_ms_entry entry[LPWR_VBIOS_MS_ENTRY_COUNT_MAX]; 66}; 67 68#define LPWR_VBIOS_GR_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX) 69 70struct nvgpu_lpwr_bios_gr_entry { 71 bool gr_enabled; 72 u32 feature_mask; 73}; 74 75struct nvgpu_lpwr_bios_gr_data { 76 u8 default_entry_idx; 77 u32 idle_threshold_us; 78 u8 adaptive_gr_multiplier; 79 struct nvgpu_lpwr_bios_gr_entry entry[LPWR_VBIOS_GR_ENTRY_COUNT_MAX]; 80}; 81 82struct nvgpu_lpwr_bios_data { 83 struct nvgpu_lpwr_bios_idx_data idx; 84 struct nvgpu_lpwr_bios_ms_data ms; 85 struct nvgpu_lpwr_bios_gr_data gr; 86}; 87 88struct obj_lwpr { 89 struct nvgpu_lpwr_bios_data lwpr_bios_data; 90 u32 mclk_change_cache; 91}; 92 93u32 nvgpu_lpwr_pg_setup(struct gk20a *g); 94int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate); 95int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock); 96int nvgpu_lpwr_disable_pg(struct gk20a 
*g, bool pstate_lock); 97u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num); 98u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num); 99u32 nvgpu_lpwr_post_init(struct gk20a *g); 100 101#endif /* NVGPU_LPWR_H */
diff --git a/include/lpwr/rppg.c b/include/lpwr/rppg.c
deleted file mode 100644
index 13e8126..0000000
--- a/include/lpwr/rppg.c
+++ /dev/null
@@ -1,160 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#include <nvgpu/pmu.h> 24#include <nvgpu/gk20a.h> 25 26#include "gp106/bios_gp106.h" 27#include "pstate/pstate.h" 28#include "lpwr/rppg.h" 29 30static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg, 31 void *param, u32 handle, u32 status) 32{ 33 u32 *success = param; 34 35 if (status == 0) { 36 switch (msg->msg.pg.rppg_msg.cmn.msg_id) { 37 case NV_PMU_RPPG_MSG_ID_INIT_CTRL_ACK: 38 *success = 1; 39 nvgpu_pmu_dbg(g, "RPPG is acknowledged from PMU %x", 40 msg->msg.pg.msg_type); 41 break; 42 } 43 } 44 45 nvgpu_pmu_dbg(g, "RPPG is acknowledged from PMU %x", 46 msg->msg.pg.msg_type); 47} 48 49static u32 rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd) 50{ 51 struct pmu_cmd cmd; 52 u32 seq; 53 u32 status = 0; 54 u32 success = 0; 55 56 memset(&cmd, 0, sizeof(struct pmu_cmd)); 57 cmd.hdr.unit_id = PMU_UNIT_PG; 58 cmd.hdr.size = PMU_CMD_HDR_SIZE + 59 sizeof(struct nv_pmu_rppg_cmd); 60 61 cmd.cmd.pg.rppg_cmd.cmn.cmd_type = PMU_PMU_PG_CMD_ID_RPPG; 62 cmd.cmd.pg.rppg_cmd.cmn.cmd_id = prppg_cmd->cmn.cmd_id; 63 64 switch (prppg_cmd->cmn.cmd_id) { 65 case NV_PMU_RPPG_CMD_ID_INIT: 66 break; 67 case NV_PMU_RPPG_CMD_ID_INIT_CTRL: 68 cmd.cmd.pg.rppg_cmd.init_ctrl.ctrl_id = 69 prppg_cmd->init_ctrl.ctrl_id; 70 cmd.cmd.pg.rppg_cmd.init_ctrl.domain_id = 71 prppg_cmd->init_ctrl.domain_id; 72 break; 73 case NV_PMU_RPPG_CMD_ID_STATS_RESET: 74 cmd.cmd.pg.rppg_cmd.stats_reset.ctrl_id = 75 prppg_cmd->stats_reset.ctrl_id; 76 break; 77 default: 78 nvgpu_err(g, "Inivalid RPPG command %d", 79 prppg_cmd->cmn.cmd_id); 80 return -1; 81 } 82 83 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, 84 pmu_handle_rppg_init_msg, &success, &seq, ~0); 85 if (status) { 86 nvgpu_err(g, "Unable to submit parameter command %d", 87 prppg_cmd->cmn.cmd_id); 88 goto exit; 89 } 90 91 if (prppg_cmd->cmn.cmd_id == NV_PMU_RPPG_CMD_ID_INIT_CTRL) { 92 pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g), 93 &success, 1); 94 if (success == 0) { 95 
status = -EINVAL; 96 nvgpu_err(g, "Ack for the parameter command %x", 97 prppg_cmd->cmn.cmd_id); 98 } 99 } 100 101exit: 102 return status; 103} 104 105static u32 rppg_init(struct gk20a *g) 106{ 107 struct nv_pmu_rppg_cmd rppg_cmd; 108 109 rppg_cmd.init.cmd_id = NV_PMU_RPPG_CMD_ID_INIT; 110 111 return rppg_send_cmd(g, &rppg_cmd); 112} 113 114static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id) 115{ 116 struct nv_pmu_rppg_cmd rppg_cmd; 117 118 rppg_cmd.init_ctrl.cmd_id = NV_PMU_RPPG_CMD_ID_INIT_CTRL; 119 rppg_cmd.init_ctrl.ctrl_id = ctrl_id; 120 121 switch (ctrl_id) { 122 case NV_PMU_RPPG_CTRL_ID_GR: 123 case NV_PMU_RPPG_CTRL_ID_MS: 124 rppg_cmd.init_ctrl.domain_id = NV_PMU_RPPG_DOMAIN_ID_GFX; 125 break; 126 } 127 128 return rppg_send_cmd(g, &rppg_cmd); 129} 130 131u32 init_rppg(struct gk20a *g) 132{ 133 u32 status; 134 135 status = rppg_init(g); 136 if (status != 0) { 137 nvgpu_err(g, 138 "Failed to initialize RPPG in PMU: 0x%08x", status); 139 return status; 140 } 141 142 143 status = rppg_ctrl_init(g, NV_PMU_RPPG_CTRL_ID_GR); 144 if (status != 0) { 145 nvgpu_err(g, 146 "Failed to initialize RPPG_CTRL: GR in PMU: 0x%08x", 147 status); 148 return status; 149 } 150 151 status = rppg_ctrl_init(g, NV_PMU_RPPG_CTRL_ID_MS); 152 if (status != 0) { 153 nvgpu_err(g, 154 "Failed to initialize RPPG_CTRL: MS in PMU: 0x%08x", 155 status); 156 return status; 157 } 158 159 return status; 160}
diff --git a/include/lpwr/rppg.h b/include/lpwr/rppg.h
deleted file mode 100644
index d66600a..0000000
--- a/include/lpwr/rppg.h
+++ /dev/null
@@ -1,26 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_LPWR_RPPG_H 23#define NVGPU_LPWR_RPPG_H 24 25u32 init_rppg(struct gk20a *g); 26#endif /* NVGPU_LPWR_RPPG_H */
diff --git a/include/nvgpu/acr/acr_flcnbl.h b/include/nvgpu/acr/acr_flcnbl.h
deleted file mode 100644
index ad697b2..0000000
--- a/include/nvgpu/acr/acr_flcnbl.h
+++ /dev/null
@@ -1,144 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_ACR_FLCNBL_H 23#define NVGPU_ACR_FLCNBL_H 24 25#include <nvgpu/flcnif_cmn.h> 26 27#ifndef NVGPU_ACR_H 28#warning "acr_flcnbl.h not included from nvgpu_acr.h!" \ 29 "Include nvgpu_acr.h instead of acr_xxx.h to get access to ACR interfaces" 30#endif 31 32/* 33 * Structure used by the boot-loader to load the rest of the code. This has 34 * to be filled by NVGPU and copied into DMEM at offset provided in the 35 * hsflcn_bl_desc.bl_desc_dmem_load_off. 
36 */ 37struct flcn_bl_dmem_desc { 38 u32 reserved[4]; /*Should be the first element..*/ 39 u32 signature[4]; /*Should be the first element..*/ 40 u32 ctx_dma; 41 u32 code_dma_base; 42 u32 non_sec_code_off; 43 u32 non_sec_code_size; 44 u32 sec_code_off; 45 u32 sec_code_size; 46 u32 code_entry_point; 47 u32 data_dma_base; 48 u32 data_size; 49 u32 code_dma_base1; 50 u32 data_dma_base1; 51}; 52 53struct flcn_bl_dmem_desc_v1 { 54 u32 reserved[4]; /*Should be the first element..*/ 55 u32 signature[4]; /*Should be the first element..*/ 56 u32 ctx_dma; 57 struct falc_u64 code_dma_base; 58 u32 non_sec_code_off; 59 u32 non_sec_code_size; 60 u32 sec_code_off; 61 u32 sec_code_size; 62 u32 code_entry_point; 63 struct falc_u64 data_dma_base; 64 u32 data_size; 65 u32 argc; 66 u32 argv; 67}; 68 69/* 70 * The header used by NVGPU to figure out code and data sections of bootloader 71 * 72 * bl_code_off - Offset of code section in the image 73 * bl_code_size - Size of code section in the image 74 * bl_data_off - Offset of data section in the image 75 * bl_data_size - Size of data section in the image 76 */ 77struct flcn_bl_img_hdr { 78 u32 bl_code_off; 79 u32 bl_code_size; 80 u32 bl_data_off; 81 u32 bl_data_size; 82}; 83 84/* 85 * The descriptor used by NVGPU to figure out the requirements of bootloader 86 * 87 * bl_start_tag - Starting tag of bootloader 88 * bl_desc_dmem_load_off - Dmem offset where _def_rm_flcn_bl_dmem_desc 89 * to be loaded 90 * bl_img_hdr - Description of the image 91 */ 92struct hsflcn_bl_desc { 93 u32 bl_start_tag; 94 u32 bl_desc_dmem_load_off; 95 struct flcn_bl_img_hdr bl_img_hdr; 96}; 97 98/* 99 * Legacy structure used by the current PMU/DPU bootloader. 
100 */ 101struct loader_config { 102 u32 dma_idx; 103 u32 code_dma_base; /* upper 32-bits of 40-bit dma address */ 104 u32 code_size_total; 105 u32 code_size_to_load; 106 u32 code_entry_point; 107 u32 data_dma_base; /* upper 32-bits of 40-bit dma address */ 108 u32 data_size; /* initialized data of the application */ 109 u32 overlay_dma_base; /* upper 32-bits of the 40-bit dma address */ 110 u32 argc; 111 u32 argv; 112 u16 code_dma_base1; /* upper 7 bits of 47-bit dma address */ 113 u16 data_dma_base1; /* upper 7 bits of 47-bit dma address */ 114 u16 overlay_dma_base1; /* upper 7 bits of the 47-bit dma address */ 115}; 116 117struct loader_config_v1 { 118 u32 reserved; 119 u32 dma_idx; 120 struct falc_u64 code_dma_base; 121 u32 code_size_total; 122 u32 code_size_to_load; 123 u32 code_entry_point; 124 struct falc_u64 data_dma_base; 125 u32 data_size; 126 struct falc_u64 overlay_dma_base; 127 u32 argc; 128 u32 argv; 129}; 130 131/* 132 * Union of all supported structures used by bootloaders. 133 */ 134union flcn_bl_generic_desc { 135 struct flcn_bl_dmem_desc bl_dmem_desc; 136 struct loader_config loader_cfg; 137}; 138 139union flcn_bl_generic_desc_v1 { 140 struct flcn_bl_dmem_desc_v1 bl_dmem_desc_v1; 141 struct loader_config_v1 loader_cfg_v1; 142}; 143 144#endif /* NVGPU_ACR_FLCNBL_H */
diff --git a/include/nvgpu/acr/acr_lsfm.h b/include/nvgpu/acr/acr_lsfm.h
deleted file mode 100644
index ed58552..0000000
--- a/include/nvgpu/acr/acr_lsfm.h
+++ /dev/null
@@ -1,328 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_ACR_LSFM_H 23#define NVGPU_ACR_LSFM_H 24 25#ifndef NVGPU_ACR_H 26#warning "acr_lsfm.h not included from nvgpu_acr.h!" 
\ 27 "Include nvgpu_acr.h instead of acr_xxx.h to get access to ACR interfaces" 28#endif 29 30/* 31 * READ/WRITE masks for WPR region 32 */ 33/* Readable only from level 2 and 3 client */ 34#define LSF_WPR_REGION_RMASK (0xC) 35/* Writable only from level 2 and 3 client */ 36#define LSF_WPR_REGION_WMASK (0xC) 37/* Readable only from level 3 client */ 38#define LSF_WPR_REGION_RMASK_SUB_WPR_ENABLED (0x8) 39/* Writable only from level 3 client */ 40#define LSF_WPR_REGION_WMASK_SUB_WPR_ENABLED (0x8) 41/* Disallow read mis-match for all clients */ 42#define LSF_WPR_REGION_ALLOW_READ_MISMATCH_NO (0x0) 43/* Disallow write mis-match for all clients */ 44#define LSF_WPR_REGION_ALLOW_WRITE_MISMATCH_NO (0x0) 45 46/* 47 * Falcon Id Defines 48 * Defines a common Light Secure Falcon identifier. 49 */ 50#define LSF_FALCON_ID_PMU (0) 51#define LSF_FALCON_ID_GSPLITE (1) 52#define LSF_FALCON_ID_FECS (2) 53#define LSF_FALCON_ID_GPCCS (3) 54#define LSF_FALCON_ID_SEC2 (7) 55#define LSF_FALCON_ID_END (11) 56#define LSF_FALCON_ID_INVALID (0xFFFFFFFF) 57 58/* 59 * Light Secure Falcon Ucode Description Defines 60 * This structure is prelim and may change as the ucode signing flow evolves. 61 */ 62struct lsf_ucode_desc { 63 u8 prd_keys[2][16]; 64 u8 dbg_keys[2][16]; 65 u32 b_prd_present; 66 u32 b_dbg_present; 67 u32 falcon_id; 68}; 69 70struct lsf_ucode_desc_v1 { 71 u8 prd_keys[2][16]; 72 u8 dbg_keys[2][16]; 73 u32 b_prd_present; 74 u32 b_dbg_present; 75 u32 falcon_id; 76 u32 bsupports_versioning; 77 u32 version; 78 u32 dep_map_count; 79 u8 dep_map[LSF_FALCON_ID_END * 2 * 4]; 80 u8 kdf[16]; 81}; 82 83/* 84 * Light Secure WPR Header 85 * Defines state allowing Light Secure Falcon bootstrapping. 
86 */ 87struct lsf_wpr_header { 88 u32 falcon_id; 89 u32 lsb_offset; 90 u32 bootstrap_owner; 91 u32 lazy_bootstrap; 92 u32 status; 93}; 94 95struct lsf_wpr_header_v1 { 96 u32 falcon_id; 97 u32 lsb_offset; 98 u32 bootstrap_owner; 99 u32 lazy_bootstrap; 100 u32 bin_version; 101 u32 status; 102}; 103 104 105/* 106 * LSF shared SubWpr Header 107 * 108 * use_case_id - Shared SubWpr use case ID (updated by nvgpu) 109 * start_addr - start address of subWpr (updated by nvgpu) 110 * size_4K - size of subWpr in 4K (updated by nvgpu) 111 */ 112struct lsf_shared_sub_wpr_header { 113 u32 use_case_id; 114 u32 start_addr; 115 u32 size_4K; 116}; 117 118/* shared sub_wpr use case IDs */ 119enum { 120 LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_FRTS_VBIOS_TABLES = 1, 121 LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA = 2 122}; 123 124#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX \ 125 LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA 126 127#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID (0xFFFFFFFF) 128 129#define MAX_SUPPORTED_SHARED_SUB_WPR_USE_CASES \ 130 LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX 131 132/* Static sizes of shared subWPRs */ 133/* Minimum granularity supported is 4K */ 134/* 1MB in 4K */ 135#define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100) 136/* 4K */ 137#define LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K (0x1) 138 139/* 140 * Bootstrap Owner Defines 141 */ 142#define LSF_BOOTSTRAP_OWNER_DEFAULT (LSF_FALCON_ID_PMU) 143 144/* 145 * Image Status Defines 146 */ 147#define LSF_IMAGE_STATUS_NONE (0) 148#define LSF_IMAGE_STATUS_COPY (1) 149#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2) 150#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3) 151#define LSF_IMAGE_STATUS_VALIDATION_DONE (4) 152#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5) 153#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6) 154 155/*Light Secure Bootstrap header related defines*/ 156#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_FALSE 0 157#define 
NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE 1 158#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_FALSE 0 159#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE 4 160#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE 8 161#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE 0 162 163/* 164 * Light Secure Bootstrap Header 165 * Defines state allowing Light Secure Falcon bootstrapping. 166 */ 167struct lsf_lsb_header { 168 struct lsf_ucode_desc signature; 169 u32 ucode_off; 170 u32 ucode_size; 171 u32 data_size; 172 u32 bl_code_size; 173 u32 bl_imem_off; 174 u32 bl_data_off; 175 u32 bl_data_size; 176 u32 app_code_off; 177 u32 app_code_size; 178 u32 app_data_off; 179 u32 app_data_size; 180 u32 flags; 181}; 182 183struct lsf_lsb_header_v1 { 184 struct lsf_ucode_desc_v1 signature; 185 u32 ucode_off; 186 u32 ucode_size; 187 u32 data_size; 188 u32 bl_code_size; 189 u32 bl_imem_off; 190 u32 bl_data_off; 191 u32 bl_data_size; 192 u32 app_code_off; 193 u32 app_code_size; 194 u32 app_data_off; 195 u32 app_data_size; 196 u32 flags; 197}; 198 199/* 200 * Light Secure WPR Content Alignments 201 */ 202#define LSF_WPR_HEADER_ALIGNMENT (256U) 203#define LSF_SUB_WPR_HEADER_ALIGNMENT (256U) 204#define LSF_LSB_HEADER_ALIGNMENT (256U) 205#define LSF_BL_DATA_ALIGNMENT (256U) 206#define LSF_BL_DATA_SIZE_ALIGNMENT (256U) 207#define LSF_BL_CODE_SIZE_ALIGNMENT (256U) 208#define LSF_DATA_SIZE_ALIGNMENT (256U) 209#define LSF_CODE_SIZE_ALIGNMENT (256U) 210 211/* MMU excepts sub_wpr sizes in units of 4K */ 212#define SUB_WPR_SIZE_ALIGNMENT (4096U) 213 214/* 215 * Maximum WPR Header size 216 */ 217#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX \ 218 (ALIGN_UP((sizeof(struct lsf_wpr_header_v1) * LSF_FALCON_ID_END), \ 219 LSF_WPR_HEADER_ALIGNMENT)) 220#define LSF_LSB_HEADER_TOTAL_SIZE_MAX (\ 221 ALIGN_UP(sizeof(struct lsf_lsb_header_v1), LSF_LSB_HEADER_ALIGNMENT)) 222 223/* Maximum SUB WPR header size */ 224#define LSF_SUB_WPR_HEADERS_TOTAL_SIZE_MAX (ALIGN_UP( \ 225 (sizeof(struct lsf_shared_sub_wpr_header) * \ 226 
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX), \ 227 LSF_SUB_WPR_HEADER_ALIGNMENT)) 228 229 230#define LSF_UCODE_DATA_ALIGNMENT 4096 231 232/* Defined for 1MB alignment */ 233#define SHIFT_1MB (20) 234#define SHIFT_4KB (12) 235 236/* 237 * Supporting maximum of 2 regions. 238 * This is needed to pre-allocate space in DMEM 239 */ 240#define NVGPU_FLCN_ACR_MAX_REGIONS (2) 241#define LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE (0x200) 242 243/* 244 * start_addr - Starting address of region 245 * end_addr - Ending address of region 246 * region_id - Region ID 247 * read_mask - Read Mask 248 * write_mask - WriteMask 249 * client_mask - Bit map of all clients currently using this region 250 */ 251struct flcn_acr_region_prop { 252 u32 start_addr; 253 u32 end_addr; 254 u32 region_id; 255 u32 read_mask; 256 u32 write_mask; 257 u32 client_mask; 258}; 259 260struct flcn_acr_region_prop_v1 { 261 u32 start_addr; 262 u32 end_addr; 263 u32 region_id; 264 u32 read_mask; 265 u32 write_mask; 266 u32 client_mask; 267 u32 shadowmMem_startaddress; 268}; 269 270/* 271 * no_regions - Number of regions used. 272 * region_props - Region properties 273 */ 274struct flcn_acr_regions { 275 u32 no_regions; 276 struct flcn_acr_region_prop region_props[NVGPU_FLCN_ACR_MAX_REGIONS]; 277}; 278 279struct flcn_acr_regions_v1 { 280 u32 no_regions; 281 struct flcn_acr_region_prop_v1 region_props[NVGPU_FLCN_ACR_MAX_REGIONS]; 282}; 283/* 284 * reserved_dmem-When the bootstrap owner has done bootstrapping other falcons, 285 * and need to switch into LS mode, it needs to have its own 286 * actual DMEM image copied into DMEM as part of LS setup. If 287 * ACR desc is at location 0, it will definitely get overwritten 288 * causing data corruption. Hence we are reserving 0x200 bytes 289 * to give room for any loading data. NOTE: This has to be the 290 * first member always 291 * signature - Signature of ACR ucode. 
292 * wpr_region_id - Region ID holding the WPR header and its details 293 * wpr_offset - Offset from the WPR region holding the wpr header 294 * regions - Region descriptors 295 * nonwpr_ucode_blob_start -stores non-WPR start where kernel stores ucode blob 296 * nonwpr_ucode_blob_end -stores non-WPR end where kernel stores ucode blob 297 */ 298struct flcn_acr_desc { 299 union { 300 u32 reserved_dmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/4)]; 301 u32 signatures[4]; 302 } ucode_reserved_space; 303 /*Always 1st*/ 304 u32 wpr_region_id; 305 u32 wpr_offset; 306 u32 mmu_mem_range; 307 struct flcn_acr_regions regions; 308 u32 nonwpr_ucode_blob_size; 309 u64 nonwpr_ucode_blob_start; 310}; 311 312struct flcn_acr_desc_v1 { 313 union { 314 u32 reserved_dmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/4)]; 315 } ucode_reserved_space; 316 u32 signatures[4]; 317 /*Always 1st*/ 318 u32 wpr_region_id; 319 u32 wpr_offset; 320 u32 mmu_mem_range; 321 struct flcn_acr_regions_v1 regions; 322 u32 nonwpr_ucode_blob_size; 323 u64 nonwpr_ucode_blob_start; 324 u32 dummy[4]; /* ACR_BSI_VPR_DESC */ 325}; 326 327 328#endif /* NVGPU_ACR_LSFM_H */
diff --git a/include/nvgpu/acr/acr_objflcn.h b/include/nvgpu/acr/acr_objflcn.h
deleted file mode 100644
index 57b43c8..0000000
--- a/include/nvgpu/acr/acr_objflcn.h
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_ACR_OBJFLCN_H 23#define NVGPU_ACR_OBJFLCN_H 24 25#ifndef NVGPU_ACR_H 26#warning "acr_objflcn.h not included from nvgpu_acr.h!" \ 27 "Include nvgpu_acr.h instead of acr_xxx.h to get access to ACR interfaces" 28#endif 29 30struct flcn_ucode_img { 31 u32 *header; /* only some falcons have header */ 32 u32 *data; 33 struct pmu_ucode_desc *desc; /* only some falcons have descriptor */ 34 u32 data_size; 35 void *fw_ver; /* CTRL_GPU_GET_FIRMWARE_VERSION_PARAMS struct */ 36 u8 load_entire_os_data; /* load the whole osData section at boot time.*/ 37 /* NULL if not a light secure falcon.*/ 38 struct lsf_ucode_desc *lsf_desc; 39 /* True if there a resources to freed by the client. 
*/ 40 u8 free_res_allocs; 41 u32 flcn_inst; 42}; 43 44struct flcn_ucode_img_v1 { 45 u32 *header; 46 u32 *data; 47 struct pmu_ucode_desc_v1 *desc; 48 u32 data_size; 49 void *fw_ver; 50 u8 load_entire_os_data; 51 struct lsf_ucode_desc_v1 *lsf_desc; 52 u8 free_res_allocs; 53 u32 flcn_inst; 54}; 55 56/* 57 * Falcon UCODE header index. 58 */ 59#define FLCN_NL_UCODE_HDR_OS_CODE_OFF_IND (0) 60#define FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND (1) 61#define FLCN_NL_UCODE_HDR_OS_DATA_OFF_IND (2) 62#define FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND (3) 63#define FLCN_NL_UCODE_HDR_NUM_APPS_IND (4) 64 65/* 66 * There are total N number of Apps with code and offset defined in UCODE header 67 * This macro provides the CODE and DATA offset and size of Ath application. 68 */ 69#define FLCN_NL_UCODE_HDR_APP_CODE_START_IND (5) 70#define FLCN_NL_UCODE_HDR_APP_CODE_OFF_IND(N, A) \ 71 (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + (A*2)) 72#define FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND(N, A) \ 73 (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + (A*2) + 1) 74#define FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) \ 75 (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + (N*2) - 1) 76 77#define FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) \ 78 (FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) + 1) 79#define FLCN_NL_UCODE_HDR_APP_DATA_OFF_IND(N, A) \ 80 (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + (A*2)) 81#define FLCN_NL_UCODE_HDR_APP_DATA_SIZE_IND(N, A) \ 82 (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + (A*2) + 1) 83#define FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) \ 84 (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + (N*2) - 1) 85 86#define FLCN_NL_UCODE_HDR_OS_OVL_OFF_IND(N) \ 87 (FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 1) 88#define FLCN_NL_UCODE_HDR_OS_OVL_SIZE_IND(N) \ 89 (FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 2) 90 91#endif /* NVGPU_ACR_OBJFLCN_H */
diff --git a/include/nvgpu/acr/acr_objlsfm.h b/include/nvgpu/acr/acr_objlsfm.h
deleted file mode 100644
index e3769bb..0000000
--- a/include/nvgpu/acr/acr_objlsfm.h
+++ /dev/null
@@ -1,97 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_ACR_OBJLSFM_H 23#define NVGPU_ACR_OBJLSFM_H 24 25#ifndef NVGPU_ACR_H 26#warning "acr_objlsfm.h not included from nvgpu_acr.h!" \ 27 "Include nvgpu_acr.h instead of acr_xxx.h to get access to ACR interfaces" 28#endif 29 30#include "acr_flcnbl.h" 31#include "acr_objflcn.h" 32 33/* 34 * LSFM Managed Ucode Image 35 * next : Next image the list, NULL if last. 
36 * wpr_header : WPR header for this ucode image 37 * lsb_header : LSB header for this ucode image 38 * bl_gen_desc : Bootloader generic desc structure for this ucode image 39 * bl_gen_desc_size : Sizeof bootloader desc structure for this ucode image 40 * full_ucode_size : Surface size required for final ucode image 41 * ucode_img : Ucode image info 42 */ 43struct lsfm_managed_ucode_img { 44 struct lsfm_managed_ucode_img *next; 45 struct lsf_wpr_header wpr_header; 46 struct lsf_lsb_header lsb_header; 47 union flcn_bl_generic_desc bl_gen_desc; 48 u32 bl_gen_desc_size; 49 u32 full_ucode_size; 50 struct flcn_ucode_img ucode_img; 51}; 52 53struct lsfm_managed_ucode_img_v2 { 54 struct lsfm_managed_ucode_img_v2 *next; 55 struct lsf_wpr_header_v1 wpr_header; 56 struct lsf_lsb_header_v1 lsb_header; 57 union flcn_bl_generic_desc_v1 bl_gen_desc; 58 u32 bl_gen_desc_size; 59 u32 full_ucode_size; 60 struct flcn_ucode_img_v1 ucode_img; 61}; 62 63/* 64 * Defines the structure used to contain all generic information related to 65 * the LSFM. 66 * Contains the Light Secure Falcon Manager (LSFM) feature related data. 67 */ 68struct ls_flcn_mgr { 69 u16 managed_flcn_cnt; 70 u32 wpr_size; 71 u32 disable_mask; 72 struct lsfm_managed_ucode_img *ucode_img_list; 73 void *wpr_client_req_state;/*PACR_CLIENT_REQUEST_STATE originally*/ 74}; 75 76/* 77 * LSFM SUB WPRs struct 78 * pnext : Next entry in the list, NULL if last 79 * sub_wpr_header : SubWpr Header struct 80 */ 81struct lsfm_sub_wpr { 82 struct lsfm_sub_wpr *pnext; 83 struct lsf_shared_sub_wpr_header sub_wpr_header; 84}; 85 86struct ls_flcn_mgr_v1 { 87 u16 managed_flcn_cnt; 88 u32 wpr_size; 89 u32 disable_mask; 90 struct lsfm_managed_ucode_img_v2 *ucode_img_list; 91 void *wpr_client_req_state;/*PACR_CLIENT_REQUEST_STATE originally*/ 92 u16 managed_sub_wpr_count; 93 struct lsfm_sub_wpr *psub_wpr_list; 94}; 95 96 97#endif /* NVGPU_ACR_OBJLSFM_H */
diff --git a/include/nvgpu/acr/nvgpu_acr.h b/include/nvgpu/acr/nvgpu_acr.h
deleted file mode 100644
index cdb7bb8..0000000
--- a/include/nvgpu/acr/nvgpu_acr.h
+++ /dev/null
@@ -1,192 +0,0 @@ 1/* 2 * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_ACR_H 24#define NVGPU_ACR_H 25 26#include <nvgpu/falcon.h> 27 28#include "gk20a/mm_gk20a.h" 29 30#include "acr_lsfm.h" 31#include "acr_flcnbl.h" 32#include "acr_objlsfm.h" 33#include "acr_objflcn.h" 34 35struct nvgpu_firmware; 36struct gk20a; 37struct hs_acr_ops; 38struct hs_acr; 39struct nvgpu_acr; 40 41#define HSBIN_ACR_BL_UCODE_IMAGE "pmu_bl.bin" 42#define GM20B_HSBIN_ACR_PROD_UCODE "nv_acr_ucode_prod.bin" 43#define GM20B_HSBIN_ACR_DBG_UCODE "nv_acr_ucode_dbg.bin" 44#define HSBIN_ACR_UCODE_IMAGE "acr_ucode.bin" 45#define HSBIN_ACR_PROD_UCODE "acr_ucode_prod.bin" 46#define HSBIN_ACR_DBG_UCODE "acr_ucode_dbg.bin" 47#define HSBIN_ACR_AHESASC_PROD_UCODE "acr_ahesasc_prod_ucode.bin" 48#define HSBIN_ACR_ASB_PROD_UCODE "acr_asb_prod_ucode.bin" 49#define HSBIN_ACR_AHESASC_DBG_UCODE "acr_ahesasc_dbg_ucode.bin" 50#define HSBIN_ACR_ASB_DBG_UCODE "acr_asb_dbg_ucode.bin" 51 52#define LSF_SEC2_UCODE_IMAGE_BIN "sec2_ucode_image.bin" 53#define LSF_SEC2_UCODE_DESC_BIN "sec2_ucode_desc.bin" 54#define LSF_SEC2_UCODE_SIG_BIN "sec2_sig.bin" 55 56#define MAX_SUPPORTED_LSFM 3 /*PMU, FECS, GPCCS*/ 57 58#define ACR_COMPLETION_TIMEOUT_MS 10000 /*in msec */ 59 60#define PMU_SECURE_MODE (0x1) 61#define PMU_LSFM_MANAGED (0x2) 62 63struct bin_hdr { 64 /* 0x10de */ 65 u32 bin_magic; 66 /* versioning of bin format */ 67 u32 bin_ver; 68 /* Entire image size including this header */ 69 u32 bin_size; 70 /* 71 * Header offset of executable binary metadata, 72 * start @ offset- 0x100 * 73 */ 74 u32 header_offset; 75 /* 76 * Start of executable binary data, start @ 77 * offset- 0x200 78 */ 79 u32 data_offset; 80 /* Size of executable binary */ 81 u32 data_size; 82}; 83 84struct acr_fw_header { 85 u32 sig_dbg_offset; 86 u32 sig_dbg_size; 87 u32 sig_prod_offset; 88 u32 sig_prod_size; 89 u32 patch_loc; 90 u32 patch_sig; 91 u32 hdr_offset; /* This header points to acr_ucode_header_t210_load */ 92 u32 hdr_size; /* Size of above header */ 93}; 94 95struct wpr_carveout_info { 96 
u64 wpr_base; 97 u64 nonwpr_base; 98 u64 size; 99}; 100 101/* ACR interfaces */ 102 103struct hs_flcn_bl { 104 char *bl_fw_name; 105 struct nvgpu_firmware *hs_bl_fw; 106 struct hsflcn_bl_desc *hs_bl_desc; 107 struct bin_hdr *hs_bl_bin_hdr; 108 struct nvgpu_mem hs_bl_ucode; 109}; 110 111struct hs_acr { 112 u32 acr_type; 113 114 /* HS bootloader to validate & load ACR ucode */ 115 struct hs_flcn_bl acr_hs_bl; 116 117 /* ACR ucode */ 118 char *acr_fw_name; 119 struct nvgpu_firmware *acr_fw; 120 struct nvgpu_mem acr_ucode; 121 122 union { 123 struct flcn_bl_dmem_desc bl_dmem_desc; 124 struct flcn_bl_dmem_desc_v1 bl_dmem_desc_v1; 125 }; 126 127 void *ptr_bl_dmem_desc; 128 u32 bl_dmem_desc_size; 129 130 union{ 131 struct flcn_acr_desc *acr_dmem_desc; 132 struct flcn_acr_desc_v1 *acr_dmem_desc_v1; 133 }; 134 135 /* Falcon used to execute ACR ucode */ 136 struct nvgpu_falcon *acr_flcn; 137 138 int (*acr_flcn_setup_hw_and_bl_bootstrap)(struct gk20a *g, 139 struct hs_acr *acr_desc, 140 struct nvgpu_falcon_bl_info *bl_info); 141}; 142 143#define ACR_DEFAULT 0U 144#define ACR_AHESASC 1U 145#define ACR_ASB 2U 146 147struct nvgpu_acr { 148 struct gk20a *g; 149 150 u32 bootstrap_owner; 151 u32 max_supported_lsfm; 152 u32 capabilities; 153 154 /* 155 * non-wpr space to hold LSF ucodes, 156 * ACR does copy ucode from non-wpr to wpr 157 */ 158 struct nvgpu_mem ucode_blob; 159 /* 160 * Even though this mem_desc wouldn't be used, 161 * the wpr region needs to be reserved in the 162 * allocator in dGPU case. 
163 */ 164 struct nvgpu_mem wpr_dummy; 165 166 /* ACR member for different types of ucode */ 167 /* For older dgpu/tegra ACR cuode */ 168 struct hs_acr acr; 169 /* ACR load split feature support */ 170 struct hs_acr acr_ahesasc; 171 struct hs_acr acr_asb; 172 173 u32 pmu_args; 174 struct nvgpu_firmware *pmu_fw; 175 struct nvgpu_firmware *pmu_desc; 176 177 int (*prepare_ucode_blob)(struct gk20a *g, struct nvgpu_acr *acr); 178 void (*get_wpr_info)(struct gk20a *g, struct wpr_carveout_info *inf); 179 int (*alloc_blob_space)(struct gk20a *g, size_t size, 180 struct nvgpu_mem *mem); 181 int (*patch_wpr_info_to_ucode)(struct gk20a *g, struct nvgpu_acr *acr, 182 struct hs_acr *acr_desc, bool is_recovery); 183 int (*acr_fill_bl_dmem_desc)(struct gk20a *g, 184 struct nvgpu_acr *acr, struct hs_acr *acr_desc, 185 u32 *acr_ucode_header); 186 int (*bootstrap_hs_acr)(struct gk20a *g, struct nvgpu_acr *acr, 187 struct hs_acr *acr_desc); 188 189 void (*remove_support)(struct nvgpu_acr *acr); 190}; 191#endif /* NVGPU_ACR_H */ 192
diff --git a/include/nvgpu/allocator.h b/include/nvgpu/allocator.h
deleted file mode 100644
index c444543..0000000
--- a/include/nvgpu/allocator.h
+++ /dev/null
@@ -1,331 +0,0 @@ 1/* 2 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_ALLOCATOR_H 24#define NVGPU_ALLOCATOR_H 25 26#ifdef __KERNEL__ 27/* 28 * The Linux kernel has this notion of seq_files for printing info to userspace. 29 * One of the allocator function pointers takes advantage of this and allows the 30 * debug output to be directed either to nvgpu_log() or a seq_file. 31 */ 32#include <linux/seq_file.h> 33#endif 34 35#include <nvgpu/log.h> 36#include <nvgpu/lock.h> 37#include <nvgpu/list.h> 38#include <nvgpu/types.h> 39 40/* #define ALLOCATOR_DEBUG_FINE */ 41 42struct nvgpu_allocator; 43struct nvgpu_alloc_carveout; 44struct vm_gk20a; 45struct gk20a; 46 47/* 48 * Operations for an allocator to implement. 
49 */ 50struct nvgpu_allocator_ops { 51 u64 (*alloc)(struct nvgpu_allocator *allocator, u64 len); 52 u64 (*alloc_pte)(struct nvgpu_allocator *allocator, u64 len, 53 u32 page_size); 54 void (*free)(struct nvgpu_allocator *allocator, u64 addr); 55 56 /* 57 * Special interface to allocate a memory region with a specific 58 * starting address. Yikes. Note: if free() works for freeing both 59 * regular and fixed allocations then free_fixed() does not need to 60 * be implemented. This behavior exists for legacy reasons and should 61 * not be propagated to new allocators. 62 * 63 * For allocators where the @page_size field is not applicable it can 64 * be left as 0. Otherwise a valid page size should be passed (4k or 65 * what the large page size is). 66 */ 67 u64 (*alloc_fixed)(struct nvgpu_allocator *allocator, 68 u64 base, u64 len, u32 page_size); 69 void (*free_fixed)(struct nvgpu_allocator *allocator, 70 u64 base, u64 len); 71 72 /* 73 * Allow allocators to reserve space for carveouts. 74 */ 75 int (*reserve_carveout)(struct nvgpu_allocator *allocator, 76 struct nvgpu_alloc_carveout *co); 77 void (*release_carveout)(struct nvgpu_allocator *allocator, 78 struct nvgpu_alloc_carveout *co); 79 80 /* 81 * Returns info about the allocator. 82 */ 83 u64 (*base)(struct nvgpu_allocator *allocator); 84 u64 (*length)(struct nvgpu_allocator *allocator); 85 u64 (*end)(struct nvgpu_allocator *allocator); 86 bool (*inited)(struct nvgpu_allocator *allocator); 87 u64 (*space)(struct nvgpu_allocator *allocator); 88 89 /* Destructor. */ 90 void (*fini)(struct nvgpu_allocator *allocator); 91 92#ifdef __KERNEL__ 93 /* Debugging. */ 94 void (*print_stats)(struct nvgpu_allocator *allocator, 95 struct seq_file *s, int lock); 96#endif 97}; 98 99struct nvgpu_allocator { 100 struct gk20a *g; 101 102 char name[32]; 103 struct nvgpu_mutex lock; 104 105 void *priv; 106 const struct nvgpu_allocator_ops *ops; 107 108 struct dentry *debugfs_entry; 109 bool debug; /* Control for debug msgs. 
*/ 110}; 111 112struct nvgpu_alloc_carveout { 113 const char *name; 114 u64 base; 115 u64 length; 116 117 struct nvgpu_allocator *allocator; 118 119 /* 120 * For usage by the allocator implementation. 121 */ 122 struct nvgpu_list_node co_entry; 123}; 124 125static inline struct nvgpu_alloc_carveout * 126nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node) 127{ 128 return (struct nvgpu_alloc_carveout *) 129 ((uintptr_t)node - offsetof(struct nvgpu_alloc_carveout, co_entry)); 130}; 131 132#define NVGPU_CARVEOUT(local_name, local_base, local_length) \ 133 { \ 134 .name = (local_name), \ 135 .base = (local_base), \ 136 .length = (local_length) \ 137 } 138 139/* 140 * These are the available allocator flags. 141 * 142 * GPU_ALLOC_GVA_SPACE 143 * 144 * This flag makes sense for the buddy allocator only. It specifies that the 145 * allocator will be used for managing a GVA space. When managing GVA spaces 146 * special care has to be taken to ensure that allocations of similar PTE 147 * sizes are placed in the same PDE block. This allows the higher level 148 * code to skip defining both small and large PTE tables for every PDE. That 149 * can save considerable memory for address spaces that have a lot of 150 * allocations. 151 * 152 * GPU_ALLOC_NO_ALLOC_PAGE 153 * 154 * For any allocator that needs to manage a resource in a latency critical 155 * path this flag specifies that the allocator should not use any kmalloc() 156 * or similar functions during normal operation. Initialization routines 157 * may still use kmalloc(). This prevents the possibility of long waits for 158 * pages when using alloc_page(). Currently only the bitmap allocator 159 * implements this functionality. 160 * 161 * Also note that if you accept this flag then you must also define the 162 * free_fixed() function. 
Since no meta-data is allocated to help free 163 * allocations you need to keep track of the meta-data yourself (in this 164 * case the base and length of the allocation as opposed to just the base 165 * of the allocation). 166 * 167 * GPU_ALLOC_4K_VIDMEM_PAGES 168 * 169 * We manage vidmem pages at a large page granularity for performance 170 * reasons; however, this can lead to wasting memory. For page allocators 171 * setting this flag will tell the allocator to manage pools of 4K pages 172 * inside internally allocated large pages. 173 * 174 * Currently this flag is ignored since the only usage of the page allocator 175 * uses a 4K block size already. However, this flag has been reserved since 176 * it will be necessary in the future. 177 * 178 * GPU_ALLOC_FORCE_CONTIG 179 * 180 * Force allocations to be contiguous. Currently only relevant for page 181 * allocators since all other allocators are naturally contiguous. 182 * 183 * GPU_ALLOC_NO_SCATTER_GATHER 184 * 185 * The page allocator normally returns a scatter gather data structure for 186 * allocations (to handle discontiguous pages). However, at times that can 187 * be annoying so this flag forces the page allocator to return a u64 188 * pointing to the allocation base (requires GPU_ALLOC_FORCE_CONTIG to be 189 * set as well). 190 */ 191#define GPU_ALLOC_GVA_SPACE BIT64(0) 192#define GPU_ALLOC_NO_ALLOC_PAGE BIT64(1) 193#define GPU_ALLOC_4K_VIDMEM_PAGES BIT64(2) 194#define GPU_ALLOC_FORCE_CONTIG BIT64(3) 195#define GPU_ALLOC_NO_SCATTER_GATHER BIT64(4) 196 197static inline void alloc_lock(struct nvgpu_allocator *a) 198{ 199 nvgpu_mutex_acquire(&a->lock); 200} 201 202static inline void alloc_unlock(struct nvgpu_allocator *a) 203{ 204 nvgpu_mutex_release(&a->lock); 205} 206 207/* 208 * Buddy allocator specific initializers. 
209 */ 210int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, 211 struct vm_gk20a *vm, const char *name, 212 u64 base, u64 size, u64 blk_size, 213 u64 max_order, u64 flags); 214 215/* 216 * Bitmap initializers. 217 */ 218int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, 219 const char *name, u64 base, u64 length, 220 u64 blk_size, u64 flags); 221 222/* 223 * Page allocator initializers. 224 */ 225int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, 226 const char *name, u64 base, u64 length, 227 u64 blk_size, u64 flags); 228 229/* 230 * Lockless allocatior initializers. 231 * Note: This allocator can only allocate fixed-size structures of a 232 * pre-defined size. 233 */ 234int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, 235 const char *name, u64 base, u64 length, 236 u64 struct_size, u64 flags); 237 238#define GPU_BALLOC_MAX_ORDER 31U 239 240/* 241 * Allocator APIs. 242 */ 243u64 nvgpu_alloc(struct nvgpu_allocator *allocator, u64 len); 244u64 nvgpu_alloc_pte(struct nvgpu_allocator *a, u64 len, u32 page_size); 245void nvgpu_free(struct nvgpu_allocator *allocator, u64 addr); 246 247u64 nvgpu_alloc_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len, 248 u32 page_size); 249void nvgpu_free_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len); 250 251int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a, 252 struct nvgpu_alloc_carveout *co); 253void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a, 254 struct nvgpu_alloc_carveout *co); 255 256u64 nvgpu_alloc_base(struct nvgpu_allocator *a); 257u64 nvgpu_alloc_length(struct nvgpu_allocator *a); 258u64 nvgpu_alloc_end(struct nvgpu_allocator *a); 259bool nvgpu_alloc_initialized(struct nvgpu_allocator *a); 260u64 nvgpu_alloc_space(struct nvgpu_allocator *a); 261 262void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator); 263 264#ifdef __KERNEL__ 265void nvgpu_alloc_print_stats(struct 
nvgpu_allocator *a, 266 struct seq_file *s, int lock); 267#endif 268 269static inline struct gk20a *nvgpu_alloc_to_gpu(struct nvgpu_allocator *a) 270{ 271 return a->g; 272} 273 274#ifdef CONFIG_DEBUG_FS 275/* 276 * Common functionality for the internals of the allocators. 277 */ 278void nvgpu_init_alloc_debug(struct gk20a *g, struct nvgpu_allocator *a); 279void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a); 280#endif 281 282int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g, 283 const char *name, void *priv, bool dbg, 284 const struct nvgpu_allocator_ops *ops); 285 286static inline void nvgpu_alloc_enable_dbg(struct nvgpu_allocator *a) 287{ 288 a->debug = true; 289} 290 291static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a) 292{ 293 a->debug = false; 294} 295 296/* 297 * Debug stuff. 298 */ 299#ifdef __KERNEL__ 300#define __alloc_pstat(seq, allocator, fmt, arg...) \ 301 do { \ 302 if (seq) \ 303 seq_printf(seq, fmt "\n", ##arg); \ 304 else \ 305 alloc_dbg(allocator, fmt, ##arg); \ 306 } while (0) 307#endif 308 309#define do_alloc_dbg(a, fmt, arg...) \ 310 nvgpu_log((a)->g, gpu_dbg_alloc, "%25s " fmt, (a)->name, ##arg) 311 312/* 313 * This gives finer control over debugging messages. By defining the 314 * ALLOCATOR_DEBUG_FINE macro prints for an allocator will only get made if 315 * that allocator's debug flag is set. 316 * 317 * Otherwise debugging is as normal: debug statements for all allocators 318 * if the GPU debugging mask bit is set. Note: even when ALLOCATOR_DEBUG_FINE 319 * is set gpu_dbg_alloc must still also be set to true. 320 */ 321#if defined(ALLOCATOR_DEBUG_FINE) 322#define alloc_dbg(a, fmt, arg...) \ 323 do { \ 324 if ((a)->debug) \ 325 do_alloc_dbg((a), fmt, ##arg); \ 326 } while (0) 327#else 328#define alloc_dbg(a, fmt, arg...) do_alloc_dbg(a, fmt, ##arg) 329#endif 330 331#endif /* NVGPU_ALLOCATOR_H */
diff --git a/include/nvgpu/as.h b/include/nvgpu/as.h
deleted file mode 100644
index f2249f9..0000000
--- a/include/nvgpu/as.h
+++ /dev/null
@@ -1,54 +0,0 @@ 1/* 2 * GK20A Address Spaces 3 * 4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24#ifndef NVGPU_AS_H 25#define NVGPU_AS_H 26 27#include <nvgpu/types.h> 28 29struct vm_gk20a; 30struct gk20a; 31 32struct gk20a_as { 33 int last_share_id; /* dummy allocator for now */ 34}; 35 36struct gk20a_as_share { 37 struct gk20a_as *as; 38 struct vm_gk20a *vm; 39 int id; 40}; 41 42/* 43 * AS allocation flags. 44 */ 45#define NVGPU_AS_ALLOC_USERSPACE_MANAGED (1 << 0) 46 47int gk20a_as_release_share(struct gk20a_as_share *as_share); 48 49/* if big_page_size == 0, the default big page size is used */ 50int gk20a_as_alloc_share(struct gk20a *g, u32 big_page_size, 51 u32 flags, struct gk20a_as_share **out); 52 53struct gk20a *gk20a_from_as(struct gk20a_as *as); 54#endif /* NVGPU_AS_H */
diff --git a/include/nvgpu/atomic.h b/include/nvgpu/atomic.h
deleted file mode 100644
index 3edc1fc..0000000
--- a/include/nvgpu/atomic.h
+++ /dev/null
@@ -1,130 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22#ifndef NVGPU_ATOMIC_H 23#define NVGPU_ATOMIC_H 24 25#ifdef __KERNEL__ 26#include <nvgpu/linux/atomic.h> 27#elif defined(__NVGPU_POSIX__) 28#include <nvgpu/posix/atomic.h> 29#else 30#include <nvgpu_rmos/include/atomic.h> 31#endif 32 33#define NVGPU_ATOMIC_INIT(i) __nvgpu_atomic_init(i) 34#define NVGPU_ATOMIC64_INIT(i) __nvgpu_atomic64_init(i) 35 36static inline void nvgpu_atomic_set(nvgpu_atomic_t *v, int i) 37{ 38 __nvgpu_atomic_set(v, i); 39} 40static inline int nvgpu_atomic_read(nvgpu_atomic_t *v) 41{ 42 return __nvgpu_atomic_read(v); 43} 44static inline void nvgpu_atomic_inc(nvgpu_atomic_t *v) 45{ 46 __nvgpu_atomic_inc(v); 47} 48static inline int nvgpu_atomic_inc_return(nvgpu_atomic_t *v) 49{ 50 return __nvgpu_atomic_inc_return(v); 51} 52static inline void nvgpu_atomic_dec(nvgpu_atomic_t *v) 53{ 54 __nvgpu_atomic_dec(v); 55} 56static inline int nvgpu_atomic_dec_return(nvgpu_atomic_t *v) 57{ 58 return __nvgpu_atomic_dec_return(v); 59} 60static inline int nvgpu_atomic_cmpxchg(nvgpu_atomic_t *v, int old, int new) 61{ 62 return __nvgpu_atomic_cmpxchg(v, old, new); 63} 64static inline int nvgpu_atomic_xchg(nvgpu_atomic_t *v, int new) 65{ 66 return __nvgpu_atomic_xchg(v, new); 67} 68static inline bool nvgpu_atomic_inc_and_test(nvgpu_atomic_t *v) 69{ 70 return __nvgpu_atomic_inc_and_test(v); 71} 72static inline bool nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v) 73{ 74 return __nvgpu_atomic_dec_and_test(v); 75} 76static inline bool nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v) 77{ 78 return __nvgpu_atomic_sub_and_test(i, v); 79} 80static inline int nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v) 81{ 82 return __nvgpu_atomic_add_return(i, v); 83} 84static inline int nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u) 85{ 86 return __nvgpu_atomic_add_unless(v, a, u); 87} 88static inline void nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i) 89{ 90 return __nvgpu_atomic64_set(v, i); 91} 92static inline long nvgpu_atomic64_read(nvgpu_atomic64_t *v) 93{ 94 
return __nvgpu_atomic64_read(v); 95} 96static inline void nvgpu_atomic64_add(long x, nvgpu_atomic64_t *v) 97{ 98 __nvgpu_atomic64_add(x, v); 99} 100static inline void nvgpu_atomic64_inc(nvgpu_atomic64_t *v) 101{ 102 __nvgpu_atomic64_inc(v); 103} 104static inline long nvgpu_atomic64_inc_return(nvgpu_atomic64_t *v) 105{ 106 return __nvgpu_atomic64_inc_return(v); 107} 108static inline void nvgpu_atomic64_dec(nvgpu_atomic64_t *v) 109{ 110 __nvgpu_atomic64_dec(v); 111} 112static inline void nvgpu_atomic64_dec_return(nvgpu_atomic64_t *v) 113{ 114 __nvgpu_atomic64_dec_return(v); 115} 116static inline long nvgpu_atomic64_cmpxchg(nvgpu_atomic64_t *v, long old, 117 long new) 118{ 119 return __nvgpu_atomic64_cmpxchg(v, old, new); 120} 121static inline void nvgpu_atomic64_sub(long x, nvgpu_atomic64_t *v) 122{ 123 __nvgpu_atomic64_sub(x, v); 124} 125static inline long nvgpu_atomic64_sub_return(long x, nvgpu_atomic64_t *v) 126{ 127 return __nvgpu_atomic64_sub_return(x, v); 128} 129 130#endif /* NVGPU_ATOMIC_H */
diff --git a/include/nvgpu/barrier.h b/include/nvgpu/barrier.h
deleted file mode 100644
index f0b6b2b..0000000
--- a/include/nvgpu/barrier.h
+++ /dev/null
@@ -1,61 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23/* This file contains NVGPU_* high-level abstractions for various 24 * memor-barrier operations available in linux/kernel. 
Every OS 25 * should provide their own OS specific calls under this common API 26 */ 27 28#ifndef NVGPU_BARRIER_H 29#define NVGPU_BARRIER_H 30 31#ifdef __KERNEL__ 32#include <nvgpu/linux/barrier.h> 33#elif defined(__NVGPU_POSIX__) 34#include <nvgpu/posix/barrier.h> 35#else 36#include <nvgpu_rmos/include/barrier.h> 37#endif 38 39#define nvgpu_mb() __nvgpu_mb() 40#define nvgpu_rmb() __nvgpu_rmb() 41#define nvgpu_wmb() __nvgpu_wmb() 42 43#define nvgpu_smp_mb() __nvgpu_smp_mb() 44#define nvgpu_smp_rmb() __nvgpu_smp_rmb() 45#define nvgpu_smp_wmb() __nvgpu_smp_wmb() 46 47#define nvgpu_read_barrier_depends() __nvgpu_read_barrier_depends() 48#define nvgpu_smp_read_barrier_depends() __nvgpu_smp_read_barrier_depends() 49 50#define NV_ACCESS_ONCE(x) __NV_ACCESS_ONCE(x) 51 52/* 53 * Sometimes we want to prevent speculation. 54 */ 55#ifdef __NVGPU_PREVENT_UNTRUSTED_SPECULATION 56#define nvgpu_speculation_barrier() __nvgpu_speculation_barrier() 57#else 58#define nvgpu_speculation_barrier() 59#endif 60 61#endif /* NVGPU_BARRIER_H */
diff --git a/include/nvgpu/bios.h b/include/nvgpu/bios.h
deleted file mode 100644
index 7d729b6..0000000
--- a/include/nvgpu/bios.h
+++ /dev/null
@@ -1,1123 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_BIOS_H 24#define NVGPU_BIOS_H 25 26#include <nvgpu/types.h> 27 28struct gk20a; 29 30#define PERF_PTRS_WIDTH 0x4 31#define PERF_PTRS_WIDTH_16 0x2 32 33enum { 34 CLOCKS_TABLE = 2, 35 CLOCK_PROGRAMMING_TABLE, 36 FLL_TABLE, 37 VIN_TABLE, 38 FREQUENCY_CONTROLLER_TABLE 39}; 40 41enum { 42 PERFORMANCE_TABLE = 0, 43 MEMORY_CLOCK_TABLE, 44 MEMORY_TWEAK_TABLE, 45 POWER_CONTROL_TABLE, 46 THERMAL_CONTROL_TABLE, 47 THERMAL_DEVICE_TABLE, 48 THERMAL_COOLERS_TABLE, 49 PERFORMANCE_SETTINGS_SCRIPT, 50 CONTINUOUS_VIRTUAL_BINNING_TABLE, 51 POWER_SENSORS_TABLE = 0xA, 52 POWER_CAPPING_TABLE = 0xB, 53 POWER_TOPOLOGY_TABLE = 0xF, 54 THERMAL_CHANNEL_TABLE = 0x12, 55 VOLTAGE_RAIL_TABLE = 26, 56 VOLTAGE_DEVICE_TABLE, 57 VOLTAGE_POLICY_TABLE, 58 LOWPOWER_TABLE, 59 LOWPOWER_GR_TABLE = 32, 60 LOWPOWER_MS_TABLE = 33, 61}; 62 63enum { 64 VP_FIELD_TABLE = 0, 65 VP_FIELD_REGISTER, 66 VP_TRANSLATION_TABLE, 67}; 68 69struct bit_token { 70 u8 token_id; 71 u8 data_version; 72 u16 data_size; 73 u16 data_ptr; 74} __packed; 75 76#define BIOS_GET_FIELD(value, name) ((value & name##_MASK) >> name##_SHIFT) 77 78struct fll_descriptor_header { 79 u8 version; 80 u8 size; 81} __packed; 82 83#define FLL_DESCRIPTOR_HEADER_10_SIZE_4 4U 84#define FLL_DESCRIPTOR_HEADER_10_SIZE_6 6U 85 86struct fll_descriptor_header_10 { 87 u8 version; 88 u8 header_size; 89 u8 entry_size; 90 u8 entry_count; 91 u16 max_min_freq_mhz; 92} __packed; 93 94#define FLL_DESCRIPTOR_ENTRY_10_SIZE 15U 95 96struct fll_descriptor_entry_10 { 97 u8 fll_device_type; 98 u8 clk_domain; 99 u8 fll_device_id; 100 u16 lut_params; 101 u8 vin_idx_logic; 102 u8 vin_idx_sram; 103 u16 fll_params; 104 u8 min_freq_vfe_idx; 105 u8 freq_ctrl_idx; 106 u16 ref_freq_mhz; 107 u16 ffr_cutoff_freq_mhz; 108} __packed; 109 110#define NV_FLL_DESC_FLL_PARAMS_MDIV_MASK 0x1F 111#define NV_FLL_DESC_FLL_PARAMS_MDIV_SHIFT 0 112 113#define NV_FLL_DESC_FLL_PARAMS_SKIP_PLDIV_BELOW_DVCO_MIN_MASK 0x20 114#define 
NV_FLL_DESC_FLL_PARAMS_SKIP_PLDIV_BELOW_DVCO_MIN_SHIFT 5 115 116#define NV_FLL_DESC_LUT_PARAMS_VSELECT_MASK 0x3 117#define NV_FLL_DESC_LUT_PARAMS_VSELECT_SHIFT 0 118 119#define NV_FLL_DESC_LUT_PARAMS_HYSTERISIS_THRESHOLD_MASK 0x3C 120#define NV_FLL_DESC_LUT_PARAMS_HYSTERISIS_THRESHOLD_SHIFT 2 121 122struct vin_descriptor_header_10 { 123 u8 version; 124 u8 header_sizee; 125 u8 entry_size; 126 u8 entry_count; 127 u8 flags0; 128 u32 vin_cal; 129} __packed; 130 131struct vin_descriptor_entry_10 { 132 u8 vin_device_type; 133 u8 volt_domain_vbios; 134 u8 vin_device_id; 135} __packed; 136 137#define NV_VIN_DESC_FLAGS0_VIN_CAL_REVISION_MASK 0x7 138#define NV_VIN_DESC_FLAGS0_VIN_CAL_REVISION_SHIFT 0 139 140#define NV_VIN_DESC_FLAGS0_VIN_CAL_TYPE_MASK 0xF0 141#define NV_VIN_DESC_FLAGS0_VIN_CAL_TYPE_SHIFT 4 142 143#define NV_VIN_DESC_FLAGS0_DISABLE_CONTROL_MASK 0x8 144#define NV_VIN_DESC_FLAGS0_DISABLE_CONTROL_SHIFT 3 145 146#define NV_VIN_DESC_VIN_CAL_SLOPE_FRACTION_MASK 0x1FF 147#define NV_VIN_DESC_VIN_CAL_SLOPE_FRACTION_SHIFT 0 148 149#define NV_VIN_DESC_VIN_CAL_SLOPE_INTEGER_MASK 0x3C00 150#define NV_VIN_DESC_VIN_CAL_SLOPE_INTEGER_SHIFT 10 151 152#define NV_VIN_DESC_VIN_CAL_INTERCEPT_FRACTION_MASK 0x3C000 153#define NV_VIN_DESC_VIN_CAL_INTERCEPT_FRACTION_SHIFT 14 154 155#define NV_VIN_DESC_VIN_CAL_INTERCEPT_INTEGER_MASK 0xFFC0000 156#define NV_VIN_DESC_VIN_CAL_INTERCEPT_INTEGER_SHIFT 18 157 158#define NV_VIN_DESC_VIN_CAL_OFFSET_MASK 0x7F 159#define NV_VIN_DESC_VIN_CAL_OFFSET_SHIFT 0 160 161#define NV_VIN_DESC_VIN_CAL_GAIN_MASK 0xF80 162#define NV_VIN_DESC_VIN_CAL_GAIN_SHIFT 7 163 164#define VBIOS_CLOCKS_TABLE_1X_HEADER_SIZE_07 0x07U 165struct vbios_clocks_table_1x_header { 166 u8 version; 167 u8 header_size; 168 u8 entry_size; 169 u8 entry_count; 170 u8 clocks_hal; 171 u16 cntr_sampling_periodms; 172} __packed; 173 174#define VBIOS_CLOCKS_TABLE_35_HEADER_SIZE_09 0x09U 175struct vbios_clocks_table_35_header { 176 u8 version; 177 u8 header_size; 178 u8 entry_size; 179 u8 
entry_count; 180 u8 clocks_hal; 181 u16 cntr_sampling_periodms; 182 u16 reference_window; 183} __packed; 184 185#define VBIOS_CLOCKS_TABLE_1X_ENTRY_SIZE_09 0x09U 186struct vbios_clocks_table_1x_entry { 187 u8 flags0; 188 u16 param0; 189 u32 param1; 190 u16 param2; 191} __packed; 192 193#define VBIOS_CLOCKS_TABLE_35_ENTRY_SIZE_11 0x0BU 194struct vbios_clocks_table_35_entry { 195 u8 flags0; 196 u16 param0; 197 u32 param1; 198 u16 param2; 199 u16 param3; 200} __packed; 201 202#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_MASK 0x1F 203#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_SHIFT 0 204#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_FIXED 0x00 205#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_MASTER 0x01 206#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_FLAGS0_USAGE_SLAVE 0x02 207 208#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_FIRST_MASK 0xFF 209#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_FIRST_SHIFT 0 210#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_LAST_MASK 0xFF00 211#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM0_PROG_CLK_PROG_IDX_LAST_SHIFT 0x08 212 213#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_FIXED_FREQUENCY_MHZ_MASK 0xFFFF 214#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_FIXED_FREQUENCY_MHZ_SHIFT 0 215#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MIN_MHZ_MASK 0xFFFF 216#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MIN_MHZ_SHIFT 0 217 218#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MAX_MHZ_MASK 0xFFFF0000 219#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_MASTER_FREQ_OC_DELTA_MAX_MHZ_SHIFT 0 220 221#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_SLAVE_MASTER_DOMAIN_MASK 0xF 222#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM1_SLAVE_MASTER_DOMAIN_SHIFT 0 223 224#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_UNAWARE_ORDERING_IDX_MASK 0xF 225#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_UNAWARE_ORDERING_IDX_SHIFT 
0 226 227#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_AWARE_ORDERING_IDX_MASK 0xF0 228#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_NOISE_AWARE_ORDERING_IDX_SHIFT 4 229 230#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING_MASK 0x100 231#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING_SHIFT 8 232#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING_FALSE 0x00 233#define NV_VBIOS_CLOCKS_TABLE_1X_ENTRY_PARAM2_PROG_FORCE_NOISE_UNAWARE_ORDERING_TRUE 0x01 234 235#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_PRE_VOLT_ORDERING_IDX_MASK 0xF 236#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_PRE_VOLT_ORDERING_IDX_SHIFT 0 237 238#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_POST_VOLT_ORDERING_IDX_MASK 0xF0 239#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM2_PROG_POST_VOLT_ORDERING_IDX_SHIFT 4 240 241#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM3_CLK_MONITOR_THRESHOLD_MIN_MASK 0xFF 242#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM3_CLK_MONITOR_THRESHOLD_MIN_SHIFT 0 243#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM3_CLK_MONITOR_THRESHOLD_MAX_MASK 0xFF00 244#define NV_VBIOS_CLOCKS_TABLE_35_ENTRY_PARAM3_CLK_MONITOR_THRESHOLD_MAX_SHIFT 0x08 245 246#define VBIOS_CLOCK_PROGRAMMING_TABLE_1X_HEADER_SIZE_08 0x08U 247struct vbios_clock_programming_table_1x_header { 248 u8 version; 249 u8 header_size; 250 u8 entry_size; 251 u8 entry_count; 252 u8 slave_entry_size; 253 u8 slave_entry_count; 254 u8 vf_entry_size; 255 u8 vf_entry_count; 256} __packed; 257 258#define VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_SIZE_05 0x05U 259#define VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_SIZE_0D 0x0DU 260struct vbios_clock_programming_table_1x_entry { 261 u8 flags0; 262 u16 freq_max_mhz; 263 u8 param0; 264 u8 param1; 265 u32 rsvd; 266 u32 rsvd1; 267} __packed; 268 269#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASK 0xF 270#define 
NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_SHIFT 0 271#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_RATIO 0x00 272#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_MASTER_TABLE 0x01 273#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_TYPE_SLAVE 0x02 274 275#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_MASK 0x70 276#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_SHIFT 4 277#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_PLL 0x00 278#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_ONE_SOURCE 0x01 279#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_SOURCE_FLL 0x02 280 281#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_OVOC_ENABLED_MASK 0x80 282#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_OVOC_ENABLED_SHIFT 7 283#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_OVOC_ENABLED_FALSE 0x00 284#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_FLAGS0_OVOC_ENABLED_TRUE 0x01 285 286#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_PARAM0_PLL_PLL_INDEX_MASK 0xFF 287#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_PARAM0_PLL_PLL_INDEX_SHIFT 0 288 289#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_PARAM1_PLL_FREQ_STEP_SIZE_MASK 0xFF 290#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_ENTRY_PARAM1_PLL_FREQ_STEP_SIZE_SHIFT 0 291 292#define VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_SIZE_03 0x03U 293struct vbios_clock_programming_table_1x_slave_entry { 294 u8 clk_dom_idx; 295 u16 param0; 296} __packed; 297 298#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_PARAM0_MASTER_RATIO_RATIO_MASK 0xFF 299#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_PARAM0_MASTER_RATIO_RATIO_SHIFT 0 300 301#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_PARAM0_MASTER_TABLE_FREQ_MASK 0x3FFF 302#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_SLAVE_ENTRY_PARAM0_MASTER_TABLE_FREQ_SHIFT 0 303 304#define 
VBIOS_CLOCK_PROGRAMMING_TABLE_1X_VF_ENTRY_SIZE_02 0x02U 305struct vbios_clock_programming_table_1x_vf_entry { 306 u8 vfe_idx; 307 u8 param0; 308} __packed; 309 310#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_VF_ENTRY_PARAM0_FLL_GAIN_VFE_IDX_MASK 0xFF 311#define NV_VBIOS_CLOCK_PROGRAMMING_TABLE_1X_VF_ENTRY_PARAM0_FLL_GAIN_VFE_IDX_SHIFT 0 312 313struct vbios_vfe_3x_header_struct { 314 u8 version; 315 u8 header_size; 316 u8 vfe_var_entry_size; 317 u8 vfe_var_entry_count; 318 u8 vfe_equ_entry_size; 319 u8 vfe_equ_entry_count; 320 u8 polling_periodms; 321} __packed; 322 323#define VBIOS_VFE_3X_VAR_ENTRY_SIZE_11 0x11U 324#define VBIOS_VFE_3X_VAR_ENTRY_SIZE_19 0x19U 325struct vbios_vfe_3x_var_entry_struct { 326 u8 type; 327 u32 out_range_min; 328 u32 out_range_max; 329 u32 param0; 330 u32 param1; 331 u32 param2; 332 u32 param3; 333} __packed; 334 335#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_DISABLED 0x00U 336#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_SINGLE_FREQUENCY 0x01U 337#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_SINGLE_VOLTAGE 0x02U 338#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_SINGLE_SENSED_TEMP 0x03U 339#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_SINGLE_SENSED_FUSE 0x04U 340#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_DERIVED_PRODUCT 0x05U 341#define VBIOS_VFE_3X_VAR_ENTRY_TYPE_DERIVED_SUM 0x06U 342 343#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSTEMP_TH_CH_IDX_MASK 0xFF 344#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSTEMP_TH_CH_IDX_SHIFT 0 345 346#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSTEMP_HYS_POS_MASK 0xFF00 347#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSTEMP_HYS_POS_SHIFT 8 348 349#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSTEMP_HYS_NEG_MASK 0xFF0000 350#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSTEMP_HYS_NEG_SHIFT 16 351 352#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_VFIELD_ID_MASK 0xFF 353#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_VFIELD_ID_SHIFT 0 354 355#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_VFIELD_ID_VER_MASK 0xFF00 356#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_VFIELD_ID_VER_SHIFT 8 357 358#define 
VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_EXPECTED_VER_MASK 0xFF0000 359#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_EXPECTED_VER_SHIFT 16 360 361#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_USE_DEFAULT_ON_VER_CHECK_FAIL_MASK 0x1000000 362#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_USE_DEFAULT_ON_VER_CHECK_FAIL_SHIFT 24 363 364#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_VALUE_SIGNED_INTEGER_MASK 0x2000000 365#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_VALUE_SIGNED_INTEGER_SHIFT 25 366 367#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_USE_DEFAULT_ON_VER_CHECK_FAIL_YES 0x00000001 368#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_SSFUSE_USE_DEFAULT_ON_VER_CHECK_FAIL_NO 0x00000000 369#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DPROD_VFE_VAR_IDX_0_MASK 0xFF 370#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DPROD_VFE_VAR_IDX_0_SHIFT 0 371 372#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DPROD_VFE_VAR_IDX_1_MASK 0xFF00 373#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DPROD_VFE_VAR_IDX_1_SHIFT 8 374 375#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DSUM_VFE_VAR_IDX_0_MASK 0xFF 376#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DSUM_VFE_VAR_IDX_0_SHIFT 0 377 378#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DSUM_VFE_VAR_IDX_1_MASK 0xFF00 379#define VBIOS_VFE_3X_VAR_ENTRY_PAR0_DSUM_VFE_VAR_IDX_1_SHIFT 8 380 381#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_DEFAULT_VAL_MASK 0xFFFFFFFF 382#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_DEFAULT_VAL_SHIFT 0 383 384#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_HW_CORRECTION_SCALE_MASK 0xFFFFFFFF 385#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_HW_CORRECTION_SCALE_SHIFT 0 386 387#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_HW_CORRECTION_OFFSET_MASK 0xFFFFFFFF 388#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_HW_CORRECTION_OFFSET_SHIFT 0 389 390#define VBIOS_VFE_3X_EQU_ENTRY_SIZE_17 0x17U 391#define VBIOS_VFE_3X_EQU_ENTRY_SIZE_18 0x18U 392 393struct vbios_vfe_3x_equ_entry_struct { 394 u8 type; 395 u8 var_idx; 396 u8 equ_idx_next; 397 u32 out_range_min; 398 u32 out_range_max; 399 u32 param0; 400 u32 param1; 401 u32 param2; 402 u8 param3; 403} __packed; 
404 405 406#define VBIOS_VFE_3X_EQU_ENTRY_TYPE_DISABLED 0x00U 407#define VBIOS_VFE_3X_EQU_ENTRY_TYPE_QUADRATIC 0x01U 408#define VBIOS_VFE_3X_EQU_ENTRY_TYPE_MINMAX 0x02U 409#define VBIOS_VFE_3X_EQU_ENTRY_TYPE_COMPARE 0x03U 410#define VBIOS_VFE_3X_EQU_ENTRY_TYPE_QUADRATIC_FXP 0x04U 411#define VBIOS_VFE_3X_EQU_ENTRY_TYPE_MINMAX_FXP 0x05U 412 413#define VBIOS_VFE_3X_EQU_ENTRY_IDX_INVALID 0xFFU 414 415#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_QUADRATIC_C0_MASK 0xFFFFFFFF 416#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_QUADRATIC_C0_SHIFT 0 417 418#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_VFE_EQU_IDX_0_MASK 0xFF 419#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_VFE_EQU_IDX_0_SHIFT 0 420 421#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_VFE_EQU_IDX_1_MASK 0xFF00 422#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_VFE_EQU_IDX_1_SHIFT 8 423 424#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_CRIT_MASK 0x10000 425#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_CRIT_SHIFT 16 426#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_CRIT_MIN 0x00000000 427#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_MINMAX_CRIT_MAX 0x00000001 428 429#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_COMPARE_CRIT_MASK 0xFFFFFFFF 430#define VBIOS_VFE_3X_EQU_ENTRY_PAR0_COMPARE_CRIT_SHIFT 0 431 432#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_QUADRATIC_C1_MASK 0xFFFFFFFF 433#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_QUADRATIC_C1_SHIFT 0 434 435#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_VFE_EQU_IDX_TRUE_MASK 0xFF 436#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_VFE_EQU_IDX_TRUE_SHIFT 0 437 438#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_VFE_EQU_IDX_FALSE_MASK 0xFF00 439#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_VFE_EQU_IDX_FALSE_SHIFT 8 440 441#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_FUNCTION_MASK 0x70000 442#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_FUNCTION_SHIFT 16 443#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_FUNCTION_EQUAL 0x00000000 444#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_FUNCTION_GREATER_EQ 0x00000001 445#define VBIOS_VFE_3X_EQU_ENTRY_PAR1_COMPARE_FUNCTION_GREATER 
0x00000002 446 447#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_MASK 0xF 448#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_SHIFT 0 449#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_UNITLESS 0x0 450#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_FREQ_MHZ 0x1 451#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_VOLT_UV 0x2 452#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_VF_GAIN 0x3 453#define VBIOS_VFE_3X_EQU_ENTRY_PAR3_OUTPUT_TYPE_VOLT_DELTA_UV 0x4 454 455#define NV_VFIELD_DESC_SIZE_BYTE 0x00000000U 456#define NV_VFIELD_DESC_SIZE_WORD 0x00000001U 457#define NV_VFIELD_DESC_SIZE_DWORD 0x00000002U 458#define VFIELD_SIZE(pvregentry) ((pvregentry->strap_reg_desc & 0x18U) >> 3U) 459 460#define NV_PMU_BIOS_VFIELD_DESC_CODE_INVALID 0x00000000U 461#define NV_PMU_BIOS_VFIELD_DESC_CODE_REG 0x00000001U 462#define NV_PMU_BIOS_VFIELD_DESC_CODE_INDEX_REG 0x00000002U 463 464#define NV_VFIELD_DESC_CODE_INVALID NV_PMU_BIOS_VFIELD_DESC_CODE_INVALID 465#define NV_VFIELD_DESC_CODE_REG NV_PMU_BIOS_VFIELD_DESC_CODE_REG 466#define NV_VFIELD_DESC_CODE_INDEX_REG NV_PMU_BIOS_VFIELD_DESC_CODE_INDEX_REG 467 468#define VFIELD_CODE(pvregentry) ((pvregentry->strap_reg_desc & 0xE0U) >> 5U) 469 470#define VFIELD_ID_STRAP_IDDQ 0x09U 471#define VFIELD_ID_STRAP_IDDQ_1 0x0BU 472 473#define VFIELD_REG_HEADER_SIZE 3U 474struct vfield_reg_header { 475 u8 version; 476 u8 entry_size; 477 u8 count; 478} __packed; 479 480#define VBIOS_VFIELD_REG_TABLE_VERSION_1_0 0x10U 481 482 483#define VFIELD_REG_ENTRY_SIZE 13U 484struct vfield_reg_entry { 485 u8 strap_reg_desc; 486 u32 reg; 487 u32 reg_index; 488 u32 index; 489} __packed; 490 491#define VFIELD_HEADER_SIZE 3U 492 493struct vfield_header { 494 u8 version; 495 u8 entry_size; 496 u8 count; 497} __packed; 498 499#define VBIOS_VFIELD_TABLE_VERSION_1_0 0x10U 500 501#define VFIELD_BIT_START(ventry) (ventry.strap_desc & 0x1FU) 502#define VFIELD_BIT_STOP(ventry) ((ventry.strap_desc & 0x3E0U) >> 5U) 503#define VFIELD_BIT_REG(ventry) ((ventry.strap_desc & 0x3C00U) 
>> 10U) 504 505#define VFIELD_ENTRY_SIZE 3U 506 507struct vfield_entry { 508 u8 strap_id; 509 u16 strap_desc; 510} __packed; 511 512#define PERF_CLK_DOMAINS_IDX_MAX (32U) 513#define PERF_CLK_DOMAINS_IDX_INVALID PERF_CLK_DOMAINS_IDX_MAX 514 515#define VBIOS_PSTATE_TABLE_VERSION_5X 0x50U 516#define VBIOS_PSTATE_HEADER_5X_SIZE_10 (10U) 517 518struct vbios_pstate_header_5x { 519 u8 version; 520 u8 header_size; 521 u8 base_entry_size; 522 u8 base_entry_count; 523 u8 clock_entry_size; 524 u8 clock_entry_count; 525 u8 flags0; 526 u8 initial_pstate; 527 u8 cpi_support_level; 528u8 cpi_features; 529} __packed; 530 531#define VBIOS_PSTATE_CLOCK_ENTRY_5X_SIZE_6 6U 532 533#define VBIOS_PSTATE_BASE_ENTRY_5X_SIZE_2 0x2U 534#define VBIOS_PSTATE_BASE_ENTRY_5X_SIZE_3 0x3U 535 536struct vbios_pstate_entry_clock_5x { 537 u16 param0; 538 u32 param1; 539} __packed; 540 541struct vbios_pstate_entry_5x { 542 u8 pstate_level; 543 u8 flags0; 544 u8 lpwr_entry_idx; 545 struct vbios_pstate_entry_clock_5x clockEntry[PERF_CLK_DOMAINS_IDX_MAX]; 546} __packed; 547 548#define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM0_NOM_FREQ_MHZ_SHIFT 0 549#define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM0_NOM_FREQ_MHZ_MASK 0x00003FFF 550 551#define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MIN_FREQ_MHZ_SHIFT 0 552#define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MIN_FREQ_MHZ_MASK 0x00003FFF 553 554#define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MAX_FREQ_MHZ_SHIFT 14 555#define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM1_MAX_FREQ_MHZ_MASK 0x0FFFC000 556 557#define VBIOS_PERFLEVEL_SKIP_ENTRY 0xFFU 558 559#define VBIOS_MEMORY_CLOCK_HEADER_11_VERSION 0x11U 560 561#define VBIOS_MEMORY_CLOCK_HEADER_11_0_SIZE 16U 562#define VBIOS_MEMORY_CLOCK_HEADER_11_1_SIZE 21U 563#define VBIOS_MEMORY_CLOCK_HEADER_11_2_SIZE 26U 564 565struct vbios_memory_clock_header_1x { 566 u8 version; 567 u8 header_size; 568 u8 base_entry_size; 569 u8 strap_entry_size; 570 u8 strap_entry_count; 571 u8 entry_count; 572 u8 flags; 573 u8 fbvdd_settle_time; 574 u32 cfg_pwrd_val; 575 u16 
fbvddq_high; 576 u16 fbvddq_low; 577 u32 script_list_ptr; 578 u8 script_list_count; 579 u32 cmd_script_list_ptr; 580 u8 cmd_script_list_count; 581} __packed; 582 583#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_11_2_SIZE 20U 584 585struct vbios_memory_clock_base_entry_11 { 586 u16 minimum; 587 u16 maximum; 588 u32 script_pointer; 589 u8 flags0; 590 u32 fbpa_config; 591 u32 fbpa_config1; 592 u8 flags1; 593 u8 ref_mpllssf_freq_delta; 594 u8 flags2; 595} __packed; 596 597/* Script Pointer Index */ 598/* #define VBIOS_MEMORY_CLOCK_BASE_ENTRY_11_FLAGS1_SCRIPT_INDEX 3:2*/ 599#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_11_FLAGS1_SCRIPT_INDEX_MASK \ 600 ((u8)0xc) 601#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_11_FLAGS1_SCRIPT_INDEX_SHIFT 2 602/* #define VBIOS_MEMORY_CLOCK_BASE_ENTRY_12_FLAGS2_CMD_SCRIPT_INDEX 1:0*/ 603#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_12_FLAGS2_CMD_SCRIPT_INDEX_MASK \ 604 ((u8)0x3) 605#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_12_FLAGS2_CMD_SCRIPT_INDEX_SHIFT 0 606 607#define VBIOS_POWER_SENSORS_VERSION_2X 0x20U 608#define VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08 0x00000008U 609 610struct pwr_sensors_2x_header { 611 u8 version; 612 u8 header_size; 613 u8 table_entry_size; 614 u8 num_table_entries; 615 u32 ba_script_pointer; 616} __packed; 617 618#define VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 0x00000015U 619 620struct pwr_sensors_2x_entry { 621 u8 flags0; 622 u32 class_param0; 623 u32 sensor_param0; 624 u32 sensor_param1; 625 u32 sensor_param2; 626 u32 sensor_param3; 627} __packed; 628 629#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_MASK 0xF 630#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_SHIFT 0 631#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C 0x00000001U 632 633#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX_MASK 0xFF 634#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX_SHIFT 0 635#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8_MASK 0x100 636#define 
NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8_SHIFT 8 637 638#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM_MASK 0xFFFF 639#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM_SHIFT 0 640#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM_MASK 0xFFFF0000 641#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM_SHIFT 16 642#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM_MASK 0xFFFF 643#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM_SHIFT 0 644#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION_MASK 0xFFFF0000 645#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION_SHIFT 16 646 647#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE_MASK 0xFFFF 648#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE_SHIFT 0 649#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION_MASK 0xFF0000 650#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION_SHIFT 16 651#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M_MASK 0xFFFF 652#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M_SHIFT 0 653#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B_MASK 0xFFFF0000 654#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B_SHIFT 16 655 656#define VBIOS_POWER_TOPOLOGY_VERSION_2X 0x20U 657#define VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06 0x00000006U 658 659struct pwr_topology_2x_header { 660 u8 version; 661 u8 header_size; 662 u8 table_entry_size; 663 u8 num_table_entries; 664 u8 rel_entry_size; 665 u8 num_rel_entries; 666} __packed; 667 668#define VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 0x00000016U 669 670struct pwr_topology_2x_entry { 671 u8 flags0; 672 u8 pwr_rail; 673 u32 param0; 674 u32 curr_corr_slope; 675 u32 
curr_corr_offset; 676 u32 param1; 677 u32 param2; 678} __packed; 679 680#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_MASK 0xF 681#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SHIFT 0 682#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR U8(0x00000001) 683 684#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX_MASK 0xFF 685#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX_SHIFT 0 686#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX_MASK 0xFF00 687#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX_SHIFT 8 688 689#define VBIOS_POWER_POLICY_VERSION_3X 0x30U 690#define VBIOS_POWER_POLICY_3X_HEADER_SIZE_25 0x00000025U 691 692struct pwr_policy_3x_header_struct { 693 u8 version; 694 u8 header_size; 695 u8 table_entry_size; 696 u8 num_table_entries; 697 u16 base_sample_period; 698 u16 min_client_sample_period; 699 u8 table_rel_entry_size; 700 u8 num_table_rel_entries; 701 u8 tgp_policy_idx; 702 u8 rtp_policy_idx; 703 u8 mxm_policy_idx; 704 u8 dnotifier_policy_idx; 705 u32 d2_limit; 706 u32 d3_limit; 707 u32 d4_limit; 708 u32 d5_limit; 709 u8 low_sampling_mult; 710 u8 pwr_tgt_policy_idx; 711 u8 pwr_tgt_floor_policy_idx; 712 u8 sm_bus_policy_idx; 713 u8 table_viol_entry_size; 714 u8 num_table_viol_entries; 715} __packed; 716 717#define VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E 0x0000002EU 718 719struct pwr_policy_3x_entry_struct { 720 u8 flags0; 721 u8 ch_idx; 722 u32 limit_min; 723 u32 limit_rated; 724 u32 limit_max; 725 u32 param0; 726 u32 param1; 727 u32 param2; 728 u32 param3; 729 u32 limit_batt; 730 u8 flags1; 731 u8 past_length; 732 u8 next_length; 733 u16 ratio_min; 734 u16 ratio_max; 735 u8 sample_mult; 736 u32 filter_param; 737} __packed; 738 739#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_MASK 0xF 740#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_SHIFT 0 741#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD 0x00000005U 742#define 
NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT_MASK 0x10 743#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT_SHIFT 4 744 745#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FULL_DEFLECTION_LIMIT_MASK 0x1 746#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FULL_DEFLECTION_LIMIT_SHIFT 0 747#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL_MASK 0x2 748#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL_SHIFT 1 749#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE_MASK 0x3C 750#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE_SHIFT 2 751 752#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX_MASK 0xFF 753#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX_SHIFT 0 754#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX_MASK 0xFF00 755#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX_SHIFT 8 756#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE_MASK 0x10000 757#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE_SHIFT 16 758 759#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL_MASK 0xFFFF 760#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL_SHIFT 0 761 762/* Voltage Rail Table */ 763struct vbios_voltage_rail_table_1x_header { 764 u8 version; 765 u8 header_size; 766 u8 table_entry_size; 767 u8 num_table_entries; 768 u8 volt_domain_hal; 769} __packed; 770 771#define NV_VBIOS_VOLTAGE_RAIL_1X_ENTRY_SIZE_07 0X00000007U 772#define NV_VBIOS_VOLTAGE_RAIL_1X_ENTRY_SIZE_08 0X00000008U 773#define NV_VBIOS_VOLTAGE_RAIL_1X_ENTRY_SIZE_09 0X00000009U 774#define NV_VBIOS_VOLTAGE_RAIL_1X_ENTRY_SIZE_0A 0X0000000AU 775#define NV_VBIOS_VOLTAGE_RAIL_1X_ENTRY_SIZE_0B 0X0000000BU 776#define NV_VBIOS_VOLTAGE_RAIL_1X_ENTRY_SIZE_0C 0X0000000CU 777 778struct vbios_voltage_rail_table_1x_entry { 779 u32 boot_voltage_uv; 780 u8 rel_limit_vfe_equ_idx; 781 u8 
alt_rel_limit_vfe_equidx; 782 u8 ov_limit_vfe_equ_idx; 783 u8 pwr_equ_idx; 784 u8 boot_volt_vfe_equ_idx; 785 u8 vmin_limit_vfe_equ_idx; 786 u8 volt_margin_limit_vfe_equ_idx; 787 u8 volt_scale_exp_pwr_equ_idx; 788} __packed; 789 790/* Voltage Device Table */ 791struct vbios_voltage_device_table_1x_header { 792 u8 version; 793 u8 header_size; 794 u8 table_entry_size; 795 u8 num_table_entries; 796} __packed; 797 798struct vbios_voltage_device_table_1x_entry { 799 u8 type; 800 u8 volt_domain; 801 u16 settle_time_us; 802 u32 param0; 803 u32 param1; 804 u32 param2; 805 u32 param3; 806 u32 param4; 807} __packed; 808 809#define NV_VBIOS_VOLTAGE_DEVICE_1X_ENTRY_TYPE_INVALID 0x00U 810#define NV_VBIOS_VOLTAGE_DEVICE_1X_ENTRY_TYPE_PSV 0x02U 811 812#define NV_VBIOS_VDT_1X_ENTRY_PARAM0_PSV_INPUT_FREQUENCY_MASK \ 813 GENMASK(23, 0) 814#define NV_VBIOS_VDT_1X_ENTRY_PARAM0_PSV_INPUT_FREQUENCY_SHIFT 0 815#define NV_VBIOS_VDT_1X_ENTRY_PARAM0_PSV_EXT_DEVICE_INDEX_MASK \ 816 GENMASK(31, 24) 817#define NV_VBIOS_VDT_1X_ENTRY_PARAM0_PSV_EXT_DEVICE_INDEX_SHIFT 24 818 819#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_VOLTAGE_MINIMUM_MASK \ 820 GENMASK(23, 0) 821#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_VOLTAGE_MINIMUM_SHIFT 0 822#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_OPERATION_TYPE_MASK \ 823 GENMASK(31, 24) 824#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_OPERATION_TYPE_SHIFT 24 825#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_OPERATION_TYPE_DEFAULT 0x00 826#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_OPERATION_TYPE_LPWR_STEADY_STATE \ 827 0x01 828#define NV_VBIOS_VDT_1X_ENTRY_PARAM1_PSV_OPERATION_TYPE_LPWR_SLEEP_STATE \ 829 0x02 830#define NV_VBIOS_VDT_1X_ENTRY_PARAM2_PSV_VOLTAGE_MAXIMUM_MASK \ 831 GENMASK(23, 0) 832#define NV_VBIOS_VDT_1X_ENTRY_PARAM2_PSV_VOLTAGE_MAXIMUM_SHIFT 0 833#define NV_VBIOS_VDT_1X_ENTRY_PARAM2_PSV_RSVD_MASK \ 834 GENMASK(31, 24) 835#define NV_VBIOS_VDT_1X_ENTRY_PARAM2_PSV_RSVD_SHIFT 24 836 837#define NV_VBIOS_VDT_1X_ENTRY_PARAM3_PSV_VOLTAGE_BASE_MASK \ 838 GENMASK(23, 0) 839#define 
NV_VBIOS_VDT_1X_ENTRY_PARAM3_PSV_VOLTAGE_BASE_SHIFT 0 840#define NV_VBIOS_VDT_1X_ENTRY_PARAM3_PSV_VOLTAGE_STEPS_MASK \ 841 GENMASK(31, 24) 842#define NV_VBIOS_VDT_1X_ENTRY_PARAM3_PSV_VOLTAGE_STEPS_SHIFT 24 843 844#define NV_VBIOS_VDT_1X_ENTRY_PARAM4_PSV_OFFSET_SCALE_MASK \ 845 GENMASK(23, 0) 846#define NV_VBIOS_VDT_1X_ENTRY_PARAM4_PSV_OFFSET_SCALE_SHIFT 0 847#define NV_VBIOS_VDT_1X_ENTRY_PARAM4_PSV_RSVD_MASK \ 848 GENMASK(31, 24) 849#define NV_VBIOS_VDT_1X_ENTRY_PARAM4_PSV_RSVD_SHIFT 24 850 851/* Voltage Policy Table */ 852struct vbios_voltage_policy_table_1x_header { 853 u8 version; 854 u8 header_size; 855 u8 table_entry_size; 856 u8 num_table_entries; 857 u8 perf_core_vf_seq_policy_idx; 858} __packed; 859 860struct vbios_voltage_policy_table_1x_entry { 861 u8 type; 862 u32 param0; 863 u32 param1; 864 u32 param2; 865 u32 param3; 866} __packed; 867 868#define NV_VBIOS_VOLTAGE_POLICY_1X_ENTRY_TYPE_INVALID 0x00U 869#define NV_VBIOS_VOLTAGE_POLICY_1X_ENTRY_TYPE_SINGLE_RAIL 0x01U 870#define NV_VBIOS_VOLTAGE_POLICY_1X_ENTRY_TYPE_SR_MULTI_STEP 0x02U 871#define NV_VBIOS_VOLTAGE_POLICY_1X_ENTRY_TYPE_SR_SINGLE_STEP 0x03U 872#define NV_VBIOS_VOLTAGE_POLICY_1X_ENTRY_TYPE_SINGLE_RAIL_MULTI_STEP 0x04U 873 874#define NV_VBIOS_VPT_ENTRY_PARAM0_SINGLE_RAIL_VOLT_DOMAIN_MASK \ 875 GENMASK(7, 0) 876#define NV_VBIOS_VPT_ENTRY_PARAM0_SINGLE_RAIL_VOLT_DOMAIN_SHIFT 0 877#define NV_VBIOS_VPT_ENTRY_PARAM0_RSVD_MASK GENMASK(8, 31) 878#define NV_VBIOS_VPT_ENTRY_PARAM0_RSVD_SHIFT 8 879 880#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_VD_MASTER_MASK \ 881 GENMASK(7, 0) 882#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_VD_MASTER_SHIFT 0 883#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_VD_SLAVE_MASK \ 884 GENMASK(15, 8) 885#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_VD_SLAVE_SHIFT 8 886#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_DELTA_SM_MIN_MASK \ 887 GENMASK(23, 16) 888#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_DELTA_SM_MIN_SHIFT 16 889#define NV_VBIOS_VPT_ENTRY_PARAM0_SR_DELTA_SM_MAX_MASK \ 890 GENMASK(31, 24) 891#define 
NV_VBIOS_VPT_ENTRY_PARAM0_SR_DELTA_SM_MAX_SHIFT 24 892 893#define NV_VBIOS_VPT_ENTRY_PARAM1_SR_SETTLE_TIME_INTERMEDIATE_MASK \ 894 GENMASK(15, 0) 895#define NV_VBIOS_VPT_ENTRY_PARAM1_SR_SETTLE_TIME_INTERMEDIATE_SHIFT 0 896#define NV_VBIOS_VPT_ENTRY_PARAM2_SR_RAMP_UP_STEP_SIZE_UV_MASK \ 897 GENMASK(31, 0) 898#define NV_VBIOS_VPT_ENTRY_PARAM2_SR_RAMP_UP_STEP_SIZE_UV_SHIFT 0 899#define NV_VBIOS_VPT_ENTRY_PARAM3_SR_RAMP_DOWN_STEP_SIZE_UV_MASK \ 900 GENMASK(31, 0) 901#define NV_VBIOS_VPT_ENTRY_PARAM3_SR_RAMP_DOWN_STEP_SIZE_UV_SHIFT 0 902 903/* Type-Specific Parameter DWORD 0 - Type = _SR_MULTI_STEP */ 904#define NV_VBIOS_VPT_ENTRY_PARAM1_SR_SETTLE_TIME_INTERMEDIATE_MASK \ 905 GENMASK(15, 0) 906#define NV_VBIOS_VPT_ENTRY_PARAM1_SR_SETTLE_TIME_INTERMEDIATE_SHIFT \ 907 0 908 909#define VBIOS_THERM_DEVICE_VERSION_1X 0x10U 910 911#define VBIOS_THERM_DEVICE_1X_HEADER_SIZE_04 0x00000004U 912 913struct therm_device_1x_header { 914 u8 version; 915 u8 header_size; 916 u8 table_entry_size; 917 u8 num_table_entries; 918} ; 919 920struct therm_device_1x_entry { 921 u8 class_id; 922 u8 param0; 923 u8 flags; 924} ; 925 926#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_INVALID 0x00U 927#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_GPU 0x01U 928#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_GPU_GPC_TSOSC 0x02U 929#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_GPU_GPC_SCI 0x03U 930#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_HBM2_SITE 0x70U 931#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_HBM2_COMBINED 0x71U 932 933#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_PARAM0_I2C_DEVICE_INDEX_MASK 0xFF 934#define NV_VBIOS_THERM_DEVICE_1X_ENTRY_PARAM0_I2C_DEVICE_INDEX_SHIFT 0 935 936#define VBIOS_THERM_CHANNEL_VERSION_1X 0x10U 937 938#define VBIOS_THERM_CHANNEL_1X_HEADER_SIZE_09 0x00000009U 939 940struct therm_channel_1x_header { 941 u8 version; 942 u8 header_size; 943 u8 table_entry_size; 944 u8 num_table_entries; 945 u8 gpu_avg_pri_ch_idx; 946 u8 gpu_max_pri_ch_idx; 947 u8 board_pri_ch_idx; 948 u8 
mem_pri_ch_idx; 949 u8 pwr_supply_pri_ch_idx; 950} __packed; 951 952struct therm_channel_1x_entry { 953 u8 class_id; 954 u8 param0; 955 u8 param1; 956 u8 param2; 957 u8 flags; 958} __packed; 959 960#define NV_VBIOS_THERM_CHANNEL_1X_ENTRY_CLASS_DEVICE 0x01U 961 962#define NV_VBIOS_THERM_CHANNEL_1X_ENTRY_PARAM0_DEVICE_INDEX_MASK 0xFF 963#define NV_VBIOS_THERM_CHANNEL_1X_ENTRY_PARAM0_DEVICE_INDEX_SHIFT 0 964 965#define NV_VBIOS_THERM_CHANNEL_1X_ENTRY_PARAM1_DEVICE_PROVIDER_INDEX_MASK 0xFF 966#define NV_VBIOS_THERM_CHANNEL_1X_ENTRY_PARAM1_DEVICE_PROVIDER_INDEX_SHIFT 0 967 968/* Frequency Controller Table */ 969struct vbios_fct_1x_header { 970 u8 version; 971 u8 header_size; 972 u8 entry_size; 973 u8 entry_count; 974 u16 sampling_period_ms; 975} __packed; 976 977struct vbios_fct_1x_entry { 978 u8 flags0; 979 u8 clk_domain_idx; 980 u16 param0; 981 u16 param1; 982 u32 param2; 983 u32 param3; 984 u32 param4; 985 u32 param5; 986 u32 param6; 987 u32 param7; 988 u32 param8; 989} __packed; 990 991#define NV_VBIOS_FCT_1X_ENTRY_FLAGS0_TYPE_MASK GENMASK(3, 0) 992#define NV_VBIOS_FCT_1X_ENTRY_FLAGS0_TYPE_SHIFT 0 993#define NV_VBIOS_FCT_1X_ENTRY_FLAGS0_TYPE_DISABLED 0x0 994#define NV_VBIOS_FCT_1X_ENTRY_FLAGS0_TYPE_PI 0x1 995 996#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_MASK GENMASK(7, 0) 997#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_SHIFT 0 998#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_SYS 0x00 999#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_LTC 0x01 1000#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_XBAR 0x02 1001#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPC0 0x03 1002#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPC1 0x04 1003#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPC2 0x05 1004#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPC3 0x06 1005#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPC4 0x07 1006#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPC5 0x08 1007#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID_GPCS 0x09 1008 1009#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE_MASK GENMASK(9, 8) 1010#define 
NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE_SHIFT 8 1011#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE_BCAST 0x0 1012#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE_MIN 0x1 1013#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE_MAX 0x2 1014#define NV_VBIOS_FCT_1X_ENTRY_PARAM0_FREQ_MODE_AVG 0x3 1015 1016#define NV_VBIOS_FCT_1X_ENTRY_PARAM1_SLOWDOWN_PCT_MIN_MASK GENMASK(7, 0) 1017#define NV_VBIOS_FCT_1X_ENTRY_PARAM1_SLOWDOWN_PCT_MIN_SHIFT 0 1018 1019#define NV_VBIOS_FCT_1X_ENTRY_PARAM1_POISON_MASK GENMASK(8, 8) 1020#define NV_VBIOS_FCT_1X_ENTRY_PARAM1_POISON_SHIFT 8 1021#define NV_VBIOS_FCT_1X_ENTRY_PARAM1_POISON_NO 0x0 1022#define NV_VBIOS_FCT_1X_ENTRY_PARAM1_POISON_YES 0x1 1023 1024#define NV_VBIOS_FCT_1X_ENTRY_PARAM2_PROP_GAIN_MASK GENMASK(31, 0) 1025#define NV_VBIOS_FCT_1X_ENTRY_PARAM2_PROP_GAIN_SHIFT 0 1026 1027#define NV_VBIOS_FCT_1X_ENTRY_PARAM3_INTEG_GAIN_MASK GENMASK(31, 0) 1028#define NV_VBIOS_FCT_1X_ENTRY_PARAM3_INTEG_GAIN_SHIFT 0 1029 1030 1031#define NV_VBIOS_FCT_1X_ENTRY_PARAM4_INTEG_DECAY_MASK GENMASK(31, 0) 1032#define NV_VBIOS_FCT_1X_ENTRY_PARAM4_INTEG_DECAY_SHIFT 0 1033 1034#define NV_VBIOS_FCT_1X_ENTRY_PARAM5_VOLT_DELTA_MIN_MASK GENMASK(31, 0) 1035#define NV_VBIOS_FCT_1X_ENTRY_PARAM5_VOLT_DELTA_MIN_SHIFT 0 1036 1037 1038#define NV_VBIOS_FCT_1X_ENTRY_PARAM6_VOLT_DELTA_MAX_MASK GENMASK(31, 0) 1039#define NV_VBIOS_FCT_1X_ENTRY_PARAM6_VOLT_DELTA_MAX_SHIFT 0 1040 1041#define NV_VBIOS_FCT_1X_ENTRY_PARAM7_FREQ_CAP_VF_MASK GENMASK(15, 0) 1042#define NV_VBIOS_FCT_1X_ENTRY_PARAM7_FREQ_CAP_VF_SHIFT 0 1043#define NV_VBIOS_FCT_1X_ENTRY_PARAM7_FREQ_CAP_VMIN_MASK GENMASK(31, 16) 1044#define NV_VBIOS_FCT_1X_ENTRY_PARAM7_FREQ_CAP_VMIN_SHIFT 16 1045 1046#define NV_VBIOS_FCT_1X_ENTRY_PARAM8_FREQ_HYST_POS_MASK GENMASK(15, 0) 1047#define NV_VBIOS_FCT_1X_ENTRY_PARAM8_FREQ_HYST_POS_SHIFT 0 1048#define NV_VBIOS_FCT_1X_ENTRY_PARAM8_FREQ_HYST_NEG_MASK GENMASK(31, 16) 1049#define NV_VBIOS_FCT_1X_ENTRY_PARAM8_FREQ_HYST_NEG_SHIFT 16 1050 1051/* LPWR Index Table */ 1052struct 
nvgpu_bios_lpwr_idx_table_1x_header { 1053 u8 version; 1054 u8 header_size; 1055 u8 entry_size; 1056 u8 entry_count; 1057 u16 base_sampling_period; 1058} __packed; 1059 1060struct nvgpu_bios_lpwr_idx_table_1x_entry { 1061 u8 pcie_idx; 1062 u8 gr_idx; 1063 u8 ms_idx; 1064 u8 di_idx; 1065 u8 gc6_idx; 1066} __packed; 1067 1068/* LPWR MS Table*/ 1069struct nvgpu_bios_lpwr_ms_table_1x_header { 1070 u8 version; 1071 u8 header_size; 1072 u8 entry_size; 1073 u8 entry_count; 1074 u8 default_entry_idx; 1075 u16 idle_threshold_us; 1076} __packed; 1077 1078struct nvgpu_bios_lpwr_ms_table_1x_entry { 1079 u32 feautre_mask; 1080 u16 dynamic_current_logic; 1081 u16 dynamic_current_sram; 1082} __packed; 1083 1084#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_MASK GENMASK(0, 0) 1085#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SHIFT 0 1086#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR_MASK GENMASK(2, 2) 1087#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR_SHIFT 2 1088#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING_MASK \ 1089 GENMASK(3, 3) 1090#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING_SHIFT 3 1091#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG_MASK GENMASK(5, 5) 1092#define NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG_SHIFT 5 1093 1094/* LPWR GR Table */ 1095struct nvgpu_bios_lpwr_gr_table_1x_header { 1096 u8 version; 1097 u8 header_size; 1098 u8 entry_size; 1099 u8 entry_count; 1100 u8 default_entry_idx; 1101 u16 idle_threshold_us; 1102 u8 adaptive_gr_multiplier; 1103} __packed; 1104 1105struct nvgpu_bios_lpwr_gr_table_1x_entry { 1106 u32 feautre_mask; 1107} __packed; 1108 1109#define NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_MASK GENMASK(0, 0) 1110#define NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_SHIFT 0 1111 1112#define NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG_MASK GENMASK(4, 4) 1113#define NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG_SHIFT 4 1114int nvgpu_bios_parse_rom(struct gk20a *g); 1115u8 nvgpu_bios_read_u8(struct gk20a *g, u32 offset); 1116s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset); 1117u16 
nvgpu_bios_read_u16(struct gk20a *g, u32 offset); 1118u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset); 1119void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g, 1120 struct bit_token *ptoken, u8 table_id); 1121int nvgpu_bios_execute_script(struct gk20a *g, u32 offset); 1122u32 nvgpu_bios_get_nvlink_config_data(struct gk20a *g); 1123#endif
diff --git a/include/nvgpu/bitops.h b/include/nvgpu/bitops.h
deleted file mode 100644
index 00336d0..0000000
--- a/include/nvgpu/bitops.h
+++ /dev/null
@@ -1,44 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_BITOPS_H 23#define NVGPU_BITOPS_H 24 25#include <nvgpu/types.h> 26 27/* 28 * Explicit sizes for bit definitions. Please use these instead of BIT(). 29 */ 30#define BIT8(i) (U8(1) << (i)) 31#define BIT16(i) (U16(1) << (i)) 32#define BIT32(i) (U32(1) << (i)) 33#define BIT64(i) (U64(1) << (i)) 34 35#ifdef __KERNEL__ 36#include <linux/bitops.h> 37#include <linux/bitmap.h> 38#elif defined(__NVGPU_POSIX__) 39#include <nvgpu/posix/bitops.h> 40#else 41#include <nvgpu_rmos/include/bitops.h> 42#endif 43 44#endif /* NVGPU_BITOPS_H */
diff --git a/include/nvgpu/bsearch.h b/include/nvgpu/bsearch.h
deleted file mode 100644
index 46a2d04..0000000
--- a/include/nvgpu/bsearch.h
+++ /dev/null
@@ -1,31 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_BSEARCH_H 23#define NVGPU_BSEARCH_H 24 25#ifdef __KERNEL__ 26#include <linux/bsearch.h> 27#elif defined(__NVGPU_POSIX__) 28#include <stdlib.h> 29#endif 30 31#endif /*NVGPU_BSEARCH_H*/
diff --git a/include/nvgpu/bug.h b/include/nvgpu/bug.h
deleted file mode 100644
index 82d641b..0000000
--- a/include/nvgpu/bug.h
+++ /dev/null
@@ -1,51 +0,0 @@ 1/* 2 * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_BUG_H 23#define NVGPU_BUG_H 24 25#ifdef __KERNEL__ 26#include <linux/bug.h> 27/* 28 * Define an assert macro that code within nvgpu can use. 29 * 30 * The goal of this macro is for debugging but what that means varies from OS 31 * to OS. On Linux wee don't want to BUG() for general driver misbehaving. BUG() 32 * is a very heavy handed tool - in fact there's probably no where within the 33 * nvgpu core code where it makes sense to use a BUG() when running under Linux. 34 * 35 * However, on QNX (and POSIX) BUG() will just kill the current process. This 36 * means we can use it for handling bugs in nvgpu. 37 * 38 * As a result this macro varies depending on platform. 39 */ 40#define nvgpu_assert(cond) ((void) WARN_ON(!(cond))) 41#define nvgpu_do_assert_print(g, fmt, arg...) 
\ 42 do { \ 43 nvgpu_err(g, fmt, ##arg); \ 44 } while (false) 45#elif defined(__NVGPU_POSIX__) 46#include <nvgpu/posix/bug.h> 47#else 48#include <nvgpu_rmos/include/bug.h> 49#endif 50 51#endif /* NVGPU_BUG_H */
diff --git a/include/nvgpu/channel.h b/include/nvgpu/channel.h
deleted file mode 100644
index 764d047..0000000
--- a/include/nvgpu/channel.h
+++ /dev/null
@@ -1,478 +0,0 @@ 1/* 2 * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_CHANNEL_H 24#define NVGPU_CHANNEL_H 25 26#include <nvgpu/list.h> 27#include <nvgpu/lock.h> 28#include <nvgpu/timers.h> 29#include <nvgpu/cond.h> 30#include <nvgpu/atomic.h> 31#include <nvgpu/nvgpu_mem.h> 32#include <nvgpu/allocator.h> 33 34struct gk20a; 35struct dbg_session_gk20a; 36struct gk20a_fence; 37struct fifo_profile_gk20a; 38struct nvgpu_channel_sync; 39struct nvgpu_gpfifo_userdata; 40 41/* Flags to be passed to nvgpu_channel_setup_bind() */ 42#define NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR (1U << 0U) 43#define NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC (1U << 1U) 44#define NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE (1U << 2U) 45#define NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT (1U << 3U) 46 47/* Flags to be passed to nvgpu_submit_channel_gpfifo() */ 48#define NVGPU_SUBMIT_FLAGS_FENCE_WAIT (1U << 0U) 49#define NVGPU_SUBMIT_FLAGS_FENCE_GET (1U << 1U) 50#define NVGPU_SUBMIT_FLAGS_HW_FORMAT (1U << 2U) 51#define NVGPU_SUBMIT_FLAGS_SYNC_FENCE (1U << 3U) 52#define NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI (1U << 4U) 53#define NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING (1U << 5U) 54 55/* 56 * The binary format of 'struct nvgpu_channel_fence' introduced here 57 * should match that of 'struct nvgpu_fence' defined in uapi header, since 58 * this struct is intended to be a mirror copy of the uapi struct. This is 59 * not a hard requirement though because of nvgpu_get_fence_args conversion 60 * function. 61 */ 62struct nvgpu_channel_fence { 63 u32 id; 64 u32 value; 65}; 66 67/* 68 * The binary format of 'struct nvgpu_gpfifo_entry' introduced here 69 * should match that of 'struct nvgpu_gpfifo' defined in uapi header, since 70 * this struct is intended to be a mirror copy of the uapi struct. This is 71 * a rigid requirement because there's no conversion function and there are 72 * memcpy's present between the user gpfifo (of type nvgpu_gpfifo) and the 73 * kern gpfifo (of type nvgpu_gpfifo_entry). 
74 */ 75struct nvgpu_gpfifo_entry { 76 u32 entry0; 77 u32 entry1; 78}; 79 80struct gpfifo_desc { 81 struct nvgpu_mem mem; 82 u32 entry_num; 83 84 u32 get; 85 u32 put; 86 87 bool wrap; 88 89 /* if gpfifo lives in vidmem or is forced to go via PRAMIN, first copy 90 * from userspace to pipe and then from pipe to gpu buffer */ 91 void *pipe; 92}; 93 94struct nvgpu_setup_bind_args { 95 u32 num_gpfifo_entries; 96 u32 num_inflight_jobs; 97 u32 userd_dmabuf_fd; 98 u64 userd_dmabuf_offset; 99 u32 gpfifo_dmabuf_fd; 100 u64 gpfifo_dmabuf_offset; 101 u32 work_submit_token; 102 u32 flags; 103}; 104 105struct notification { 106 struct { 107 u32 nanoseconds[2]; 108 } timestamp; 109 u32 info32; 110 u16 info16; 111 u16 status; 112}; 113 114struct priv_cmd_queue { 115 struct nvgpu_mem mem; 116 u32 size; /* num of entries in words */ 117 u32 put; /* put for priv cmd queue */ 118 u32 get; /* get for priv cmd queue */ 119}; 120 121struct priv_cmd_entry { 122 bool valid; 123 struct nvgpu_mem *mem; 124 u32 off; /* offset in mem, in u32 entries */ 125 u64 gva; 126 u32 get; /* start of entry in queue */ 127 u32 size; /* in words */ 128}; 129 130struct channel_gk20a_job { 131 struct nvgpu_mapped_buf **mapped_buffers; 132 int num_mapped_buffers; 133 struct gk20a_fence *post_fence; 134 struct priv_cmd_entry *wait_cmd; 135 struct priv_cmd_entry *incr_cmd; 136 struct nvgpu_list_node list; 137}; 138 139static inline struct channel_gk20a_job * 140channel_gk20a_job_from_list(struct nvgpu_list_node *node) 141{ 142 return (struct channel_gk20a_job *) 143 ((uintptr_t)node - offsetof(struct channel_gk20a_job, list)); 144}; 145 146struct channel_gk20a_joblist { 147 struct { 148 bool enabled; 149 unsigned int length; 150 unsigned int put; 151 unsigned int get; 152 struct channel_gk20a_job *jobs; 153 struct nvgpu_mutex read_lock; 154 } pre_alloc; 155 156 struct { 157 struct nvgpu_list_node jobs; 158 struct nvgpu_spinlock lock; 159 } dynamic; 160 161 /* 162 * Synchronize abort cleanup (when closing a 
channel) and job cleanup 163 * (asynchronously from worker) - protect from concurrent access when 164 * job resources are being freed. 165 */ 166 struct nvgpu_mutex cleanup_lock; 167}; 168 169struct channel_gk20a_timeout { 170 /* lock protects the running timer state */ 171 struct nvgpu_spinlock lock; 172 struct nvgpu_timeout timer; 173 bool running; 174 u32 gp_get; 175 u64 pb_get; 176 177 /* lock not needed */ 178 u32 limit_ms; 179 bool enabled; 180 bool debug_dump; 181}; 182 183/* 184 * Track refcount actions, saving their stack traces. This number specifies how 185 * many most recent actions are stored in a buffer. Set to 0 to disable. 128 186 * should be enough to track moderately hard problems from the start. 187 */ 188#define GK20A_CHANNEL_REFCOUNT_TRACKING 0 189/* Stack depth for the saved actions. */ 190#define GK20A_CHANNEL_REFCOUNT_TRACKING_STACKLEN 8 191 192/* 193 * Because the puts and gets are not linked together explicitly (although they 194 * should always come in pairs), it's not possible to tell which ref holder to 195 * delete from the list when doing a put. So, just store some number of most 196 * recent gets and puts in a ring buffer, to obtain a history. 197 * 198 * These are zeroed when a channel is closed, so a new one starts fresh. 199 */ 200 201enum channel_gk20a_ref_action_type { 202 channel_gk20a_ref_action_get, 203 channel_gk20a_ref_action_put 204}; 205 206#if GK20A_CHANNEL_REFCOUNT_TRACKING 207 208#include <linux/stacktrace.h> 209 210struct channel_gk20a_ref_action { 211 enum channel_gk20a_ref_action_type type; 212 s64 timestamp_ms; 213 /* 214 * Many of these traces will be similar. Simpler to just capture 215 * duplicates than to have a separate database for the entries. 
216 */ 217 struct stack_trace trace; 218 unsigned long trace_entries[GK20A_CHANNEL_REFCOUNT_TRACKING_STACKLEN]; 219}; 220#endif 221 222/* this is the priv element of struct nvhost_channel */ 223struct channel_gk20a { 224 struct gk20a *g; /* set only when channel is active */ 225 226 struct nvgpu_list_node free_chs; 227 228 struct nvgpu_spinlock ref_obtain_lock; 229 nvgpu_atomic_t ref_count; 230 struct nvgpu_cond ref_count_dec_wq; 231#if GK20A_CHANNEL_REFCOUNT_TRACKING 232 /* 233 * Ring buffer for most recent refcount gets and puts. Protected by 234 * ref_actions_lock when getting or putting refs (i.e., adding 235 * entries), and when reading entries. 236 */ 237 struct channel_gk20a_ref_action ref_actions[ 238 GK20A_CHANNEL_REFCOUNT_TRACKING]; 239 size_t ref_actions_put; /* index of next write */ 240 struct nvgpu_spinlock ref_actions_lock; 241#endif 242 243 struct nvgpu_semaphore_int *hw_sema; 244 245 nvgpu_atomic_t bound; 246 247 u32 chid; 248 u32 tsgid; 249 pid_t pid; 250 pid_t tgid; 251 struct nvgpu_mutex ioctl_lock; 252 253 struct nvgpu_list_node ch_entry; /* channel's entry in TSG */ 254 255 struct channel_gk20a_joblist joblist; 256 struct nvgpu_allocator fence_allocator; 257 258 struct vm_gk20a *vm; 259 260 struct gpfifo_desc gpfifo; 261 262 struct nvgpu_mem usermode_userd; /* Used for Usermode Submission */ 263 struct nvgpu_mem usermode_gpfifo; 264 struct nvgpu_mem inst_block; 265 266 u64 userd_iova; 267 u64 userd_gpu_va; 268 269 struct priv_cmd_queue priv_cmd_q; 270 271 struct nvgpu_cond notifier_wq; 272 struct nvgpu_cond semaphore_wq; 273 274 /* kernel watchdog to kill stuck jobs */ 275 struct channel_gk20a_timeout timeout; 276 277 /* for job cleanup handling in the background worker */ 278 struct nvgpu_list_node worker_item; 279 280#if defined(CONFIG_GK20A_CYCLE_STATS) 281 struct { 282 void *cyclestate_buffer; 283 u32 cyclestate_buffer_size; 284 struct nvgpu_mutex cyclestate_buffer_mutex; 285 } cyclestate; 286 287 struct nvgpu_mutex cs_client_mutex; 288 
struct gk20a_cs_snapshot_client *cs_client; 289#endif 290 struct nvgpu_mutex dbg_s_lock; 291 struct nvgpu_list_node dbg_s_list; 292 293 struct nvgpu_mutex sync_lock; 294 struct nvgpu_channel_sync *sync; 295 struct nvgpu_channel_sync *user_sync; 296 297#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 298 u64 virt_ctx; 299#endif 300 301 struct nvgpu_mem ctx_header; 302 303 struct nvgpu_spinlock ch_timedout_lock; 304 bool ch_timedout; 305 /* Any operating system specific data. */ 306 void *os_priv; 307 308 u32 obj_class; /* we support only one obj per channel */ 309 310 u32 timeout_accumulated_ms; 311 u32 timeout_gpfifo_get; 312 313 u32 subctx_id; 314 u32 runqueue_sel; 315 316 u32 timeout_ms_max; 317 u32 runlist_id; 318 319 bool mmu_nack_handled; 320 bool referenceable; 321 bool vpr; 322 bool deterministic; 323 /* deterministic, but explicitly idle and submits disallowed */ 324 bool deterministic_railgate_allowed; 325 bool cde; 326 bool usermode_submit_enabled; 327 bool timeout_debug_dump; 328 bool has_os_fence_framework_support; 329 330 bool is_privileged_channel; 331 332 /** 333 * MMU Debugger Mode is enabled for this channel if refcnt > 0 334 */ 335 u32 mmu_debug_mode_refcnt; 336}; 337 338static inline struct channel_gk20a * 339channel_gk20a_from_free_chs(struct nvgpu_list_node *node) 340{ 341 return (struct channel_gk20a *) 342 ((uintptr_t)node - offsetof(struct channel_gk20a, free_chs)); 343}; 344 345static inline struct channel_gk20a * 346channel_gk20a_from_ch_entry(struct nvgpu_list_node *node) 347{ 348 return (struct channel_gk20a *) 349 ((uintptr_t)node - offsetof(struct channel_gk20a, ch_entry)); 350}; 351 352static inline struct channel_gk20a * 353channel_gk20a_from_worker_item(struct nvgpu_list_node *node) 354{ 355 return (struct channel_gk20a *) 356 ((uintptr_t)node - offsetof(struct channel_gk20a, worker_item)); 357}; 358 359static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch) 360{ 361 return !!ch->vm; 362} 363int channel_gk20a_commit_va(struct 
channel_gk20a *c); 364int gk20a_init_channel_support(struct gk20a *, u32 chid); 365 366/* must be inside gk20a_busy()..gk20a_idle() */ 367void gk20a_channel_close(struct channel_gk20a *ch); 368void __gk20a_channel_kill(struct channel_gk20a *ch); 369 370bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch, 371 u32 timeout_delta_ms, bool *progress); 372void gk20a_disable_channel(struct channel_gk20a *ch); 373void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt); 374void gk20a_channel_abort_clean_up(struct channel_gk20a *ch); 375void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events); 376int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size, 377 struct priv_cmd_entry *entry); 378int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e); 379 380int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch); 381int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch); 382 383int gk20a_channel_suspend(struct gk20a *g); 384int gk20a_channel_resume(struct gk20a *g); 385 386void gk20a_channel_deterministic_idle(struct gk20a *g); 387void gk20a_channel_deterministic_unidle(struct gk20a *g); 388 389int nvgpu_channel_worker_init(struct gk20a *g); 390void nvgpu_channel_worker_deinit(struct gk20a *g); 391 392struct channel_gk20a *gk20a_get_channel_from_file(int fd); 393void gk20a_channel_update(struct channel_gk20a *c); 394 395/* returns ch if reference was obtained */ 396struct channel_gk20a *__must_check _gk20a_channel_get(struct channel_gk20a *ch, 397 const char *caller); 398#define gk20a_channel_get(ch) _gk20a_channel_get(ch, __func__) 399 400 401void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller); 402#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__) 403 404/* returns NULL if could not take a ref to the channel */ 405struct channel_gk20a *__must_check _gk20a_channel_from_id(struct gk20a *g, 406 u32 chid, const char *caller); 407#define 
gk20a_channel_from_id(g, chid) _gk20a_channel_from_id(g, chid, __func__) 408 409int gk20a_wait_channel_idle(struct channel_gk20a *ch); 410 411/* runlist_id -1 is synonym for ENGINE_GR_GK20A runlist id */ 412struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, 413 s32 runlist_id, 414 bool is_privileged_channel, 415 pid_t pid, pid_t tid); 416 417int nvgpu_channel_setup_bind(struct channel_gk20a *c, 418 struct nvgpu_setup_bind_args *args); 419 420void gk20a_channel_timeout_restart_all_channels(struct gk20a *g); 421 422bool channel_gk20a_is_prealloc_enabled(struct channel_gk20a *c); 423void channel_gk20a_joblist_lock(struct channel_gk20a *c); 424void channel_gk20a_joblist_unlock(struct channel_gk20a *c); 425bool channel_gk20a_joblist_is_empty(struct channel_gk20a *c); 426 427int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add); 428int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g, 429 unsigned int timeslice_period, 430 unsigned int *__timeslice_timeout, unsigned int *__timeslice_scale); 431 432void gk20a_wait_until_counter_is_N( 433 struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value, 434 struct nvgpu_cond *c, const char *caller, const char *counter_name); 435int channel_gk20a_alloc_job(struct channel_gk20a *c, 436 struct channel_gk20a_job **job_out); 437void channel_gk20a_free_job(struct channel_gk20a *c, 438 struct channel_gk20a_job *job); 439u32 nvgpu_get_gp_free_count(struct channel_gk20a *c); 440u32 nvgpu_gp_free_count(struct channel_gk20a *c); 441int gk20a_channel_add_job(struct channel_gk20a *c, 442 struct channel_gk20a_job *job, 443 bool skip_buffer_refcounting); 444void free_priv_cmdbuf(struct channel_gk20a *c, 445 struct priv_cmd_entry *e); 446void gk20a_channel_clean_up_jobs(struct channel_gk20a *c, 447 bool clean_all); 448 449void gk20a_channel_free_usermode_buffers(struct channel_gk20a *c); 450u32 nvgpu_get_gpfifo_entry_size(void); 451 452int nvgpu_submit_channel_gpfifo_user(struct channel_gk20a *c, 453 
struct nvgpu_gpfifo_userdata userdata, 454 u32 num_entries, 455 u32 flags, 456 struct nvgpu_channel_fence *fence, 457 struct gk20a_fence **fence_out, 458 struct fifo_profile_gk20a *profile); 459 460int nvgpu_submit_channel_gpfifo_kernel(struct channel_gk20a *c, 461 struct nvgpu_gpfifo_entry *gpfifo, 462 u32 num_entries, 463 u32 flags, 464 struct nvgpu_channel_fence *fence, 465 struct gk20a_fence **fence_out); 466 467#ifdef CONFIG_DEBUG_FS 468void trace_write_pushbuffers(struct channel_gk20a *c, u32 count); 469#else 470static inline void trace_write_pushbuffers(struct channel_gk20a *c, u32 count) 471{ 472} 473#endif 474 475void gk20a_channel_set_timedout(struct channel_gk20a *ch); 476bool gk20a_channel_check_timedout(struct channel_gk20a *ch); 477 478#endif
diff --git a/include/nvgpu/channel_sync.h b/include/nvgpu/channel_sync.h
deleted file mode 100644
index f0b2b86..0000000
--- a/include/nvgpu/channel_sync.h
+++ /dev/null
@@ -1,113 +0,0 @@ 1/* 2 * 3 * Nvgpu Channel Synchronization Abstraction 4 * 5 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the "Software"), 9 * to deal in the Software without restriction, including without limitation 10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * and/or sell copies of the Software, and to permit persons to whom the 12 * Software is furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 * DEALINGS IN THE SOFTWARE. 24 */ 25 26#ifndef NVGPU_CHANNEL_SYNC_H 27#define NVGPU_CHANNEL_SYNC_H 28 29#include <nvgpu/atomic.h> 30 31struct nvgpu_channel_sync; 32struct priv_cmd_entry; 33struct channel_gk20a; 34struct gk20a_fence; 35struct gk20a; 36struct nvgpu_semaphore; 37 38struct nvgpu_channel_sync { 39 nvgpu_atomic_t refcount; 40 41 /* Generate a gpu wait cmdbuf from syncpoint. 42 * Returns a gpu cmdbuf that performs the wait when executed 43 */ 44 int (*wait_syncpt)(struct nvgpu_channel_sync *s, u32 id, u32 thresh, 45 struct priv_cmd_entry *entry); 46 47 /* Generate a gpu wait cmdbuf from sync fd. 
48 * Returns a gpu cmdbuf that performs the wait when executed 49 */ 50 int (*wait_fd)(struct nvgpu_channel_sync *s, int fd, 51 struct priv_cmd_entry *entry, int max_wait_cmds); 52 53 /* Increment syncpoint/semaphore. 54 * Returns 55 * - a gpu cmdbuf that performs the increment when executed, 56 * - a fence that can be passed to wait_cpu() and is_expired(). 57 */ 58 int (*incr)(struct nvgpu_channel_sync *s, 59 struct priv_cmd_entry *entry, 60 struct gk20a_fence *fence, 61 bool need_sync_fence, 62 bool register_irq); 63 64 /* Increment syncpoint/semaphore, so that the returned fence represents 65 * work completion (may need wfi) and can be returned to user space. 66 * Returns 67 * - a gpu cmdbuf that performs the increment when executed, 68 * - a fence that can be passed to wait_cpu() and is_expired(), 69 * - a gk20a_fence that signals when the incr has happened. 70 */ 71 int (*incr_user)(struct nvgpu_channel_sync *s, 72 int wait_fence_fd, 73 struct priv_cmd_entry *entry, 74 struct gk20a_fence *fence, 75 bool wfi, 76 bool need_sync_fence, 77 bool register_irq); 78 79 /* Reset the channel syncpoint/semaphore. */ 80 void (*set_min_eq_max)(struct nvgpu_channel_sync *s); 81 82 /* 83 * Set the channel syncpoint/semaphore to safe state 84 * This should be used to reset User managed syncpoint since we don't 85 * track threshold values for those syncpoints 86 */ 87 void (*set_safe_state)(struct nvgpu_channel_sync *s); 88 89 /* Returns the sync point id or negative number if no syncpt*/ 90 int (*syncpt_id)(struct nvgpu_channel_sync *s); 91 92 /* Returns the sync point address of sync point or 0 if not supported */ 93 u64 (*syncpt_address)(struct nvgpu_channel_sync *s); 94 95 /* Free the resources allocated by nvgpu_channel_sync_create. 
*/ 96 void (*destroy)(struct nvgpu_channel_sync *s); 97}; 98 99void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c, 100 struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd, 101 u32 wait_cmd_size, u32 pos); 102 103int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c, 104 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, 105 u32 wait_cmd_size, u32 pos, bool preallocated); 106 107void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync, 108 bool set_safe_state); 109struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c, 110 bool user_managed); 111bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g); 112 113#endif /* NVGPU_CHANNEL_SYNC_H */
diff --git a/include/nvgpu/circ_buf.h b/include/nvgpu/circ_buf.h
deleted file mode 100644
index 76998ca..0000000
--- a/include/nvgpu/circ_buf.h
+++ /dev/null
@@ -1,31 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_CIRC_BUF_H 23#define NVGPU_CIRC_BUF_H 24 25#ifdef __KERNEL__ 26#include <linux/circ_buf.h> 27#elif defined(__NVGPU_POSIX__) 28#include <nvgpu/posix/circ_buf.h> 29#endif 30 31#endif /* NVGPU_CIRC_BUF_H */
diff --git a/include/nvgpu/clk.h b/include/nvgpu/clk.h
deleted file mode 100644
index 62bb0f9..0000000
--- a/include/nvgpu/clk.h
+++ /dev/null
@@ -1,42 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef __NVGPU_CLK_H__ 24#define __NVGPU_CLK_H__ 25 26#define CLK_NAME_MAX 24 27 28struct namemap_cfg { 29 u32 namemap; 30 u32 is_enable; /* Namemap enabled */ 31 u32 is_counter; /* Using cntr */ 32 struct gk20a *g; 33 struct { 34 u32 reg_ctrl_addr; 35 u32 reg_ctrl_idx; 36 u32 reg_cntr_addr; 37 } cntr; 38 u32 scale; 39 char name[CLK_NAME_MAX]; 40}; 41 42#endif
diff --git a/include/nvgpu/clk_arb.h b/include/nvgpu/clk_arb.h
deleted file mode 100644
index 43af631..0000000
--- a/include/nvgpu/clk_arb.h
+++ /dev/null
@@ -1,378 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_CLK_ARB_H 24#define NVGPU_CLK_ARB_H 25 26struct gk20a; 27 28#include <nvgpu/types.h> 29#include <nvgpu/bitops.h> 30#include <nvgpu/lock.h> 31#include <nvgpu/kmem.h> 32#include <nvgpu/atomic.h> 33#include <nvgpu/bug.h> 34#include <nvgpu/kref.h> 35#include <nvgpu/log.h> 36#include <nvgpu/barrier.h> 37#include <nvgpu/cond.h> 38 39#include "clk/clk.h" 40#include "pstate/pstate.h" 41#include "lpwr/lpwr.h" 42#include "volt/volt.h" 43 44#define MAX_F_POINTS 256 45#define DEFAULT_EVENT_NUMBER 32 46 47struct nvgpu_clk_dev; 48struct nvgpu_clk_arb_target; 49struct nvgpu_clk_notification_queue; 50struct nvgpu_clk_session; 51 52#define VF_POINT_INVALID_PSTATE ~0U 53#define VF_POINT_SET_PSTATE_SUPPORTED(a, b) ((a)->pstates |= (1UL << (b))) 54#define VF_POINT_GET_PSTATE(a) (((a)->pstates) ?\ 55 __fls((a)->pstates) :\ 56 VF_POINT_INVALID_PSTATE) 57#define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) ?\ 58 __fls((a)->pstates & (b)->pstates) :\ 59 VF_POINT_INVALID_PSTATE) 60 61/* 62 * These events, defined in common code are the counterparts of the uapi 63 * events. There should be a conversion function to take care to convert 64 * these to the uapi events. 
65 */ 66/* Event associated to a VF update */ 67#define NVGPU_EVENT_VF_UPDATE 0 68 69/* Recoverable alarms (POLLPRI) */ 70/* Alarm when target frequency on any session is not possible */ 71#define NVGPU_EVENT_ALARM_TARGET_VF_NOT_POSSIBLE 1 72/* Alarm when target frequency on current session is not possible */ 73#define NVGPU_EVENT_ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE 2 74/* Alarm when Clock Arbiter failed */ 75#define NVGPU_EVENT_ALARM_CLOCK_ARBITER_FAILED 3 76/* Alarm when VF table update failed */ 77#define NVGPU_EVENT_ALARM_VF_TABLE_UPDATE_FAILED 4 78/* Alarm on thermal condition */ 79#define NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD 5 80/* Alarm on power condition */ 81#define NVGPU_EVENT_ALARM_POWER_ABOVE_THRESHOLD 6 82 83/* Non recoverable alarm (POLLHUP) */ 84/* Alarm on GPU shutdown/fall from bus */ 85#define NVGPU_EVENT_ALARM_GPU_LOST 7 86 87#define NVGPU_EVENT_LAST NVGPU_EVENT_ALARM_GPU_LOST 88 89/* Local Alarms */ 90#define EVENT(alarm) (0x1UL << NVGPU_EVENT_##alarm) 91 92#define LOCAL_ALARM_MASK (EVENT(ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE) | \ 93 EVENT(VF_UPDATE)) 94 95#define _WRAPGTEQ(a, b) ((a-b) > 0) 96 97/* 98 * NVGPU_POLL* defines equivalent to the POLL* linux defines 99 */ 100#define NVGPU_POLLIN (1 << 0) 101#define NVGPU_POLLPRI (1 << 1) 102#define NVGPU_POLLOUT (1 << 2) 103#define NVGPU_POLLRDNORM (1 << 3) 104#define NVGPU_POLLHUP (1 << 4) 105 106/* NVGPU_CLK_DOMAIN_* defines equivalent to NVGPU_GPU_CLK_DOMAIN_* 107 * defines in uapi header 108 */ 109/* Memory clock */ 110#define NVGPU_CLK_DOMAIN_MCLK (0) 111/* Main graphics core clock */ 112#define NVGPU_CLK_DOMAIN_GPCCLK (1) 113 114#define NVGPU_CLK_DOMAIN_MAX (NVGPU_CLK_DOMAIN_GPCCLK) 115 116#define clk_arb_dbg(g, fmt, args...) 
\ 117 do { \ 118 nvgpu_log(g, gpu_dbg_clk_arb, \ 119 fmt, ##args); \ 120 } while (0) 121 122struct nvgpu_clk_notification { 123 u32 notification; 124 u64 timestamp; 125}; 126 127struct nvgpu_clk_notification_queue { 128 u32 size; 129 nvgpu_atomic_t head; 130 nvgpu_atomic_t tail; 131 struct nvgpu_clk_notification *notifications; 132}; 133 134struct nvgpu_clk_vf_point { 135 u16 pstates; 136 union { 137 struct { 138 u16 gpc_mhz; 139 u16 sys_mhz; 140 u16 xbar_mhz; 141 }; 142 u16 mem_mhz; 143 }; 144 u32 uvolt; 145 u32 uvolt_sram; 146}; 147 148struct nvgpu_clk_vf_table { 149 u32 mclk_num_points; 150 struct nvgpu_clk_vf_point *mclk_points; 151 u32 gpc2clk_num_points; 152 struct nvgpu_clk_vf_point *gpc2clk_points; 153}; 154#ifdef CONFIG_DEBUG_FS 155struct nvgpu_clk_arb_debug { 156 s64 switch_max; 157 s64 switch_min; 158 u64 switch_num; 159 s64 switch_avg; 160 s64 switch_std; 161}; 162#endif 163 164struct nvgpu_clk_arb_target { 165 u16 mclk; 166 u16 gpc2clk; 167 u32 pstate; 168}; 169 170enum clk_arb_work_item_type { 171 CLK_ARB_WORK_UPDATE_VF_TABLE, 172 CLK_ARB_WORK_UPDATE_ARB 173}; 174 175struct nvgpu_clk_arb_work_item { 176 enum clk_arb_work_item_type item_type; 177 struct nvgpu_clk_arb *arb; 178 struct nvgpu_list_node worker_item; 179}; 180 181struct nvgpu_clk_arb { 182 struct nvgpu_spinlock sessions_lock; 183 struct nvgpu_spinlock users_lock; 184 struct nvgpu_spinlock requests_lock; 185 186 struct nvgpu_mutex pstate_lock; 187 struct nvgpu_list_node users; 188 struct nvgpu_list_node sessions; 189 struct nvgpu_list_node requests; 190 191 struct gk20a *g; 192 int status; 193 194 struct nvgpu_clk_arb_target actual_pool[2]; 195 struct nvgpu_clk_arb_target *actual; 196 197 u16 gpc2clk_default_mhz; 198 u16 mclk_default_mhz; 199 u32 voltuv_actual; 200 201 u16 gpc2clk_min, gpc2clk_max; 202 u16 mclk_min, mclk_max; 203 204 struct nvgpu_clk_arb_work_item update_vf_table_work_item; 205 struct nvgpu_clk_arb_work_item update_arb_work_item; 206 207 struct nvgpu_cond request_wq; 208 209 
struct nvgpu_clk_vf_table *current_vf_table; 210 struct nvgpu_clk_vf_table vf_table_pool[2]; 211 u32 vf_table_index; 212 213 u16 *mclk_f_points; 214 nvgpu_atomic_t req_nr; 215 216 u32 mclk_f_numpoints; 217 u16 *gpc2clk_f_points; 218 u32 gpc2clk_f_numpoints; 219 220 bool clk_arb_events_supported; 221 222 nvgpu_atomic64_t alarm_mask; 223 struct nvgpu_clk_notification_queue notification_queue; 224 225#ifdef CONFIG_DEBUG_FS 226 struct nvgpu_clk_arb_debug debug_pool[2]; 227 struct nvgpu_clk_arb_debug *debug; 228 bool debugfs_set; 229#endif 230}; 231 232struct nvgpu_clk_dev { 233 struct nvgpu_clk_session *session; 234 union { 235 struct nvgpu_list_node link; 236 struct nvgpu_list_node node; 237 }; 238 struct nvgpu_cond readout_wq; 239 nvgpu_atomic_t poll_mask; 240 u16 gpc2clk_target_mhz; 241 u16 mclk_target_mhz; 242 u32 alarms_reported; 243 nvgpu_atomic_t enabled_mask; 244 struct nvgpu_clk_notification_queue queue; 245 u32 arb_queue_head; 246 struct nvgpu_ref refcount; 247}; 248 249struct nvgpu_clk_session { 250 bool zombie; 251 struct gk20a *g; 252 struct nvgpu_ref refcount; 253 struct nvgpu_list_node link; 254 struct nvgpu_list_node targets; 255 256 struct nvgpu_spinlock session_lock; 257 struct nvgpu_clk_arb_target target_pool[2]; 258 struct nvgpu_clk_arb_target *target; 259}; 260 261static inline struct nvgpu_clk_session * 262nvgpu_clk_session_from_link(struct nvgpu_list_node *node) 263{ 264 return (struct nvgpu_clk_session *) 265 ((uintptr_t)node - offsetof(struct nvgpu_clk_session, link)); 266}; 267 268static inline struct nvgpu_clk_dev * 269nvgpu_clk_dev_from_node(struct nvgpu_list_node *node) 270{ 271 return (struct nvgpu_clk_dev *) 272 ((uintptr_t)node - offsetof(struct nvgpu_clk_dev, node)); 273}; 274 275static inline struct nvgpu_clk_dev * 276nvgpu_clk_dev_from_link(struct nvgpu_list_node *node) 277{ 278 return (struct nvgpu_clk_dev *) 279 ((uintptr_t)node - offsetof(struct nvgpu_clk_dev, link)); 280}; 281 282static inline struct nvgpu_clk_arb_work_item * 
283nvgpu_clk_arb_work_item_from_worker_item(struct nvgpu_list_node *node) 284{ 285 return (struct nvgpu_clk_arb_work_item *) 286 ((uintptr_t)node - offsetof(struct nvgpu_clk_arb_work_item, worker_item)); 287}; 288 289void nvgpu_clk_arb_worker_enqueue(struct gk20a *g, 290 struct nvgpu_clk_arb_work_item *work_item); 291 292int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb); 293 294int nvgpu_clk_arb_worker_init(struct gk20a *g); 295 296int nvgpu_clk_arb_init_arbiter(struct gk20a *g); 297 298bool nvgpu_clk_arb_has_active_req(struct gk20a *g); 299 300int nvgpu_clk_arb_get_arbiter_clk_range(struct gk20a *g, u32 api_domain, 301 u16 *min_mhz, u16 *max_mhz); 302 303int nvgpu_clk_arb_get_arbiter_actual_mhz(struct gk20a *g, 304 u32 api_domain, u16 *actual_mhz); 305 306int nvgpu_clk_arb_get_arbiter_effective_mhz(struct gk20a *g, 307 u32 api_domain, u16 *effective_mhz); 308 309int nvgpu_clk_arb_get_arbiter_clk_f_points(struct gk20a *g, 310 u32 api_domain, u32 *max_points, u16 *fpoints); 311 312u32 nvgpu_clk_arb_get_arbiter_clk_domains(struct gk20a *g); 313bool nvgpu_clk_arb_is_valid_domain(struct gk20a *g, u32 api_domain); 314 315void nvgpu_clk_arb_cleanup_arbiter(struct gk20a *g); 316 317int nvgpu_clk_arb_install_session_fd(struct gk20a *g, 318 struct nvgpu_clk_session *session); 319 320int nvgpu_clk_arb_init_session(struct gk20a *g, 321 struct nvgpu_clk_session **_session); 322 323void nvgpu_clk_arb_release_session(struct gk20a *g, 324 struct nvgpu_clk_session *session); 325 326int nvgpu_clk_arb_commit_request_fd(struct gk20a *g, 327 struct nvgpu_clk_session *session, int request_fd); 328 329int nvgpu_clk_arb_set_session_target_mhz(struct nvgpu_clk_session *session, 330 int fd, u32 api_domain, u16 target_mhz); 331 332int nvgpu_clk_arb_get_session_target_mhz(struct nvgpu_clk_session *session, 333 u32 api_domain, u16 *target_mhz); 334 335int nvgpu_clk_arb_install_event_fd(struct gk20a *g, 336 struct nvgpu_clk_session *session, int *event_fd, u32 alarm_mask); 337 
338int nvgpu_clk_arb_install_request_fd(struct gk20a *g, 339 struct nvgpu_clk_session *session, int *event_fd); 340 341void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g); 342 343int nvgpu_clk_arb_get_current_pstate(struct gk20a *g); 344 345void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock); 346 347void nvgpu_clk_arb_send_thermal_alarm(struct gk20a *g); 348 349void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm); 350 351void nvgpu_clk_arb_schedule_alarm(struct gk20a *g, u32 alarm); 352 353void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm); 354 355void nvgpu_clk_arb_free_session(struct nvgpu_ref *refcount); 356 357void nvgpu_clk_arb_free_fd(struct nvgpu_ref *refcount); 358 359u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev, 360 struct nvgpu_clk_arb_target *target, 361 u32 alarm); 362 363int nvgpu_clk_notification_queue_alloc(struct gk20a *g, 364 struct nvgpu_clk_notification_queue *queue, 365 size_t events_number); 366 367void nvgpu_clk_notification_queue_free(struct gk20a *g, 368 struct nvgpu_clk_notification_queue *queue); 369 370void nvgpu_clk_arb_event_post_event(struct nvgpu_clk_dev *dev); 371 372unsigned long nvgpu_clk_measure_freq(struct gk20a *g, u32 api_domain); 373 374#ifdef CONFIG_DEBUG_FS 375int nvgpu_clk_arb_debugfs_init(struct gk20a *g); 376#endif 377#endif /* NVGPU_CLK_ARB_H */ 378
diff --git a/include/nvgpu/comptags.h b/include/nvgpu/comptags.h
deleted file mode 100644
index 3df1b6f..0000000
--- a/include/nvgpu/comptags.h
+++ /dev/null
@@ -1,104 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_COMPTAGS_H 24#define NVGPU_COMPTAGS_H 25 26#include <nvgpu/lock.h> 27#include <nvgpu/types.h> 28 29struct gk20a; 30struct nvgpu_os_buffer; 31 32struct gk20a_comptags { 33 u32 offset; 34 u32 lines; 35 36 /* 37 * This signals whether allocation has been attempted. Observe 'lines' 38 * to see whether the comptags were actually allocated. We try alloc 39 * only once per buffer in order not to break multiple compressible-kind 40 * mappings. 41 */ 42 bool allocated; 43 44 /* 45 * Do comptags need to be cleared before mapping? 46 */ 47 bool needs_clear; 48}; 49 50struct gk20a_comptag_allocator { 51 struct gk20a *g; 52 53 struct nvgpu_mutex lock; 54 55 /* This bitmap starts at ctag 1. 0th cannot be taken. 
*/ 56 unsigned long *bitmap; 57 58 /* Size of bitmap, not max ctags, so one less. */ 59 unsigned long size; 60}; 61 62/* real size here, but first (ctag 0) isn't used */ 63int gk20a_comptag_allocator_init(struct gk20a *g, 64 struct gk20a_comptag_allocator *allocator, 65 unsigned long size); 66void gk20a_comptag_allocator_destroy(struct gk20a *g, 67 struct gk20a_comptag_allocator *allocator); 68 69int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator, 70 u32 *offset, u32 len); 71void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator, 72 u32 offset, u32 len); 73 74/* 75 * Defined by OS specific code since comptags are stored in a highly OS specific 76 * way. 77 */ 78int gk20a_alloc_or_get_comptags(struct gk20a *g, 79 struct nvgpu_os_buffer *buf, 80 struct gk20a_comptag_allocator *allocator, 81 struct gk20a_comptags *comptags); 82void gk20a_get_comptags(struct nvgpu_os_buffer *buf, 83 struct gk20a_comptags *comptags); 84 85/* 86 * These functions must be used to synchronize comptags clear. The usage: 87 * 88 * if (gk20a_comptags_start_clear(os_buf)) { 89 * // we now hold the buffer lock for clearing 90 * 91 * bool successful = hw_clear_comptags(); 92 * 93 * // mark the buf cleared (or not) and release the buffer lock 94 * gk20a_comptags_finish_clear(os_buf, successful); 95 * } 96 * 97 * If gk20a_start_comptags_clear() returns false, another caller has 98 * already cleared the comptags. 99 */ 100bool gk20a_comptags_start_clear(struct nvgpu_os_buffer *buf); 101void gk20a_comptags_finish_clear(struct nvgpu_os_buffer *buf, 102 bool clear_successful); 103 104#endif /* NVGPU_COMPTAGS_H */
diff --git a/include/nvgpu/cond.h b/include/nvgpu/cond.h
deleted file mode 100644
index 49e9d1f..0000000
--- a/include/nvgpu/cond.h
+++ /dev/null
@@ -1,106 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_COND_H 24#define NVGPU_COND_H 25 26#ifdef __KERNEL__ 27#include <nvgpu/linux/cond.h> 28#elif defined(__NVGPU_POSIX__) 29#include <nvgpu/posix/cond.h> 30#else 31#include <nvgpu_rmos/include/cond.h> 32#endif 33 34/* 35 * struct nvgpu_cond 36 * 37 * Should be implemented per-OS in a separate library 38 */ 39struct nvgpu_cond; 40 41/** 42 * nvgpu_cond_init - Initialize a condition variable 43 * 44 * @cond - The condition variable to initialize 45 * 46 * Initialize a condition variable before using it. 47 */ 48int nvgpu_cond_init(struct nvgpu_cond *cond); 49 50/** 51 * nvgpu_cond_signal - Signal a condition variable 52 * 53 * @cond - The condition variable to signal 54 * 55 * Wake up a waiter for a condition variable to check if its condition has been 56 * satisfied. 
57 * 58 * The waiter is using an uninterruptible wait. 59 */ 60int nvgpu_cond_signal(struct nvgpu_cond *cond); 61 62/** 63 * nvgpu_cond_signal_interruptible - Signal a condition variable 64 * 65 * @cond - The condition variable to signal 66 * 67 * Wake up a waiter for a condition variable to check if its condition has been 68 * satisfied. 69 * 70 * The waiter is using an interruptible wait. 71 */ 72int nvgpu_cond_signal_interruptible(struct nvgpu_cond *cond); 73 74/** 75 * nvgpu_cond_broadcast - Signal all waiters of a condition variable 76 * 77 * @cond - The condition variable to signal 78 * 79 * Wake up all waiters for a condition variable to check if their conditions 80 * have been satisfied. 81 * 82 * The waiters are using an uninterruptible wait. 83 */ 84int nvgpu_cond_broadcast(struct nvgpu_cond *cond); 85 86/** 87 * nvgpu_cond_broadcast_interruptible - Signal all waiters of a condition 88 * variable 89 * 90 * @cond - The condition variable to signal 91 * 92 * Wake up all waiters for a condition variable to check if their conditions 93 * have been satisfied. 94 * 95 * The waiters are using an interruptible wait. 96 */ 97int nvgpu_cond_broadcast_interruptible(struct nvgpu_cond *cond); 98 99/** 100 * nvgpu_cond_destroy - Destroy a condition variable 101 * 102 * @cond - The condition variable to destroy 103 */ 104void nvgpu_cond_destroy(struct nvgpu_cond *cond); 105 106#endif /* NVGPU_COND_H */
diff --git a/include/nvgpu/ctxsw_trace.h b/include/nvgpu/ctxsw_trace.h
deleted file mode 100644
index 033e020..0000000
--- a/include/nvgpu/ctxsw_trace.h
+++ /dev/null
@@ -1,94 +0,0 @@ 1/* 2 * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_CTXSW_TRACE_H 24#define NVGPU_CTXSW_TRACE_H 25 26#include <nvgpu/types.h> 27 28struct gk20a; 29struct tsg_gk20a; 30struct channel_gk20a; 31 32#define NVGPU_GPU_CTXSW_TAG_SOF 0x00 33#define NVGPU_GPU_CTXSW_TAG_CTXSW_REQ_BY_HOST 0x01 34#define NVGPU_GPU_CTXSW_TAG_FE_ACK 0x02 35#define NVGPU_GPU_CTXSW_TAG_FE_ACK_WFI 0x0a 36#define NVGPU_GPU_CTXSW_TAG_FE_ACK_GFXP 0x0b 37#define NVGPU_GPU_CTXSW_TAG_FE_ACK_CTAP 0x0c 38#define NVGPU_GPU_CTXSW_TAG_FE_ACK_CILP 0x0d 39#define NVGPU_GPU_CTXSW_TAG_SAVE_END 0x03 40#define NVGPU_GPU_CTXSW_TAG_RESTORE_START 0x04 41#define NVGPU_GPU_CTXSW_TAG_CONTEXT_START 0x05 42#define NVGPU_GPU_CTXSW_TAG_ENGINE_RESET 0xfe 43#define NVGPU_GPU_CTXSW_TAG_INVALID_TIMESTAMP 0xff 44#define NVGPU_GPU_CTXSW_TAG_LAST \ 45 NVGPU_GPU_CTXSW_TAG_INVALID_TIMESTAMP 46 47#define NVGPU_GPU_CTXSW_FILTER_ISSET(n, p) \ 48 ((p)->tag_bits[(n) / 64] & (1 << ((n) & 63))) 49 50#define NVGPU_GPU_CTXSW_FILTER_SIZE (NVGPU_GPU_CTXSW_TAG_LAST + 1) 51#define NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT 31 52 53struct nvgpu_gpu_ctxsw_trace_filter { 54 u64 tag_bits[(NVGPU_GPU_CTXSW_FILTER_SIZE + 63) / 64]; 55}; 56 57/* 58 * The binary format of 'struct nvgpu_gpu_ctxsw_trace_entry' introduced here 59 * should match that of 'struct nvgpu_ctxsw_trace_entry' defined in uapi 60 * header, since this struct is intended to be a mirror copy of the uapi 61 * struct. 
62 */ 63struct nvgpu_gpu_ctxsw_trace_entry { 64 u8 tag; 65 u8 vmid; 66 u16 seqno; /* sequence number to detect drops */ 67 u32 context_id; /* context_id as allocated by FECS */ 68 u64 pid; /* 64-bit is max bits of different OS pid */ 69 u64 timestamp; /* 64-bit time */ 70}; 71 72int gk20a_ctxsw_trace_init(struct gk20a *g); 73 74void gk20a_ctxsw_trace_channel_reset(struct gk20a *g, struct channel_gk20a *ch); 75void gk20a_ctxsw_trace_tsg_reset(struct gk20a *g, struct tsg_gk20a *tsg); 76 77void gk20a_ctxsw_trace_cleanup(struct gk20a *g); 78int gk20a_ctxsw_trace_write(struct gk20a *g, 79 struct nvgpu_gpu_ctxsw_trace_entry *entry); 80void gk20a_ctxsw_trace_wake_up(struct gk20a *g, int vmid); 81 82#ifdef CONFIG_GK20A_CTXSW_TRACE 83struct file; 84struct vm_area_struct; 85 86int gk20a_ctxsw_dev_mmap(struct file *filp, struct vm_area_struct *vma); 87int gk20a_ctxsw_dev_ring_alloc(struct gk20a *g, void **buf, size_t *size); 88int gk20a_ctxsw_dev_ring_free(struct gk20a *g); 89int gk20a_ctxsw_dev_mmap_buffer(struct gk20a *g, struct vm_area_struct *vma); 90#endif 91 92u8 nvgpu_gpu_ctxsw_tags_to_common_tags(u8 tags); 93 94#endif /*NVGPU_CTXSW_TRACE_H */
diff --git a/include/nvgpu/debug.h b/include/nvgpu/debug.h
deleted file mode 100644
index 33bf621..0000000
--- a/include/nvgpu/debug.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * GK20A Debug functionality 3 * 4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24 25#ifndef NVGPU_DEBUG_H 26#define NVGPU_DEBUG_H 27 28#include <nvgpu/types.h> 29 30struct gk20a; 31struct gpu_ops; 32 33struct gk20a_debug_output { 34 void (*fn)(void *ctx, const char *str, size_t len); 35 void *ctx; 36 char buf[256]; 37}; 38 39#ifdef CONFIG_DEBUG_FS 40extern unsigned int gk20a_debug_trace_cmdbuf; 41 42void gk20a_debug_output(struct gk20a_debug_output *o, 43 const char *fmt, ...); 44 45void gk20a_debug_dump(struct gk20a *g); 46void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o); 47int gk20a_gr_debug_dump(struct gk20a *g); 48void gk20a_init_debug_ops(struct gpu_ops *gops); 49 50void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink); 51void gk20a_debug_deinit(struct gk20a *g); 52#else 53static inline void gk20a_debug_output(struct gk20a_debug_output *o, 54 const char *fmt, ...) {} 55 56static inline void gk20a_debug_dump(struct gk20a *g) {} 57static inline void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o) {} 58static inline int gk20a_gr_debug_dump(struct gk20a *g) { return 0;} 59static inline void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink) {} 60static inline void gk20a_debug_deinit(struct gk20a *g) {} 61#endif 62 63#endif /* NVGPU_DEBUG_H */
diff --git a/include/nvgpu/defaults.h b/include/nvgpu/defaults.h
deleted file mode 100644
index cae380a..0000000
--- a/include/nvgpu/defaults.h
+++ /dev/null
@@ -1,33 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef __NVGPU_DEFAULTS_H__ 24#define __NVGPU_DEFAULTS_H__ 25 26/* 27 * Default timeout used for channel watchdog and ctxsw timeout. 28 */ 29#define NVGPU_DEFAULT_GR_IDLE_TIMEOUT 3000 30 31#define NVGPU_DEFAULT_RAILGATE_IDLE_TIMEOUT 500 32 33#endif
diff --git a/include/nvgpu/dma.h b/include/nvgpu/dma.h
deleted file mode 100644
index cbb829b..0000000
--- a/include/nvgpu/dma.h
+++ /dev/null
@@ -1,361 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_DMA_H 24#define NVGPU_DMA_H 25 26#include <nvgpu/types.h> 27 28struct gk20a; 29struct vm_gk20a; 30struct nvgpu_mem; 31 32/* 33 * Flags for the below nvgpu_dma_{alloc,alloc_map}_flags* 34 */ 35 36/* 37 * Don't create a virtual kernel mapping for the buffer but only allocate it; 38 * this may save some resources. The buffer can be mapped later explicitly. 39 */ 40#define NVGPU_DMA_NO_KERNEL_MAPPING BIT32(0) 41 42/* 43 * Don't allow building the buffer from individual pages but require a 44 * physically contiguous block. 45 */ 46#define NVGPU_DMA_FORCE_CONTIGUOUS BIT32(1) 47 48/* 49 * Make the mapping read-only. 50 */ 51#define NVGPU_DMA_READ_ONLY BIT32(2) 52 53/** 54 * nvgpu_iommuable - Check if GPU is behind IOMMU 55 * 56 * @g - The GPU. 
57 * 58 * Returns true if the passed GPU is behind an IOMMU; false otherwise. If the 59 * GPU is iommuable then the DMA address in nvgpu_mem_sgl is valid. 60 * 61 * Note that even if a GPU is behind an IOMMU that does not necessarily mean the 62 * GPU _must_ use DMA addresses. GPUs may still use physical addresses if it 63 * makes sense. 64 */ 65bool nvgpu_iommuable(struct gk20a *g); 66 67/** 68 * nvgpu_dma_alloc - Allocate DMA memory 69 * 70 * @g - The GPU. 71 * @size - Size of the allocation in bytes. 72 * @mem - Struct for storing the allocation information. 73 * 74 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 75 * Returns 0 on success and a suitable error code when there's an error. This 76 * memory can be either placed in VIDMEM or SYSMEM, which ever is more 77 * convenient for the driver. 78 */ 79int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem); 80 81/** 82 * nvgpu_dma_alloc_flags - Allocate DMA memory 83 * 84 * @g - The GPU. 85 * @flags - Flags modifying the operation of the DMA allocation. 86 * @size - Size of the allocation in bytes. 87 * @mem - Struct for storing the allocation information. 88 * 89 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 90 * Returns 0 on success and a suitable error code when there's an error. This 91 * memory can be either placed in VIDMEM or SYSMEM, which ever is more 92 * convenient for the driver. 93 * 94 * The following flags are accepted: 95 * 96 * %NVGPU_DMA_NO_KERNEL_MAPPING 97 * %NVGPU_DMA_FORCE_CONTIGUOUS 98 * %NVGPU_DMA_READ_ONLY 99 */ 100int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, 101 struct nvgpu_mem *mem); 102 103/** 104 * nvgpu_dma_alloc_sys - Allocate DMA memory 105 * 106 * @g - The GPU. 107 * @size - Size of the allocation in bytes. 108 * @mem - Struct for storing the allocation information. 109 * 110 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 
111 * Returns 0 on success and a suitable error code when there's an error. This 112 * allocates memory specifically in SYSMEM. 113 */ 114int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem); 115 116/** 117 * nvgpu_dma_alloc_flags_sys - Allocate DMA memory 118 * 119 * @g - The GPU. 120 * @flags - Flags modifying the operation of the DMA allocation. 121 * @size - Size of the allocation in bytes. 122 * @mem - Struct for storing the allocation information. 123 * 124 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 125 * Returns 0 on success and a suitable error code when there's an error. This 126 * allocates memory specifically in SYSMEM. 127 * 128 * The following flags are accepted: 129 * 130 * %NVGPU_DMA_NO_KERNEL_MAPPING 131 * %NVGPU_DMA_FORCE_CONTIGUOUS 132 * %NVGPU_DMA_READ_ONLY 133 */ 134int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, 135 size_t size, struct nvgpu_mem *mem); 136 137/** 138 * nvgpu_dma_alloc_vid - Allocate DMA memory 139 * 140 * @g - The GPU. 141 * @size - Size of the allocation in bytes. 142 * @mem - Struct for storing the allocation information. 143 * 144 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 145 * Returns 0 on success and a suitable error code when there's an error. This 146 * allocates memory specifically in VIDMEM. 147 */ 148int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem); 149 150/** 151 * nvgpu_dma_alloc_flags_vid - Allocate DMA memory 152 * 153 * @g - The GPU. 154 * @flags - Flags modifying the operation of the DMA allocation. 155 * @size - Size of the allocation in bytes. 156 * @mem - Struct for storing the allocation information. 157 * 158 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 159 * Returns 0 on success and a suitable error code when there's an error. This 160 * allocates memory specifically in VIDMEM. 
161 * 162 * Only the following flags are accepted: 163 * 164 * %NVGPU_DMA_NO_KERNEL_MAPPING 165 * 166 */ 167int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags, 168 size_t size, struct nvgpu_mem *mem); 169 170 171/** 172 * nvgpu_dma_alloc_flags_vid_at - Allocate DMA memory 173 * 174 * @g - The GPU. 175 * @size - Size of the allocation in bytes. 176 * @mem - Struct for storing the allocation information. 177 * @at - A specific location to attempt to allocate memory from or 0 if the 178 * caller does not care what the address is. 179 * 180 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 181 * Returns 0 on success and a suitable error code when there's an error. This 182 * allocates memory specifically in VIDMEM. 183 * 184 */ 185int nvgpu_dma_alloc_vid_at(struct gk20a *g, 186 size_t size, struct nvgpu_mem *mem, u64 at); 187 188/** 189 * nvgpu_dma_alloc_flags_vid_at - Allocate DMA memory 190 * 191 * @g - The GPU. 192 * @flags - Flags modifying the operation of the DMA allocation. 193 * @size - Size of the allocation in bytes. 194 * @mem - Struct for storing the allocation information. 195 * @at - A specific location to attempt to allocate memory from or 0 if the 196 * caller does not care what the address is. 197 * 198 * Allocate memory suitable for doing DMA. Store the allocation info in @mem. 199 * Returns 0 on success and a suitable error code when there's an error. This 200 * allocates memory specifically in VIDMEM. 201 * 202 * Only the following flags are accepted: 203 * 204 * %NVGPU_DMA_NO_KERNEL_MAPPING 205 */ 206int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, 207 size_t size, struct nvgpu_mem *mem, u64 at); 208 209/** 210 * nvgpu_dma_free - Free a DMA allocation 211 * 212 * @g - The GPU. 213 * @mem - An allocation to free. 
214 * 215 * Free memory created with any of: 216 * 217 * nvgpu_dma_alloc() 218 * nvgpu_dma_alloc_flags() 219 * nvgpu_dma_alloc_sys() 220 * nvgpu_dma_alloc_flags_sys() 221 * nvgpu_dma_alloc_vid() 222 * nvgpu_dma_alloc_flags_vid() 223 * nvgpu_dma_alloc_flags_vid_at() 224 */ 225void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem); 226 227/** 228 * nvgpu_dma_alloc_map - Allocate DMA memory and map into GMMU. 229 * 230 * @vm - VM context for GMMU mapping. 231 * @size - Size of the allocation in bytes. 232 * @mem - Struct for storing the allocation information. 233 * 234 * Allocate memory suitable for doing DMA and map that memory into the GMMU. 235 * Note this is different than mapping it into the CPU. This memory can be 236 * either placed in VIDMEM or SYSMEM, which ever is more convenient for the 237 * driver. 238 * 239 * Note: currently a bug exists in the nvgpu_dma_alloc_map*() routines: you 240 * cannot use nvgpu_gmmu_map() on said buffer - it will overwrite the necessary 241 * information for the DMA unmap routines to actually unmap the buffer. You 242 * will either leak mappings or see GMMU faults. 243 */ 244int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size, 245 struct nvgpu_mem *mem); 246 247/** 248 * nvgpu_dma_alloc_map_flags - Allocate DMA memory and map into GMMU. 249 * 250 * @vm - VM context for GMMU mapping. 251 * @flags - Flags modifying the operation of the DMA allocation. 252 * @size - Size of the allocation in bytes. 253 * @mem - Struct for storing the allocation information. 254 * 255 * Allocate memory suitable for doing DMA and map that memory into the GMMU. 256 * Note this is different than mapping it into the CPU. This memory can be 257 * either placed in VIDMEM or SYSMEM, which ever is more convenient for the 258 * driver. 259 * 260 * This version passes @flags on to the underlying DMA allocation. 
The accepted 261 * flags are: 262 * 263 * %NVGPU_DMA_NO_KERNEL_MAPPING 264 * %NVGPU_DMA_FORCE_CONTIGUOUS 265 * %NVGPU_DMA_READ_ONLY 266 */ 267int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags, 268 size_t size, struct nvgpu_mem *mem); 269 270/** 271 * nvgpu_dma_alloc_map_sys - Allocate DMA memory and map into GMMU. 272 * 273 * @vm - VM context for GMMU mapping. 274 * @size - Size of the allocation in bytes. 275 * @mem - Struct for storing the allocation information. 276 * 277 * Allocate memory suitable for doing DMA and map that memory into the GMMU. 278 * This memory will be placed in SYSMEM. 279 */ 280int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size, 281 struct nvgpu_mem *mem); 282 283/** 284 * nvgpu_dma_alloc_map_flags_sys - Allocate DMA memory and map into GMMU. 285 * 286 * @vm - VM context for GMMU mapping. 287 * @flags - Flags modifying the operation of the DMA allocation. 288 * @size - Size of the allocation in bytes. 289 * @mem - Struct for storing the allocation information. 290 * 291 * Allocate memory suitable for doing DMA and map that memory into the GMMU. 292 * This memory will be placed in SYSMEM. 293 * 294 * This version passes @flags on to the underlying DMA allocation. The accepted 295 * flags are: 296 * 297 * %NVGPU_DMA_NO_KERNEL_MAPPING 298 * %NVGPU_DMA_FORCE_CONTIGUOUS 299 * %NVGPU_DMA_READ_ONLY 300 */ 301int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags, 302 size_t size, struct nvgpu_mem *mem); 303 304/** 305 * nvgpu_dma_alloc_map_vid - Allocate DMA memory and map into GMMU. 306 * 307 * @vm - VM context for GMMU mapping. 308 * @size - Size of the allocation in bytes. 309 * @mem - Struct for storing the allocation information. 310 * 311 * Allocate memory suitable for doing DMA and map that memory into the GMMU. 312 * This memory will be placed in VIDMEM. 
313 */ 314int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size, 315 struct nvgpu_mem *mem); 316 317/** 318 * nvgpu_dma_alloc_map_flags_vid - Allocate DMA memory and map into GMMU. 319 * 320 * @vm - VM context for GMMU mapping. 321 * @flags - Flags modifying the operation of the DMA allocation. 322 * @size - Size of the allocation in bytes. 323 * @mem - Struct for storing the allocation information. 324 * 325 * Allocate memory suitable for doing DMA and map that memory into the GMMU. 326 * This memory will be placed in VIDMEM. 327 * 328 * This version passes @flags on to the underlying DMA allocation. The accepted 329 * flags are: 330 * 331 * %NVGPU_DMA_NO_KERNEL_MAPPING 332 * %NVGPU_DMA_FORCE_CONTIGUOUS 333 * %NVGPU_DMA_READ_ONLY 334 */ 335int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags, 336 size_t size, struct nvgpu_mem *mem); 337 338/** 339 * nvgpu_dma_unmap_free - Free a DMA allocation 340 * 341 * @g - The GPU. 342 * @mem - An allocation to free. 343 * 344 * Free memory created with any of: 345 * 346 * nvgpu_dma_alloc_map() 347 * nvgpu_dma_alloc_map_flags() 348 * nvgpu_dma_alloc_map_sys() 349 * nvgpu_dma_alloc_map_flags_sys() 350 * nvgpu_dma_alloc_map_vid() 351 * nvgpu_dma_alloc_map_flags_vid() 352 */ 353void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem); 354 355/* 356 * Don't use these directly. Instead use nvgpu_dma_free(). 357 */ 358void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem); 359void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem); 360 361#endif /* NVGPU_DMA_H */
diff --git a/include/nvgpu/dt.h b/include/nvgpu/dt.h
deleted file mode 100644
index b5fdbfc..0000000
--- a/include/nvgpu/dt.h
+++ /dev/null
@@ -1,28 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <nvgpu/types.h> 24 25struct gk20a; 26 27int nvgpu_dt_read_u32_index(struct gk20a *g, const char *name, 28 u32 index, u32 *value);
diff --git a/include/nvgpu/ecc.h b/include/nvgpu/ecc.h
deleted file mode 100644
index 9b211ef..0000000
--- a/include/nvgpu/ecc.h
+++ /dev/null
@@ -1,162 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_ECC_H 24#define NVGPU_ECC_H 25 26#include <nvgpu/types.h> 27#include <nvgpu/list.h> 28 29#define NVGPU_ECC_STAT_NAME_MAX_SIZE 100 30 31struct gk20a; 32 33struct nvgpu_ecc_stat { 34 char name[NVGPU_ECC_STAT_NAME_MAX_SIZE]; 35 u32 counter; 36 struct nvgpu_list_node node; 37}; 38 39static inline struct nvgpu_ecc_stat *nvgpu_ecc_stat_from_node( 40 struct nvgpu_list_node *node) 41{ 42 return (struct nvgpu_ecc_stat *)( 43 (uintptr_t)node - offsetof(struct nvgpu_ecc_stat, node) 44 ); 45} 46 47struct nvgpu_ecc { 48 struct { 49 /* stats per tpc */ 50 51 struct nvgpu_ecc_stat **sm_lrf_ecc_single_err_count; 52 struct nvgpu_ecc_stat **sm_lrf_ecc_double_err_count; 53 54 struct nvgpu_ecc_stat **sm_shm_ecc_sec_count; 55 struct nvgpu_ecc_stat **sm_shm_ecc_sed_count; 56 struct nvgpu_ecc_stat **sm_shm_ecc_ded_count; 57 58 struct nvgpu_ecc_stat **tex_ecc_total_sec_pipe0_count; 59 struct nvgpu_ecc_stat **tex_ecc_total_ded_pipe0_count; 60 struct nvgpu_ecc_stat **tex_unique_ecc_sec_pipe0_count; 61 struct nvgpu_ecc_stat **tex_unique_ecc_ded_pipe0_count; 62 struct nvgpu_ecc_stat **tex_ecc_total_sec_pipe1_count; 63 struct nvgpu_ecc_stat **tex_ecc_total_ded_pipe1_count; 64 struct nvgpu_ecc_stat **tex_unique_ecc_sec_pipe1_count; 65 struct nvgpu_ecc_stat **tex_unique_ecc_ded_pipe1_count; 66 67 struct nvgpu_ecc_stat **sm_l1_tag_ecc_corrected_err_count; 68 struct nvgpu_ecc_stat **sm_l1_tag_ecc_uncorrected_err_count; 69 struct nvgpu_ecc_stat **sm_cbu_ecc_corrected_err_count; 70 struct nvgpu_ecc_stat **sm_cbu_ecc_uncorrected_err_count; 71 struct nvgpu_ecc_stat **sm_l1_data_ecc_corrected_err_count; 72 struct nvgpu_ecc_stat **sm_l1_data_ecc_uncorrected_err_count; 73 struct nvgpu_ecc_stat **sm_icache_ecc_corrected_err_count; 74 struct nvgpu_ecc_stat **sm_icache_ecc_uncorrected_err_count; 75 76 /* stats per gpc */ 77 78 struct nvgpu_ecc_stat *gcc_l15_ecc_corrected_err_count; 79 struct nvgpu_ecc_stat *gcc_l15_ecc_uncorrected_err_count; 80 81 struct nvgpu_ecc_stat 
*gpccs_ecc_corrected_err_count; 82 struct nvgpu_ecc_stat *gpccs_ecc_uncorrected_err_count; 83 struct nvgpu_ecc_stat *mmu_l1tlb_ecc_corrected_err_count; 84 struct nvgpu_ecc_stat *mmu_l1tlb_ecc_uncorrected_err_count; 85 86 /* stats per device */ 87 struct nvgpu_ecc_stat *fecs_ecc_corrected_err_count; 88 struct nvgpu_ecc_stat *fecs_ecc_uncorrected_err_count; 89 } gr; 90 91 struct { 92 /* stats per lts */ 93 struct nvgpu_ecc_stat **ecc_sec_count; 94 struct nvgpu_ecc_stat **ecc_ded_count; 95 } ltc; 96 97 struct { 98 /* stats per device */ 99 struct nvgpu_ecc_stat *mmu_l2tlb_ecc_corrected_err_count; 100 struct nvgpu_ecc_stat *mmu_l2tlb_ecc_uncorrected_err_count; 101 struct nvgpu_ecc_stat *mmu_hubtlb_ecc_corrected_err_count; 102 struct nvgpu_ecc_stat *mmu_hubtlb_ecc_uncorrected_err_count; 103 struct nvgpu_ecc_stat *mmu_fillunit_ecc_corrected_err_count; 104 struct nvgpu_ecc_stat *mmu_fillunit_ecc_uncorrected_err_count; 105 } fb; 106 107 struct { 108 /* stats per device */ 109 struct nvgpu_ecc_stat *pmu_ecc_corrected_err_count; 110 struct nvgpu_ecc_stat *pmu_ecc_uncorrected_err_count; 111 } pmu; 112 113 struct { 114 /* stats per fbpa */ 115 struct nvgpu_ecc_stat *fbpa_ecc_sec_err_count; 116 struct nvgpu_ecc_stat *fbpa_ecc_ded_err_count; 117 } fbpa; 118 119 struct nvgpu_list_node stats_list; 120 int stats_count; 121}; 122 123int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g, 124 struct nvgpu_ecc_stat ***stat, const char *name); 125#define NVGPU_ECC_COUNTER_INIT_PER_TPC(stat) \ 126 nvgpu_ecc_counter_init_per_tpc(g, &g->ecc.gr.stat, #stat) 127 128int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g, 129 struct nvgpu_ecc_stat **stat, const char *name); 130#define NVGPU_ECC_COUNTER_INIT_PER_GPC(stat) \ 131 nvgpu_ecc_counter_init_per_gpc(g, &g->ecc.gr.stat, #stat) 132 133int nvgpu_ecc_counter_init(struct gk20a *g, 134 struct nvgpu_ecc_stat **stat, const char *name); 135#define NVGPU_ECC_COUNTER_INIT_GR(stat) \ 136 nvgpu_ecc_counter_init(g, &g->ecc.gr.stat, #stat) 137#define 
NVGPU_ECC_COUNTER_INIT_FB(stat) \ 138 nvgpu_ecc_counter_init(g, &g->ecc.fb.stat, #stat) 139#define NVGPU_ECC_COUNTER_INIT_PMU(stat) \ 140 nvgpu_ecc_counter_init(g, &g->ecc.pmu.stat, #stat) 141 142int nvgpu_ecc_counter_init_per_lts(struct gk20a *g, 143 struct nvgpu_ecc_stat ***stat, const char *name); 144#define NVGPU_ECC_COUNTER_INIT_PER_LTS(stat) \ 145 nvgpu_ecc_counter_init_per_lts(g, &g->ecc.ltc.stat, #stat) 146 147int nvgpu_ecc_counter_init_per_fbpa(struct gk20a *g, 148 struct nvgpu_ecc_stat **stat, const char *name); 149#define NVGPU_ECC_COUNTER_INIT_PER_FBPA(stat) \ 150 nvgpu_ecc_counter_init_per_fbpa(g, &g->ecc.fbpa.stat, #stat) 151 152void nvgpu_ecc_free(struct gk20a *g); 153 154int nvgpu_ecc_init_support(struct gk20a *g); 155void nvgpu_ecc_remove_support(struct gk20a *g); 156 157/* OSes to implement */ 158 159int nvgpu_ecc_sysfs_init(struct gk20a *g); 160void nvgpu_ecc_sysfs_remove(struct gk20a *g); 161 162#endif
diff --git a/include/nvgpu/enabled.h b/include/nvgpu/enabled.h
deleted file mode 100644
index 51e9358..0000000
--- a/include/nvgpu/enabled.h
+++ /dev/null
@@ -1,221 +0,0 @@ 1/* 2 * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_ENABLED_H 24#define NVGPU_ENABLED_H 25 26struct gk20a; 27 28#include <nvgpu/types.h> 29 30/* 31 * Available flags that describe what's enabled and what's not in the GPU. Each 32 * flag here is defined by it's offset in a bitmap. 
33 */ 34#define NVGPU_IS_FMODEL 1 35#define NVGPU_DRIVER_IS_DYING 2 36#define NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP 3 37#define NVGPU_FECS_TRACE_VA 4 38#define NVGPU_CAN_RAILGATE 5 39#define NVGPU_KERNEL_IS_DYING 6 40#define NVGPU_FECS_TRACE_FEATURE_CONTROL 7 41 42/* 43 * ECC flags 44 */ 45/* SM LRF ECC is enabled */ 46#define NVGPU_ECC_ENABLED_SM_LRF 8 47/* SM SHM ECC is enabled */ 48#define NVGPU_ECC_ENABLED_SM_SHM 9 49/* TEX ECC is enabled */ 50#define NVGPU_ECC_ENABLED_TEX 10 51/* L2 ECC is enabled */ 52#define NVGPU_ECC_ENABLED_LTC 11 53/* SM L1 DATA ECC is enabled */ 54#define NVGPU_ECC_ENABLED_SM_L1_DATA 12 55/* SM L1 TAG ECC is enabled */ 56#define NVGPU_ECC_ENABLED_SM_L1_TAG 13 57/* SM CBU ECC is enabled */ 58#define NVGPU_ECC_ENABLED_SM_CBU 14 59/* SM ICAHE ECC is enabled */ 60#define NVGPU_ECC_ENABLED_SM_ICACHE 15 61 62/* 63 * MM flags. 64 */ 65#define NVGPU_MM_UNIFY_ADDRESS_SPACES 16 66/* false if vidmem aperture actually points to sysmem */ 67#define NVGPU_MM_HONORS_APERTURE 17 68/* unified or split memory with separate vidmem? */ 69#define NVGPU_MM_UNIFIED_MEMORY 18 70/* User-space managed address spaces support */ 71#define NVGPU_SUPPORT_USERSPACE_MANAGED_AS 20 72/* IO coherence support is available */ 73#define NVGPU_SUPPORT_IO_COHERENCE 21 74/* MAP_BUFFER_EX with partial mappings */ 75#define NVGPU_SUPPORT_PARTIAL_MAPPINGS 22 76/* MAP_BUFFER_EX with sparse allocations */ 77#define NVGPU_SUPPORT_SPARSE_ALLOCS 23 78/* Direct PTE kind control is supported (map_buffer_ex) */ 79#define NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL 24 80/* Support batch mapping */ 81#define NVGPU_SUPPORT_MAP_BUFFER_BATCH 25 82/* Use coherent aperture for sysmem. */ 83#define NVGPU_USE_COHERENT_SYSMEM 26 84/* Use physical scatter tables instead of IOMMU */ 85#define NVGPU_MM_USE_PHYSICAL_SG 27 86/* WAR for gm20b chips. */ 87#define NVGPU_MM_FORCE_128K_PMU_VM 28 88/* SW ERRATA to disable L3 alloc Bit of the physical address. 89 * Bit number varies between SOCs. 90 * E.g. 
64GB physical RAM support for gv11b requires this SW errata 91 * to be enabled. 92 */ 93#define NVGPU_DISABLE_L3_SUPPORT 29 94/* 95 * Host flags 96 */ 97#define NVGPU_HAS_SYNCPOINTS 30 98/* sync fence FDs are available in, e.g., submit_gpfifo */ 99#define NVGPU_SUPPORT_SYNC_FENCE_FDS 31 100/* NVGPU_DBG_GPU_IOCTL_CYCLE_STATS is available */ 101#define NVGPU_SUPPORT_CYCLE_STATS 32 102/* NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT is available */ 103#define NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT 33 104/* Both gpu driver and device support TSG */ 105#define NVGPU_SUPPORT_TSG 34 106/* Fast deterministic submits with no job tracking are supported */ 107#define NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_NO_JOBTRACKING 35 108/* Deterministic submits are supported even with job tracking */ 109#define NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL 36 110/* NVGPU_IOCTL_CHANNEL_RESCHEDULE_RUNLIST is available */ 111#define NVGPU_SUPPORT_RESCHEDULE_RUNLIST 37 112 113/* NVGPU_GPU_IOCTL_GET_EVENT_FD is available */ 114#define NVGPU_SUPPORT_DEVICE_EVENTS 38 115/* FECS context switch tracing is available */ 116#define NVGPU_SUPPORT_FECS_CTXSW_TRACE 39 117 118/* NVGPU_GPU_IOCTL_SET_DETERMINISTIC_OPTS is available */ 119#define NVGPU_SUPPORT_DETERMINISTIC_OPTS 40 120 121/* 122 * Security flags 123 */ 124 125#define NVGPU_SEC_SECUREGPCCS 41 126#define NVGPU_SEC_PRIVSECURITY 42 127/* VPR is supported */ 128#define NVGPU_SUPPORT_VPR 43 129 130/* 131 * Nvlink flags 132 */ 133 134#define NVGPU_SUPPORT_NVLINK 45 135/* 136 * PMU flags. 
137 */ 138/* perfmon enabled or disabled for PMU */ 139#define NVGPU_PMU_PERFMON 48 140#define NVGPU_PMU_PSTATE 49 141#define NVGPU_PMU_ZBC_SAVE 50 142#define NVGPU_PMU_FECS_BOOTSTRAP_DONE 51 143#define NVGPU_GPU_CAN_BLCG 52 144#define NVGPU_GPU_CAN_SLCG 53 145#define NVGPU_GPU_CAN_ELCG 54 146/* Clock control support */ 147#define NVGPU_SUPPORT_CLOCK_CONTROLS 55 148/* NVGPU_GPU_IOCTL_GET_VOLTAGE is available */ 149#define NVGPU_SUPPORT_GET_VOLTAGE 56 150/* NVGPU_GPU_IOCTL_GET_CURRENT is available */ 151#define NVGPU_SUPPORT_GET_CURRENT 57 152/* NVGPU_GPU_IOCTL_GET_POWER is available */ 153#define NVGPU_SUPPORT_GET_POWER 58 154/* NVGPU_GPU_IOCTL_GET_TEMPERATURE is available */ 155#define NVGPU_SUPPORT_GET_TEMPERATURE 59 156/* NVGPU_GPU_IOCTL_SET_THERM_ALERT_LIMIT is available */ 157#define NVGPU_SUPPORT_SET_THERM_ALERT_LIMIT 60 158 159/* whether to run PREOS binary on dGPUs */ 160#define NVGPU_PMU_RUN_PREOS 61 161 162/* set if ASPM is enabled; only makes sense for PCI */ 163#define NVGPU_SUPPORT_ASPM 62 164/* subcontexts are available */ 165#define NVGPU_SUPPORT_TSG_SUBCONTEXTS 63 166/* Simultaneous Compute and Graphics (SCG) is available */ 167#define NVGPU_SUPPORT_SCG 64 168 169/* GPU_VA address of a syncpoint is supported */ 170#define NVGPU_SUPPORT_SYNCPOINT_ADDRESS 65 171/* Allocating per-channel syncpoint in user space is supported */ 172#define NVGPU_SUPPORT_USER_SYNCPOINT 66 173 174/* USERMODE enable bit */ 175#define NVGPU_SUPPORT_USERMODE_SUBMIT 67 176 177/* Multiple WPR support */ 178#define NVGPU_SUPPORT_MULTIPLE_WPR 68 179 180/* SEC2 RTOS support*/ 181#define NVGPU_SUPPORT_SEC2_RTOS 69 182 183/* NVGPU_GPU_IOCTL_GET_GPU_LOAD is available */ 184#define NVGPU_SUPPORT_GET_GPU_LOAD 70 185 186/* PLATFORM_ATOMIC support */ 187#define NVGPU_SUPPORT_PLATFORM_ATOMIC 71 188 189/* NVGPU_GPU_IOCTL_SET_MMU_DEBUG_MODE is available */ 190#define NVGPU_SUPPORT_SET_CTX_MMU_DEBUG_MODE 72 191 192/* 193 * Must be greater than the largest bit offset in the above list. 
194 */ 195#define NVGPU_MAX_ENABLED_BITS 73U 196 197/** 198 * nvgpu_is_enabled - Check if the passed flag is enabled. 199 * 200 * @g - The GPU. 201 * @flag - Which flag to check. 202 * 203 * Returns true if the passed @flag is true; false otherwise. 204 */ 205bool nvgpu_is_enabled(struct gk20a *g, int flag); 206 207/** 208 * __nvgpu_set_enabled - Set the state of a flag. 209 * 210 * @g - The GPU. 211 * @flag - Which flag to modify. 212 * @state - The state to set the flag to. 213 * 214 * Set the state of the passed @flag to @state. 215 */ 216void __nvgpu_set_enabled(struct gk20a *g, int flag, bool state); 217 218int nvgpu_init_enabled_flags(struct gk20a *g); 219void nvgpu_free_enabled_flags(struct gk20a *g); 220 221#endif /* NVGPU_ENABLED_H */
diff --git a/include/nvgpu/errno.h b/include/nvgpu/errno.h
deleted file mode 100644
index 7e8b110..0000000
--- a/include/nvgpu/errno.h
+++ /dev/null
@@ -1,41 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_ERRNO_H 24#define NVGPU_ERRNO_H 25 26/* 27 * Explicit include to get all the -E* error messages. Useful for header files 28 * with static inlines that return error messages. In actual C code normally 29 * enough Linux/QNX headers bleed in to get the error messages but header files 30 * with sparse includes do not have this luxury. 31 */ 32 33#ifdef __KERNEL__ 34#include <linux/errno.h> 35#endif 36 37/* 38 * TODO: add else path above for QNX. 39 */ 40 41#endif /* NVGPU_ERRNO_H */
diff --git a/include/nvgpu/error_notifier.h b/include/nvgpu/error_notifier.h
deleted file mode 100644
index 7ba01e9..0000000
--- a/include/nvgpu/error_notifier.h
+++ /dev/null
@@ -1,49 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_ERROR_NOTIFIER_H 24#define NVGPU_ERROR_NOTIFIER_H 25 26#include <nvgpu/types.h> 27 28struct channel_gk20a; 29 30enum { 31 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT = 0, 32 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_METHOD, 33 NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY, 34 NVGPU_ERR_NOTIFIER_GR_EXCEPTION, 35 NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT, 36 NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY, 37 NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT, 38 NVGPU_ERR_NOTIFIER_PBDMA_ERROR, 39 NVGPU_ERR_NOTIFIER_FECS_ERR_UNIMP_FIRMWARE_METHOD, 40 NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, 41 NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH, 42}; 43 44void nvgpu_set_error_notifier_locked(struct channel_gk20a *ch, u32 error); 45void nvgpu_set_error_notifier(struct channel_gk20a *ch, u32 error); 46void nvgpu_set_error_notifier_if_empty(struct channel_gk20a *ch, u32 error); 47bool nvgpu_is_error_notifier_set(struct channel_gk20a *ch, u32 error_notifier); 48 49#endif /* NVGPU_ERROR_NOTIFIER_H */
diff --git a/include/nvgpu/falcon.h b/include/nvgpu/falcon.h
deleted file mode 100644
index 4fc97ee..0000000
--- a/include/nvgpu/falcon.h
+++ /dev/null
@@ -1,335 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_FALCON_H 24#define NVGPU_FALCON_H 25 26#include <nvgpu/types.h> 27#include <nvgpu/lock.h> 28 29/* 30 * Falcon Id Defines 31 */ 32#define FALCON_ID_PMU (0U) 33#define FALCON_ID_GSPLITE (1U) 34#define FALCON_ID_FECS (2U) 35#define FALCON_ID_GPCCS (3U) 36#define FALCON_ID_NVDEC (4U) 37#define FALCON_ID_SEC2 (7U) 38#define FALCON_ID_MINION (10U) 39 40/* 41 * Falcon Base address Defines 42 */ 43#define FALCON_NVDEC_BASE 0x00084000 44#define FALCON_PWR_BASE 0x0010a000 45#define FALCON_SEC_BASE 0x00087000 46#define FALCON_FECS_BASE 0x00409000 47#define FALCON_GPCCS_BASE 0x0041a000 48 49/* Falcon Register index */ 50#define FALCON_REG_R0 (0) 51#define FALCON_REG_R1 (1) 52#define FALCON_REG_R2 (2) 53#define FALCON_REG_R3 (3) 54#define FALCON_REG_R4 (4) 55#define FALCON_REG_R5 (5) 56#define FALCON_REG_R6 (6) 57#define FALCON_REG_R7 (7) 58#define FALCON_REG_R8 (8) 59#define FALCON_REG_R9 (9) 60#define FALCON_REG_R10 (10) 61#define FALCON_REG_R11 (11) 62#define FALCON_REG_R12 (12) 63#define FALCON_REG_R13 (13) 64#define FALCON_REG_R14 (14) 65#define FALCON_REG_R15 (15) 66#define FALCON_REG_IV0 (16) 67#define FALCON_REG_IV1 (17) 68#define FALCON_REG_UNDEFINED (18) 69#define FALCON_REG_EV (19) 70#define FALCON_REG_SP (20) 71#define FALCON_REG_PC (21) 72#define FALCON_REG_IMB (22) 73#define FALCON_REG_DMB (23) 74#define FALCON_REG_CSW (24) 75#define FALCON_REG_CCR (25) 76#define FALCON_REG_SEC (26) 77#define FALCON_REG_CTX (27) 78#define FALCON_REG_EXCI (28) 79#define FALCON_REG_RSVD0 (29) 80#define FALCON_REG_RSVD1 (30) 81#define FALCON_REG_RSVD2 (31) 82#define FALCON_REG_SIZE (32) 83 84#define FALCON_MAILBOX_0 0x0 85#define FALCON_MAILBOX_1 0x1 86#define FALCON_MAILBOX_COUNT 0x02 87#define FALCON_BLOCK_SIZE 0x100U 88 89#define GET_IMEM_TAG(IMEM_ADDR) (IMEM_ADDR >> 8) 90 91#define GET_NEXT_BLOCK(ADDR) \ 92 ((((ADDR + (FALCON_BLOCK_SIZE - 1)) & ~(FALCON_BLOCK_SIZE-1)) \ 93 / FALCON_BLOCK_SIZE) << 8) 94 95/* 96 * Falcon HWCFG request read types defines 97 
*/ 98enum flcn_hwcfg_read { 99 FALCON_IMEM_SIZE = 0, 100 FALCON_DMEM_SIZE, 101 FALCON_CORE_REV, 102 FALCON_SECURITY_MODEL, 103 FLACON_MAILBOX_COUNT 104}; 105 106/* 107 * Falcon HWCFG request write types defines 108 */ 109enum flcn_hwcfg_write { 110 FALCON_STARTCPU = 0, 111 FALCON_STARTCPU_SECURE, 112 FALCON_BOOTVEC, 113 FALCON_ITF_EN 114}; 115 116#define FALCON_MEM_SCRUBBING_TIMEOUT_MAX 1000 117#define FALCON_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 118 119enum flcn_dma_dir { 120 DMA_TO_FB = 0, 121 DMA_FROM_FB 122}; 123 124enum flcn_mem_type { 125 MEM_DMEM = 0, 126 MEM_IMEM 127}; 128 129/* Falcon ucode header format 130 * OS Code Offset 131 * OS Code Size 132 * OS Data Offset 133 * OS Data Size 134 * NumApps (N) 135 * App 0 Code Offset 136 * App 0 Code Size 137 * . . . . 138 * App N - 1 Code Offset 139 * App N - 1 Code Size 140 * App 0 Data Offset 141 * App 0 Data Size 142 * . . . . 143 * App N - 1 Data Offset 144 * App N - 1 Data Size 145 * OS Ovl Offset 146 * OS Ovl Size 147*/ 148#define OS_CODE_OFFSET 0x0 149#define OS_CODE_SIZE 0x1 150#define OS_DATA_OFFSET 0x2 151#define OS_DATA_SIZE 0x3 152#define NUM_APPS 0x4 153#define APP_0_CODE_OFFSET 0x5 154#define APP_0_CODE_SIZE 0x6 155 156struct nvgpu_falcon_dma_info { 157 u32 fb_base; 158 u32 fb_off; 159 u32 flcn_mem_off; 160 u32 size_in_bytes; 161 enum flcn_dma_dir dir; 162 u32 ctx_dma; 163 enum flcn_mem_type flcn_mem; 164 u32 is_wait_complete; 165}; 166 167struct gk20a; 168struct nvgpu_falcon; 169struct nvgpu_falcon_bl_info; 170 171/* Queue Type */ 172#define QUEUE_TYPE_DMEM 0x0U 173#define QUEUE_TYPE_EMEM 0x1U 174 175struct nvgpu_falcon_queue { 176 177 /* Queue Type (queue_type) */ 178 u8 queue_type; 179 180 /* used by nvgpu, for command LPQ/HPQ */ 181 struct nvgpu_mutex mutex; 182 183 /* current write position */ 184 u32 position; 185 /* physical dmem offset where this queue begins */ 186 u32 offset; 187 /* logical queue identifier */ 188 u32 id; 189 /* physical queue index */ 190 u32 index; 191 /* in bytes */ 192 u32 
size; 193 /* open-flag */ 194 u32 oflag; 195 196 /* queue type(DMEM-Q/FB-Q) specific ops */ 197 int (*rewind)(struct nvgpu_falcon *flcn, 198 struct nvgpu_falcon_queue *queue); 199 int (*pop)(struct nvgpu_falcon *flcn, 200 struct nvgpu_falcon_queue *queue, void *data, u32 size, 201 u32 *bytes_read); 202 int (*push)(struct nvgpu_falcon *flcn, 203 struct nvgpu_falcon_queue *queue, void *data, u32 size); 204 bool (*has_room)(struct nvgpu_falcon *flcn, 205 struct nvgpu_falcon_queue *queue, u32 size, 206 bool *need_rewind); 207 int (*tail)(struct nvgpu_falcon *flcn, 208 struct nvgpu_falcon_queue *queue, u32 *tail, bool set); 209 int (*head)(struct nvgpu_falcon *flcn, 210 struct nvgpu_falcon_queue *queue, u32 *head, bool set); 211}; 212 213struct nvgpu_falcon_version_ops { 214 void (*start_cpu_secure)(struct nvgpu_falcon *flcn); 215 void (*write_dmatrfbase)(struct nvgpu_falcon *flcn, u32 addr); 216}; 217 218/* ops which are falcon engine specific */ 219struct nvgpu_falcon_engine_dependency_ops { 220 int (*reset_eng)(struct gk20a *g); 221 int (*queue_head)(struct gk20a *g, struct nvgpu_falcon_queue *queue, 222 u32 *head, bool set); 223 int (*queue_tail)(struct gk20a *g, struct nvgpu_falcon_queue *queue, 224 u32 *tail, bool set); 225 void (*msgq_tail)(struct gk20a *g, u32 *tail, bool set); 226 int (*copy_from_emem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst, 227 u32 size, u8 port); 228 int (*copy_to_emem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src, 229 u32 size, u8 port); 230}; 231 232struct nvgpu_falcon_ops { 233 int (*reset)(struct nvgpu_falcon *flcn); 234 void (*set_irq)(struct nvgpu_falcon *flcn, bool enable); 235 bool (*clear_halt_interrupt_status)(struct nvgpu_falcon *flcn); 236 bool (*is_falcon_cpu_halted)(struct nvgpu_falcon *flcn); 237 bool (*is_falcon_idle)(struct nvgpu_falcon *flcn); 238 bool (*is_falcon_scrubbing_done)(struct nvgpu_falcon *flcn); 239 int (*copy_from_dmem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst, 240 u32 size, u8 port); 241 int 
(*copy_to_dmem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src, 242 u32 size, u8 port); 243 int (*copy_from_imem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst, 244 u32 size, u8 port); 245 int (*copy_to_imem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src, 246 u32 size, u8 port, bool sec, u32 tag); 247 int (*dma_copy)(struct nvgpu_falcon *flcn, 248 struct nvgpu_falcon_dma_info *dma_info); 249 u32 (*mailbox_read)(struct nvgpu_falcon *flcn, u32 mailbox_index); 250 void (*mailbox_write)(struct nvgpu_falcon *flcn, u32 mailbox_index, 251 u32 data); 252 int (*bootstrap)(struct nvgpu_falcon *flcn, u32 boot_vector); 253 void (*dump_falcon_stats)(struct nvgpu_falcon *flcn); 254 int (*bl_bootstrap)(struct nvgpu_falcon *flcn, 255 struct nvgpu_falcon_bl_info *bl_info); 256}; 257 258struct nvgpu_falcon_bl_info { 259 void *bl_src; 260 u8 *bl_desc; 261 u32 bl_desc_size; 262 u32 bl_size; 263 u32 bl_start_tag; 264}; 265 266struct nvgpu_falcon { 267 struct gk20a *g; 268 u32 flcn_id; 269 u32 flcn_base; 270 u32 flcn_core_rev; 271 bool is_falcon_supported; 272 bool is_interrupt_enabled; 273 u32 intr_mask; 274 u32 intr_dest; 275 bool isr_enabled; 276 struct nvgpu_mutex isr_mutex; 277 struct nvgpu_mutex copy_lock; 278 struct nvgpu_falcon_ops flcn_ops; 279 struct nvgpu_falcon_version_ops flcn_vops; 280 struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops; 281}; 282 283int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn); 284int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout); 285int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn, 286 unsigned int timeout); 287int nvgpu_flcn_reset(struct nvgpu_falcon *flcn); 288void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable, 289 u32 intr_mask, u32 intr_dest); 290bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn); 291int nvgpu_flcn_mem_scrub_wait(struct nvgpu_falcon *flcn); 292bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn); 293bool nvgpu_flcn_get_idle_status(struct 
nvgpu_falcon *flcn); 294int nvgpu_flcn_copy_from_emem(struct nvgpu_falcon *flcn, 295 u32 src, u8 *dst, u32 size, u8 port); 296int nvgpu_flcn_copy_to_emem(struct nvgpu_falcon *flcn, 297 u32 dst, u8 *src, u32 size, u8 port); 298int nvgpu_flcn_copy_from_dmem(struct nvgpu_falcon *flcn, 299 u32 src, u8 *dst, u32 size, u8 port); 300int nvgpu_flcn_copy_to_dmem(struct nvgpu_falcon *flcn, 301 u32 dst, u8 *src, u32 size, u8 port); 302int nvgpu_flcn_copy_to_imem(struct nvgpu_falcon *flcn, 303 u32 dst, u8 *src, u32 size, u8 port, bool sec, u32 tag); 304int nvgpu_flcn_copy_from_imem(struct nvgpu_falcon *flcn, 305 u32 src, u8 *dst, u32 size, u8 port); 306int nvgpu_flcn_dma_copy(struct nvgpu_falcon *flcn, 307 struct nvgpu_falcon_dma_info *dma_info); 308u32 nvgpu_flcn_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index); 309void nvgpu_flcn_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index, 310 u32 data); 311int nvgpu_flcn_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector); 312void nvgpu_flcn_print_dmem(struct nvgpu_falcon *flcn, u32 src, u32 size); 313void nvgpu_flcn_print_imem(struct nvgpu_falcon *flcn, u32 src, u32 size); 314void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn); 315int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn, 316 struct nvgpu_falcon_bl_info *bl_info); 317 318/* queue public functions */ 319int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn, 320 struct nvgpu_falcon_queue *queue); 321bool nvgpu_flcn_queue_is_empty(struct nvgpu_falcon *flcn, 322 struct nvgpu_falcon_queue *queue); 323int nvgpu_flcn_queue_rewind(struct nvgpu_falcon *flcn, 324 struct nvgpu_falcon_queue *queue); 325int nvgpu_flcn_queue_pop(struct nvgpu_falcon *flcn, 326 struct nvgpu_falcon_queue *queue, void *data, u32 size, 327 u32 *bytes_read); 328int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn, 329 struct nvgpu_falcon_queue *queue, void *data, u32 size); 330void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn, 331 struct nvgpu_falcon_queue *queue); 332 333int 
nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id); 334 335#endif /* NVGPU_FALCON_H */
diff --git a/include/nvgpu/fecs_trace.h b/include/nvgpu/fecs_trace.h
deleted file mode 100644
index 5dc3530..0000000
--- a/include/nvgpu/fecs_trace.h
+++ /dev/null
@@ -1,60 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_FECS_TRACE_H 24#define NVGPU_FECS_TRACE_H 25 26struct gk20a; 27 28/* 29 * If HW circular buffer is getting too many "buffer full" conditions, 30 * increasing this constant should help (it drives Linux' internal buffer size). 
31 */ 32#define GK20A_FECS_TRACE_NUM_RECORDS (1 << 10) 33#define GK20A_FECS_TRACE_HASH_BITS 8 /* 2^8 */ 34#define GK20A_FECS_TRACE_FRAME_PERIOD_US (1000000ULL/60ULL) 35#define GK20A_FECS_TRACE_PTIMER_SHIFT 5 36 37struct gk20a_fecs_trace_record { 38 u32 magic_lo; 39 u32 magic_hi; 40 u32 context_id; 41 u32 context_ptr; 42 u32 new_context_id; 43 u32 new_context_ptr; 44 u64 ts[]; 45}; 46 47#ifdef CONFIG_GK20A_CTXSW_TRACE 48u32 gk20a_fecs_trace_record_ts_tag_invalid_ts_v(void); 49u32 gk20a_fecs_trace_record_ts_tag_v(u64 ts); 50u64 gk20a_fecs_trace_record_ts_timestamp_v(u64 ts); 51int gk20a_fecs_trace_num_ts(void); 52struct gk20a_fecs_trace_record *gk20a_fecs_trace_get_record(struct gk20a *g, 53 int idx); 54bool gk20a_fecs_trace_is_valid_record(struct gk20a_fecs_trace_record *r); 55int gk20a_fecs_trace_get_read_index(struct gk20a *g); 56int gk20a_fecs_trace_get_write_index(struct gk20a *g); 57 58#endif /* CONFIG_GK20A_CTXSW_TRACE */ 59 60#endif
diff --git a/include/nvgpu/firmware.h b/include/nvgpu/firmware.h
deleted file mode 100644
index 54d6795..0000000
--- a/include/nvgpu/firmware.h
+++ /dev/null
@@ -1,74 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23#ifndef NVGPU_FIRMWARE_H 24#define NVGPU_FIRMWARE_H 25 26#include <nvgpu/types.h> 27 28struct gk20a; 29 30#define NVGPU_REQUEST_FIRMWARE_NO_WARN (1UL << 0) 31#define NVGPU_REQUEST_FIRMWARE_NO_SOC (1UL << 1) 32 33struct nvgpu_firmware { 34 u8 *data; 35 size_t size; 36}; 37 38/** 39 * nvgpu_request_firmware - load a firmware blob from filesystem. 40 * 41 * @g The GPU driver struct for device to load firmware for 42 * @fw_name The base name of the firmware file. 43 * @flags Flags for loading; 44 * 45 * NVGPU_REQUEST_FIRMWARE_NO_WARN: Do not display warning on 46 * failed load. 47 * 48 * NVGPU_REQUEST_FIRMWARE_NO_SOC: Do not attempt loading from 49 * path <SOC_NAME>. 
50 * 51 * nvgpu_request_firmware() will load firmware from: 52 * 53 * <system firmware load path>/<GPU name>/<fw_name> 54 * 55 * If that fails and NO_SOC is not enabled, it'll try next from: 56 * 57 * <system firmware load path>/<SOC name>/<fw_name> 58 * 59 * It'll allocate a nvgpu_firmware structure and initializes it and returns 60 * it to caller. 61 */ 62struct nvgpu_firmware *nvgpu_request_firmware(struct gk20a *g, 63 const char *fw_name, 64 int flags); 65 66/** 67 * nvgpu_release_firmware - free firmware and associated nvgpu_firmware blob 68 * 69 * @g The GPU driver struct for device to free firmware for 70 * @fw The firmware to free. fw blob will also be freed. 71 */ 72void nvgpu_release_firmware(struct gk20a *g, struct nvgpu_firmware *fw); 73 74#endif /* NVGPU_FIRMWARE_H */
diff --git a/include/nvgpu/flcnif_cmn.h b/include/nvgpu/flcnif_cmn.h
deleted file mode 100644
index 273da1e..0000000
--- a/include/nvgpu/flcnif_cmn.h
+++ /dev/null
@@ -1,121 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
21 */ 22 23#ifndef NVGPU_FLCNIF_CMN_H 24#define NVGPU_FLCNIF_CMN_H 25 26#define PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED 0 27 28struct falc_u64 { 29 u32 lo; 30 u32 hi; 31}; 32 33struct falc_dma_addr { 34 u32 dma_base; 35 /* 36 * dma_base1 is 9-bit MSB for FB Base 37 * address for the transfer in FB after 38 * address using 49b FB address 39 */ 40 u16 dma_base1; 41 u8 dma_offset; 42}; 43 44struct pmu_mem_v1 { 45 u32 dma_base; 46 u8 dma_offset; 47 u8 dma_idx; 48 u16 fb_size; 49}; 50 51struct pmu_mem_desc_v0 { 52 struct falc_u64 dma_addr; 53 u16 dma_sizemax; 54 u8 dma_idx; 55}; 56 57struct pmu_dmem { 58 u16 size; 59 u32 offset; 60}; 61 62struct flcn_mem_desc_v0 { 63 struct falc_u64 address; 64 u32 params; 65}; 66 67#define nv_flcn_mem_desc flcn_mem_desc_v0 68 69struct pmu_allocation_v1 { 70 struct { 71 struct pmu_dmem dmem; 72 struct pmu_mem_v1 fb; 73 } alloc; 74}; 75 76struct pmu_allocation_v2 { 77 struct { 78 struct pmu_dmem dmem; 79 struct pmu_mem_desc_v0 fb; 80 } alloc; 81}; 82 83struct pmu_allocation_v3 { 84 struct { 85 struct pmu_dmem dmem; 86 struct flcn_mem_desc_v0 fb; 87 } alloc; 88}; 89 90#define nv_pmu_allocation pmu_allocation_v3 91 92struct pmu_hdr { 93 u8 unit_id; 94 u8 size; 95 u8 ctrl_flags; 96 u8 seq_id; 97}; 98 99#define NV_FLCN_UNIT_ID_REWIND (0x00U) 100 101#define PMU_MSG_HDR_SIZE sizeof(struct pmu_hdr) 102#define PMU_CMD_HDR_SIZE sizeof(struct pmu_hdr) 103 104#define nv_pmu_hdr pmu_hdr 105typedef u8 flcn_status; 106 107#define PMU_DMEM_ALLOC_ALIGNMENT (32) 108#define PMU_DMEM_ALIGNMENT (4) 109 110#define PMU_CMD_FLAGS_PMU_MASK (0xF0) 111 112#define PMU_CMD_FLAGS_STATUS BIT(0) 113#define PMU_CMD_FLAGS_INTR BIT(1) 114#define PMU_CMD_FLAGS_EVENT BIT(2) 115#define PMU_CMD_FLAGS_WATERMARK BIT(3) 116 117#define ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((gran)-1)) 118 119#define NV_UNSIGNED_ROUNDED_DIV(a, b) (((a) + ((b) / 2)) / (b)) 120 121#endif /* NVGPU_FLCNIF_CMN_H */
diff --git a/include/nvgpu/fuse.h b/include/nvgpu/fuse.h
deleted file mode 100644
index 1d459a9..0000000
--- a/include/nvgpu/fuse.h
+++ /dev/null
@@ -1,38 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22#ifndef NVGPU_FUSE_H 23#define NVGPU_FUSE_H 24 25struct gk20a; 26 27#include <nvgpu/types.h> 28 29int nvgpu_tegra_get_gpu_speedo_id(struct gk20a *g); 30 31void nvgpu_tegra_fuse_write_bypass(struct gk20a *g, u32 val); 32void nvgpu_tegra_fuse_write_access_sw(struct gk20a *g, u32 val); 33void nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(struct gk20a *g, u32 val); 34void nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(struct gk20a *g, u32 val); 35int nvgpu_tegra_fuse_read_gcplex_config_fuse(struct gk20a *g, u32 *val); 36int nvgpu_tegra_fuse_read_reserved_calib(struct gk20a *g, u32 *val); 37 38#endif /* NVGPU_FUSE_H */
diff --git a/include/nvgpu/gk20a.h b/include/nvgpu/gk20a.h
deleted file mode 100644
index 19bfaee..0000000
--- a/include/nvgpu/gk20a.h
+++ /dev/null
@@ -1,1807 +0,0 @@ 1/* 2 * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. 3 * 4 * GK20A Graphics 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#ifndef GK20A_H 25#define GK20A_H 26 27struct gk20a; 28struct fifo_gk20a; 29struct channel_gk20a; 30struct gr_gk20a; 31struct sim_nvgpu; 32struct gk20a_ctxsw_ucode_segments; 33struct gk20a_fecs_trace; 34struct gk20a_ctxsw_trace; 35struct acr_desc; 36struct nvgpu_mem_alloc_tracker; 37struct dbg_profiler_object_data; 38struct gk20a_debug_output; 39struct nvgpu_clk_pll_debug_data; 40struct nvgpu_nvhost_dev; 41struct nvgpu_cpu_time_correlation_sample; 42struct nvgpu_mem_sgt; 43struct nvgpu_warpstate; 44struct nvgpu_clk_session; 45struct nvgpu_clk_arb; 46#ifdef CONFIG_GK20A_CTXSW_TRACE 47struct nvgpu_gpu_ctxsw_trace_filter; 48#endif 49struct priv_cmd_entry; 50struct nvgpu_setup_bind_args; 51 52#ifdef __KERNEL__ 53#include <linux/notifier.h> 54#endif 55#include <nvgpu/lock.h> 56#include <nvgpu/thread.h> 57 58#include <nvgpu/mm.h> 59#include <nvgpu/as.h> 60#include <nvgpu/log.h> 61#include <nvgpu/pramin.h> 62#include <nvgpu/acr/nvgpu_acr.h> 63#include <nvgpu/kref.h> 64#include <nvgpu/falcon.h> 65#include <nvgpu/pmu.h> 66#include <nvgpu/atomic.h> 67#include <nvgpu/barrier.h> 68#include <nvgpu/rwsem.h> 69#include <nvgpu/nvlink.h> 70#include <nvgpu/sim.h> 71#include <nvgpu/ecc.h> 72#include <nvgpu/tsg.h> 73#include <nvgpu/sec2.h> 74#include <nvgpu/sched.h> 75 76#include "gk20a/clk_gk20a.h" 77#include "gk20a/ce2_gk20a.h" 78#include "gk20a/fifo_gk20a.h" 79#include "clk/clk.h" 80#include "pmu_perf/pmu_perf.h" 81#include "pmgr/pmgr.h" 82#include "therm/thrm.h" 83 84#ifdef CONFIG_DEBUG_FS 85struct railgate_stats { 86 unsigned long last_rail_gate_start; 87 unsigned long last_rail_gate_complete; 88 unsigned long last_rail_ungate_start; 89 unsigned long last_rail_ungate_complete; 90 unsigned long total_rail_gate_time_ms; 91 unsigned long total_rail_ungate_time_ms; 92 unsigned long railgating_cycle_count; 93}; 94#endif 95 96enum gk20a_cbc_op { 97 gk20a_cbc_op_clear, 98 gk20a_cbc_op_clean, 99 gk20a_cbc_op_invalidate, 100}; 101 102#define MC_INTR_UNIT_DISABLE false 103#define 
MC_INTR_UNIT_ENABLE true 104 105#define GPU_LIT_NUM_GPCS 0 106#define GPU_LIT_NUM_PES_PER_GPC 1 107#define GPU_LIT_NUM_ZCULL_BANKS 2 108#define GPU_LIT_NUM_TPC_PER_GPC 3 109#define GPU_LIT_NUM_SM_PER_TPC 4 110#define GPU_LIT_NUM_FBPS 5 111#define GPU_LIT_GPC_BASE 6 112#define GPU_LIT_GPC_STRIDE 7 113#define GPU_LIT_GPC_SHARED_BASE 8 114#define GPU_LIT_TPC_IN_GPC_BASE 9 115#define GPU_LIT_TPC_IN_GPC_STRIDE 10 116#define GPU_LIT_TPC_IN_GPC_SHARED_BASE 11 117#define GPU_LIT_PPC_IN_GPC_BASE 12 118#define GPU_LIT_PPC_IN_GPC_STRIDE 13 119#define GPU_LIT_PPC_IN_GPC_SHARED_BASE 14 120#define GPU_LIT_ROP_BASE 15 121#define GPU_LIT_ROP_STRIDE 16 122#define GPU_LIT_ROP_SHARED_BASE 17 123#define GPU_LIT_HOST_NUM_ENGINES 18 124#define GPU_LIT_HOST_NUM_PBDMA 19 125#define GPU_LIT_LTC_STRIDE 20 126#define GPU_LIT_LTS_STRIDE 21 127#define GPU_LIT_NUM_FBPAS 22 128#define GPU_LIT_FBPA_STRIDE 23 129#define GPU_LIT_FBPA_BASE 24 130#define GPU_LIT_FBPA_SHARED_BASE 25 131#define GPU_LIT_SM_PRI_STRIDE 26 132#define GPU_LIT_SMPC_PRI_BASE 27 133#define GPU_LIT_SMPC_PRI_SHARED_BASE 28 134#define GPU_LIT_SMPC_PRI_UNIQUE_BASE 29 135#define GPU_LIT_SMPC_PRI_STRIDE 30 136#define GPU_LIT_TWOD_CLASS 31 137#define GPU_LIT_THREED_CLASS 32 138#define GPU_LIT_COMPUTE_CLASS 33 139#define GPU_LIT_GPFIFO_CLASS 34 140#define GPU_LIT_I2M_CLASS 35 141#define GPU_LIT_DMA_COPY_CLASS 36 142#define GPU_LIT_GPC_PRIV_STRIDE 37 143#define GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START 38 144#define GPU_LIT_PERFMON_PMMGPCTPCB_DOMAIN_START 39 145#define GPU_LIT_PERFMON_PMMGPCTPC_DOMAIN_COUNT 40 146#define GPU_LIT_PERFMON_PMMFBP_LTC_DOMAIN_START 41 147#define GPU_LIT_PERFMON_PMMFBP_LTC_DOMAIN_COUNT 42 148#define GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_START 43 149#define GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT 44 150 151#define nvgpu_get_litter_value(g, v) (g)->ops.get_litter_value((g), v) 152 153#define MAX_TPC_PG_CONFIGS 9 154 155enum nvgpu_unit; 156 157enum nvgpu_flush_op; 158enum gk20a_mem_rw_flag; 159 160struct 
_resmgr_context; 161struct nvgpu_gpfifo_entry; 162 163struct nvgpu_gpfifo_userdata { 164 struct nvgpu_gpfifo_entry __user *entries; 165 struct _resmgr_context *context; 166}; 167 168/* 169 * gpu_ops should only contain function pointers! Non-function pointer members 170 * should go in struct gk20a or be implemented with the boolean flag API defined 171 * in nvgpu/enabled.h 172 */ 173 174/* index for FB fault buffer functions */ 175#define NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX 0U 176#define NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX 1U 177#define NVGPU_FB_MMU_FAULT_BUF_DISABLED 0U 178#define NVGPU_FB_MMU_FAULT_BUF_ENABLED 1U 179 180/* Parameters for init_elcg_mode/init_blcg_mode */ 181enum { 182 ELCG_RUN, /* clk always run, i.e. disable elcg */ 183 ELCG_STOP, /* clk is stopped */ 184 ELCG_AUTO /* clk will run when non-idle, standard elcg mode */ 185}; 186 187enum { 188 BLCG_RUN, /* clk always run, i.e. disable blcg */ 189 BLCG_AUTO /* clk will run when non-idle, standard blcg mode */ 190}; 191 192struct gpu_ops { 193 struct { 194 int (*determine_L2_size_bytes)(struct gk20a *gk20a); 195 u64 (*get_cbc_base_divisor)(struct gk20a *g); 196 int (*init_comptags)(struct gk20a *g, struct gr_gk20a *gr); 197 int (*cbc_ctrl)(struct gk20a *g, enum gk20a_cbc_op op, 198 u32 min, u32 max); 199 void (*set_zbc_color_entry)(struct gk20a *g, 200 struct zbc_entry *color_val, 201 u32 index); 202 void (*set_zbc_depth_entry)(struct gk20a *g, 203 struct zbc_entry *depth_val, 204 u32 index); 205 void (*set_zbc_s_entry)(struct gk20a *g, 206 struct zbc_entry *s_val, 207 u32 index); 208 void (*init_cbc)(struct gk20a *g, struct gr_gk20a *gr); 209 void (*set_enabled)(struct gk20a *g, bool enabled); 210 void (*init_fs_state)(struct gk20a *g); 211 void (*isr)(struct gk20a *g); 212 u32 (*cbc_fix_config)(struct gk20a *g, int base); 213 void (*flush)(struct gk20a *g); 214 void (*intr_en_illegal_compstat)(struct gk20a *g, bool enable); 215 bool (*pri_is_ltc_addr)(struct gk20a *g, u32 addr); 216 bool 
(*is_ltcs_ltss_addr)(struct gk20a *g, u32 addr); 217 bool (*is_ltcn_ltss_addr)(struct gk20a *g, u32 addr); 218 void (*split_lts_broadcast_addr)(struct gk20a *g, u32 addr, 219 u32 *priv_addr_table, 220 u32 *priv_addr_table_index); 221 void (*split_ltc_broadcast_addr)(struct gk20a *g, u32 addr, 222 u32 *priv_addr_table, 223 u32 *priv_addr_table_index); 224 } ltc; 225 struct { 226 void (*isr_stall)(struct gk20a *g, u32 inst_id, u32 pri_base); 227 u32 (*isr_nonstall)(struct gk20a *g, u32 inst_id, u32 pri_base); 228 u32 (*get_num_pce)(struct gk20a *g); 229 void (*init_prod_values)(struct gk20a *g); 230 } ce2; 231 struct { 232 u32 (*get_patch_slots)(struct gk20a *g); 233 int (*init_fs_state)(struct gk20a *g); 234 int (*init_preemption_state)(struct gk20a *g); 235 void (*access_smpc_reg)(struct gk20a *g, u32 quad, u32 offset); 236 void (*bundle_cb_defaults)(struct gk20a *g); 237 void (*cb_size_default)(struct gk20a *g); 238 int (*calc_global_ctx_buffer_size)(struct gk20a *g); 239 void (*commit_global_attrib_cb)(struct gk20a *g, 240 struct nvgpu_gr_ctx *ch_ctx, 241 u64 addr, bool patch); 242 void (*commit_global_bundle_cb)(struct gk20a *g, 243 struct nvgpu_gr_ctx *ch_ctx, 244 u64 addr, u64 size, bool patch); 245 int (*commit_global_cb_manager)(struct gk20a *g, 246 struct channel_gk20a *ch, 247 bool patch); 248 void (*commit_global_pagepool)(struct gk20a *g, 249 struct nvgpu_gr_ctx *ch_ctx, 250 u64 addr, u32 size, bool patch); 251 void (*init_gpc_mmu)(struct gk20a *g); 252 int (*handle_sw_method)(struct gk20a *g, u32 addr, 253 u32 class_num, u32 offset, u32 data); 254 void (*set_alpha_circular_buffer_size)(struct gk20a *g, 255 u32 data); 256 void (*set_circular_buffer_size)(struct gk20a *g, u32 data); 257 void (*set_bes_crop_debug3)(struct gk20a *g, u32 data); 258 void (*set_bes_crop_debug4)(struct gk20a *g, u32 data); 259 void (*enable_hww_exceptions)(struct gk20a *g); 260 bool (*is_valid_class)(struct gk20a *g, u32 class_num); 261 bool (*is_valid_gfx_class)(struct gk20a 
*g, u32 class_num); 262 bool (*is_valid_compute_class)(struct gk20a *g, u32 class_num); 263 void (*get_sm_dsm_perf_regs)(struct gk20a *g, 264 u32 *num_sm_dsm_perf_regs, 265 u32 **sm_dsm_perf_regs, 266 u32 *perf_register_stride); 267 void (*get_sm_dsm_perf_ctrl_regs)(struct gk20a *g, 268 u32 *num_sm_dsm_perf_regs, 269 u32 **sm_dsm_perf_regs, 270 u32 *perf_register_stride); 271 void (*get_ovr_perf_regs)(struct gk20a *g, 272 u32 *num_ovr_perf_regs, 273 u32 **ovr_perf_regsr); 274 void (*set_hww_esr_report_mask)(struct gk20a *g); 275 int (*setup_alpha_beta_tables)(struct gk20a *g, 276 struct gr_gk20a *gr); 277 int (*falcon_load_ucode)(struct gk20a *g, 278 u64 addr_base, 279 struct gk20a_ctxsw_ucode_segments *segments, 280 u32 reg_offset); 281 int (*load_ctxsw_ucode)(struct gk20a *g); 282 u32 (*get_gpc_mask)(struct gk20a *g); 283 u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index); 284 void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index); 285 int (*alloc_obj_ctx)(struct channel_gk20a *c, 286 u32 class_num, u32 flags); 287 int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr, 288 struct channel_gk20a *c, u64 zcull_va, 289 u32 mode); 290 int (*get_zcull_info)(struct gk20a *g, struct gr_gk20a *gr, 291 struct gr_zcull_info *zcull_params); 292 int (*decode_egpc_addr)(struct gk20a *g, 293 u32 addr, enum ctxsw_addr_type *addr_type, 294 u32 *gpc_num, u32 *tpc_num, u32 *broadcast_flags); 295 void (*egpc_etpc_priv_addr_table)(struct gk20a *g, u32 addr, 296 u32 gpc, u32 tpc, u32 broadcast_flags, 297 u32 *priv_addr_table, 298 u32 *priv_addr_table_index); 299 bool (*is_tpc_addr)(struct gk20a *g, u32 addr); 300 bool (*is_egpc_addr)(struct gk20a *g, u32 addr); 301 bool (*is_etpc_addr)(struct gk20a *g, u32 addr); 302 void (*get_egpc_etpc_num)(struct gk20a *g, u32 addr, 303 u32 *gpc_num, u32 *tpc_num); 304 u32 (*get_tpc_num)(struct gk20a *g, u32 addr); 305 u32 (*get_egpc_base)(struct gk20a *g); 306 void (*detect_sm_arch)(struct gk20a *g); 307 int (*add_zbc_color)(struct 
gk20a *g, struct gr_gk20a *gr, 308 struct zbc_entry *color_val, u32 index); 309 int (*add_zbc_depth)(struct gk20a *g, struct gr_gk20a *gr, 310 struct zbc_entry *depth_val, u32 index); 311 int (*add_zbc_s)(struct gk20a *g, struct gr_gk20a *gr, 312 struct zbc_entry *s_val, u32 index); 313 int (*zbc_set_table)(struct gk20a *g, struct gr_gk20a *gr, 314 struct zbc_entry *zbc_val); 315 int (*zbc_query_table)(struct gk20a *g, struct gr_gk20a *gr, 316 struct zbc_query_params *query_params); 317 int (*zbc_s_query_table)(struct gk20a *g, struct gr_gk20a *gr, 318 struct zbc_query_params *query_params); 319 int (*load_zbc_s_default_tbl)(struct gk20a *g, 320 struct gr_gk20a *gr); 321 int (*load_zbc_s_tbl)(struct gk20a *g, 322 struct gr_gk20a *gr); 323 void (*pmu_save_zbc)(struct gk20a *g, u32 entries); 324 int (*add_zbc)(struct gk20a *g, struct gr_gk20a *gr, 325 struct zbc_entry *zbc_val); 326 bool (*add_zbc_type_s)(struct gk20a *g, struct gr_gk20a *gr, 327 struct zbc_entry *zbc_val, int *ret_val); 328 u32 (*pagepool_default_size)(struct gk20a *g); 329 int (*init_ctx_state)(struct gk20a *g); 330 int (*alloc_gr_ctx)(struct gk20a *g, 331 struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm, 332 u32 class, u32 padding); 333 void (*free_gr_ctx)(struct gk20a *g, 334 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx); 335 void (*powergate_tpc)(struct gk20a *g); 336 void (*update_ctxsw_preemption_mode)(struct gk20a *g, 337 struct channel_gk20a *c, 338 struct nvgpu_mem *mem); 339 int (*update_smpc_ctxsw_mode)(struct gk20a *g, 340 struct channel_gk20a *c, 341 bool enable); 342 u32 (*get_hw_accessor_stream_out_mode)(void); 343 int (*update_hwpm_ctxsw_mode)(struct gk20a *g, 344 struct channel_gk20a *c, 345 u64 gpu_va, 346 u32 mode); 347 void (*init_hwpm_pmm_register)(struct gk20a *g); 348 void (*get_num_hwpm_perfmon)(struct gk20a *g, u32 *num_sys_perfmon, 349 u32 *num_fbp_perfmon, u32 *num_gpc_perfmon); 350 void (*set_pmm_register)(struct gk20a *g, u32 offset, u32 val, 351 u32 num_chiplets, 
u32 num_perfmons); 352 int (*dump_gr_regs)(struct gk20a *g, 353 struct gk20a_debug_output *o); 354 int (*update_pc_sampling)(struct channel_gk20a *ch, 355 bool enable); 356 u32 (*get_max_fbps_count)(struct gk20a *g); 357 u32 (*get_fbp_en_mask)(struct gk20a *g); 358 u32 (*get_max_ltc_per_fbp)(struct gk20a *g); 359 u32 (*get_max_lts_per_ltc)(struct gk20a *g); 360 u32* (*get_rop_l2_en_mask)(struct gk20a *g); 361 void (*init_sm_dsm_reg_info)(void); 362 void (*init_ovr_sm_dsm_perf)(void); 363 int (*wait_empty)(struct gk20a *g, unsigned long duration_ms, 364 u32 expect_delay); 365 void (*init_cyclestats)(struct gk20a *g); 366 void (*enable_cde_in_fecs)(struct gk20a *g, 367 struct nvgpu_mem *mem); 368 int (*set_sm_debug_mode)(struct gk20a *g, struct channel_gk20a *ch, 369 u64 sms, bool enable); 370 void (*bpt_reg_info)(struct gk20a *g, 371 struct nvgpu_warpstate *w_state); 372 void (*get_access_map)(struct gk20a *g, 373 u32 **whitelist, int *num_entries); 374 int (*handle_fecs_error)(struct gk20a *g, 375 struct channel_gk20a *ch, 376 struct gr_gk20a_isr_data *isr_data); 377 int (*pre_process_sm_exception)(struct gk20a *g, 378 u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, 379 bool sm_debugger_attached, 380 struct channel_gk20a *fault_ch, 381 bool *early_exit, bool *ignore_debugger); 382 u32 (*get_sm_hww_warp_esr)(struct gk20a *g, 383 u32 gpc, u32 tpc, u32 sm); 384 u32 (*get_sm_hww_global_esr)(struct gk20a *g, 385 u32 gpc, u32 tpc, u32 sm); 386 u32 (*get_sm_no_lock_down_hww_global_esr_mask)(struct gk20a *g); 387 int (*lock_down_sm)(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, 388 u32 global_esr_mask, bool check_errors); 389 int (*wait_for_sm_lock_down)(struct gk20a *g, u32 gpc, u32 tpc, 390 u32 sm, u32 global_esr_mask, bool check_errors); 391 void (*clear_sm_hww)(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, 392 u32 global_esr); 393 void (*get_esr_sm_sel)(struct gk20a *g, u32 gpc, u32 tpc, 394 u32 *esr_sm_sel); 395 int (*handle_tpc_sm_ecc_exception)(struct gk20a *g, 
396 u32 gpc, u32 tpc, 397 bool *post_event, struct channel_gk20a *fault_ch, 398 u32 *hww_global_esr); 399 int (*handle_sm_exception)(struct gk20a *g, 400 u32 gpc, u32 tpc, u32 sm, 401 bool *post_event, struct channel_gk20a *fault_ch, 402 u32 *hww_global_esr); 403 int (*handle_gcc_exception)(struct gk20a *g, u32 gpc, u32 tpc, 404 bool *post_event, struct channel_gk20a *fault_ch, 405 u32 *hww_global_esr); 406 int (*handle_tex_exception)(struct gk20a *g, u32 gpc, u32 tpc, 407 bool *post_event); 408 int (*handle_tpc_mpc_exception)(struct gk20a *g, 409 u32 gpc, u32 tpc, bool *post_event); 410 int (*handle_gpc_gpccs_exception)(struct gk20a *g, u32 gpc, 411 u32 gpc_exception); 412 int (*handle_gpc_gpcmmu_exception)(struct gk20a *g, u32 gpc, 413 u32 gpc_exception); 414 void (*enable_gpc_exceptions)(struct gk20a *g); 415 void (*enable_exceptions)(struct gk20a *g); 416 int (*init_ecc)(struct gk20a *g); 417 u32 (*get_lrf_tex_ltc_dram_override)(struct gk20a *g); 418 int (*record_sm_error_state)(struct gk20a *g, u32 gpc, u32 tpc, 419 u32 sm, struct channel_gk20a *fault_ch); 420 int (*clear_sm_error_state)(struct gk20a *g, 421 struct channel_gk20a *ch, u32 sm_id); 422 int (*suspend_contexts)(struct gk20a *g, 423 struct dbg_session_gk20a *dbg_s, 424 int *ctx_resident_ch_fd); 425 int (*resume_contexts)(struct gk20a *g, 426 struct dbg_session_gk20a *dbg_s, 427 int *ctx_resident_ch_fd); 428 int (*set_preemption_mode)(struct channel_gk20a *ch, 429 u32 graphics_preempt_mode, 430 u32 compute_preempt_mode); 431 int (*get_preemption_mode_flags)(struct gk20a *g, 432 struct nvgpu_preemption_modes_rec *preemption_modes_rec); 433 int (*set_ctxsw_preemption_mode)(struct gk20a *g, 434 struct nvgpu_gr_ctx *gr_ctx, 435 struct vm_gk20a *vm, u32 class, 436 u32 graphics_preempt_mode, 437 u32 compute_preempt_mode); 438 int (*set_boosted_ctx)(struct channel_gk20a *ch, bool boost); 439 void (*update_boosted_ctx)(struct gk20a *g, 440 struct nvgpu_mem *mem, 441 struct nvgpu_gr_ctx *gr_ctx); 442 int 
(*init_sm_id_table)(struct gk20a *g); 443 int (*load_smid_config)(struct gk20a *g); 444 void (*program_sm_id_numbering)(struct gk20a *g, 445 u32 gpc, u32 tpc, u32 smid); 446 void (*program_active_tpc_counts)(struct gk20a *g, u32 gpc); 447 int (*setup_rop_mapping)(struct gk20a *g, struct gr_gk20a *gr); 448 int (*init_sw_veid_bundle)(struct gk20a *g); 449 void (*program_zcull_mapping)(struct gk20a *g, 450 u32 zcull_alloc_num, u32 *zcull_map_tiles); 451 int (*commit_global_timeslice)(struct gk20a *g, 452 struct channel_gk20a *c); 453 int (*commit_inst)(struct channel_gk20a *c, u64 gpu_va); 454 void (*write_zcull_ptr)(struct gk20a *g, 455 struct nvgpu_mem *mem, u64 gpu_va); 456 void (*write_pm_ptr)(struct gk20a *g, 457 struct nvgpu_mem *mem, u64 gpu_va); 458 void (*set_preemption_buffer_va)(struct gk20a *g, 459 struct nvgpu_mem *mem, u64 gpu_va); 460 void (*load_tpc_mask)(struct gk20a *g); 461 int (*trigger_suspend)(struct gk20a *g); 462 int (*wait_for_pause)(struct gk20a *g, struct nvgpu_warpstate *w_state); 463 int (*resume_from_pause)(struct gk20a *g); 464 int (*clear_sm_errors)(struct gk20a *g); 465 u32 (*tpc_enabled_exceptions)(struct gk20a *g); 466 int (*set_czf_bypass)(struct gk20a *g, 467 struct channel_gk20a *ch); 468 void (*init_czf_bypass)(struct gk20a *g); 469 bool (*sm_debugger_attached)(struct gk20a *g); 470 void (*suspend_single_sm)(struct gk20a *g, 471 u32 gpc, u32 tpc, u32 sm, 472 u32 global_esr_mask, bool check_errors); 473 void (*suspend_all_sms)(struct gk20a *g, 474 u32 global_esr_mask, bool check_errors); 475 void (*resume_single_sm)(struct gk20a *g, 476 u32 gpc, u32 tpc, u32 sm); 477 void (*resume_all_sms)(struct gk20a *g); 478 void (*disable_rd_coalesce)(struct gk20a *g); 479 void (*init_ctxsw_hdr_data)(struct gk20a *g, 480 struct nvgpu_mem *mem); 481 void (*init_gfxp_wfi_timeout_count)(struct gk20a *g); 482 unsigned long (*get_max_gfxp_wfi_timeout_count) 483 (struct gk20a *g); 484 void (*ecc_init_scrub_reg)(struct gk20a *g); 485 u32 
(*get_gpcs_swdx_dss_zbc_c_format_reg)(struct gk20a *g); 486 u32 (*get_gpcs_swdx_dss_zbc_z_format_reg)(struct gk20a *g); 487 void (*dump_ctxsw_stats)(struct gk20a *g, struct vm_gk20a *vm, 488 struct nvgpu_gr_ctx *gr_ctx); 489 void (*fecs_host_int_enable)(struct gk20a *g); 490 int (*handle_ssync_hww)(struct gk20a *g); 491 int (*handle_notify_pending)(struct gk20a *g, 492 struct gr_gk20a_isr_data *isr_data); 493 int (*handle_semaphore_pending)(struct gk20a *g, 494 struct gr_gk20a_isr_data *isr_data); 495 int (*add_ctxsw_reg_pm_fbpa)(struct gk20a *g, 496 struct ctxsw_buf_offset_map_entry *map, 497 struct aiv_list_gk20a *regs, 498 u32 *count, u32 *offset, 499 u32 max_cnt, u32 base, 500 u32 num_fbpas, u32 stride, u32 mask); 501 int (*add_ctxsw_reg_perf_pma)(struct ctxsw_buf_offset_map_entry *map, 502 struct aiv_list_gk20a *regs, 503 u32 *count, u32 *offset, 504 u32 max_cnt, u32 base, u32 mask); 505 int (*decode_priv_addr)(struct gk20a *g, u32 addr, 506 enum ctxsw_addr_type *addr_type, 507 u32 *gpc_num, u32 *tpc_num, 508 u32 *ppc_num, u32 *be_num, 509 u32 *broadcast_flags); 510 int (*create_priv_addr_table)(struct gk20a *g, 511 u32 addr, 512 u32 *priv_addr_table, 513 u32 *num_registers); 514 u32 (*get_pmm_per_chiplet_offset)(void); 515 void (*split_fbpa_broadcast_addr)(struct gk20a *g, u32 addr, 516 u32 num_fbpas, 517 u32 *priv_addr_table, 518 u32 *priv_addr_table_index); 519 u32 (*fecs_ctxsw_mailbox_size)(void); 520 u32 (*gpc0_gpccs_ctxsw_mailbox_size)(void); 521 int (*init_sw_bundle64)(struct gk20a *g); 522 int (*alloc_global_ctx_buffers)(struct gk20a *g); 523 int (*map_global_ctx_buffers)(struct gk20a *g, 524 struct channel_gk20a *c); 525 int (*commit_global_ctx_buffers)(struct gk20a *g, 526 struct channel_gk20a *c, bool patch); 527 u32 (*get_nonpes_aware_tpc)(struct gk20a *g, u32 gpc, u32 tpc); 528 int (*get_offset_in_gpccs_segment)(struct gk20a *g, 529 enum ctxsw_addr_type addr_type, u32 num_tpcs, 530 u32 num_ppcs, u32 reg_list_ppc_count, 531 u32 
*__offset_in_segment); 532 void (*set_debug_mode)(struct gk20a *g, bool enable); 533 int (*set_mmu_debug_mode)(struct gk20a *g, 534 struct channel_gk20a *ch, bool enable); 535 int (*set_fecs_watchdog_timeout)(struct gk20a *g); 536 } gr; 537 struct { 538 void (*init_hw)(struct gk20a *g); 539 void (*init_cbc)(struct gk20a *g, struct gr_gk20a *gr); 540 void (*init_fs_state)(struct gk20a *g); 541 void (*init_uncompressed_kind_map)(struct gk20a *g); 542 void (*init_kind_attr)(struct gk20a *g); 543 void (*set_mmu_page_size)(struct gk20a *g); 544 bool (*set_use_full_comp_tag_line)(struct gk20a *g); 545 u32 (*mmu_ctrl)(struct gk20a *g); 546 u32 (*mmu_debug_ctrl)(struct gk20a *g); 547 u32 (*mmu_debug_wr)(struct gk20a *g); 548 u32 (*mmu_debug_rd)(struct gk20a *g); 549 550 /* 551 * Compression tag line coverage. When mapping a compressible 552 * buffer, ctagline is increased when the virtual address 553 * crosses over the compression page boundary. 554 */ 555 unsigned int (*compression_page_size)(struct gk20a *g); 556 557 /* 558 * Minimum page size that can be used for compressible kinds. 559 */ 560 unsigned int (*compressible_page_size)(struct gk20a *g); 561 562 /* 563 * Compressible kind mappings: Mask for the virtual and physical 564 * address bits that must match. 
565 */ 566 u32 (*compression_align_mask)(struct gk20a *g); 567 568 void (*dump_vpr_info)(struct gk20a *g); 569 void (*dump_wpr_info)(struct gk20a *g); 570 int (*vpr_info_fetch)(struct gk20a *g); 571 void (*read_wpr_info)(struct gk20a *g, 572 struct wpr_carveout_info *inf); 573 bool (*is_debug_mode_enabled)(struct gk20a *g); 574 void (*set_debug_mode)(struct gk20a *g, bool enable); 575 void (*set_mmu_debug_mode)(struct gk20a *g, bool enable); 576 int (*tlb_invalidate)(struct gk20a *g, struct nvgpu_mem *pdb); 577 void (*hub_isr)(struct gk20a *g); 578 void (*handle_replayable_fault)(struct gk20a *g); 579 int (*mem_unlock)(struct gk20a *g); 580 int (*init_nvlink)(struct gk20a *g); 581 int (*enable_nvlink)(struct gk20a *g); 582 void (*enable_hub_intr)(struct gk20a *g); 583 void (*disable_hub_intr)(struct gk20a *g); 584 int (*init_fbpa)(struct gk20a *g); 585 void (*handle_fbpa_intr)(struct gk20a *g, u32 fbpa_id); 586 void (*write_mmu_fault_buffer_lo_hi)(struct gk20a *g, u32 index, 587 u32 addr_lo, u32 addr_hi); 588 void (*write_mmu_fault_buffer_get)(struct gk20a *g, u32 index, 589 u32 reg_val); 590 void (*write_mmu_fault_buffer_size)(struct gk20a *g, u32 index, 591 u32 reg_val); 592 void (*write_mmu_fault_status)(struct gk20a *g, u32 reg_val); 593 u32 (*read_mmu_fault_buffer_get)(struct gk20a *g, u32 index); 594 u32 (*read_mmu_fault_buffer_put)(struct gk20a *g, u32 index); 595 u32 (*read_mmu_fault_buffer_size)(struct gk20a *g, u32 index); 596 void (*read_mmu_fault_addr_lo_hi)(struct gk20a *g, 597 u32 *addr_lo, u32 *addr_hi); 598 void (*read_mmu_fault_inst_lo_hi)(struct gk20a *g, 599 u32 *inst_lo, u32 *inst_hi); 600 u32 (*read_mmu_fault_info)(struct gk20a *g); 601 u32 (*read_mmu_fault_status)(struct gk20a *g); 602 int (*mmu_invalidate_replay)(struct gk20a *g, 603 u32 invalidate_replay_val); 604 bool (*mmu_fault_pending)(struct gk20a *g); 605 bool (*is_fault_buf_enabled)(struct gk20a *g, u32 index); 606 void (*fault_buf_set_state_hw)(struct gk20a *g, 607 u32 index, u32 
state); 608 void (*fault_buf_configure_hw)(struct gk20a *g, u32 index); 609 size_t (*get_vidmem_size)(struct gk20a *g); 610 int (*apply_pdb_cache_war)(struct gk20a *g); 611 } fb; 612 struct { 613 void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod); 614 void (*slcg_ce2_load_gating_prod)(struct gk20a *g, bool prod); 615 void (*slcg_chiplet_load_gating_prod)(struct gk20a *g, bool prod); 616 void (*slcg_ctxsw_firmware_load_gating_prod)(struct gk20a *g, bool prod); 617 void (*slcg_fb_load_gating_prod)(struct gk20a *g, bool prod); 618 void (*slcg_fifo_load_gating_prod)(struct gk20a *g, bool prod); 619 void (*slcg_gr_load_gating_prod)(struct gk20a *g, bool prod); 620 void (*slcg_ltc_load_gating_prod)(struct gk20a *g, bool prod); 621 void (*slcg_perf_load_gating_prod)(struct gk20a *g, bool prod); 622 void (*slcg_priring_load_gating_prod)(struct gk20a *g, bool prod); 623 void (*slcg_pmu_load_gating_prod)(struct gk20a *g, bool prod); 624 void (*slcg_therm_load_gating_prod)(struct gk20a *g, bool prod); 625 void (*slcg_xbar_load_gating_prod)(struct gk20a *g, bool prod); 626 void (*slcg_hshub_load_gating_prod)(struct gk20a *g, bool prod); 627 void (*slcg_acb_load_gating_prod)(struct gk20a *g, bool prod); 628 void (*blcg_bus_load_gating_prod)(struct gk20a *g, bool prod); 629 void (*blcg_ce_load_gating_prod)(struct gk20a *g, bool prod); 630 void (*blcg_ctxsw_firmware_load_gating_prod)(struct gk20a *g, bool prod); 631 void (*blcg_fb_load_gating_prod)(struct gk20a *g, bool prod); 632 void (*blcg_fifo_load_gating_prod)(struct gk20a *g, bool prod); 633 void (*blcg_gr_load_gating_prod)(struct gk20a *g, bool prod); 634 void (*blcg_ltc_load_gating_prod)(struct gk20a *g, bool prod); 635 void (*blcg_pwr_csb_load_gating_prod)(struct gk20a *g, bool prod); 636 void (*blcg_pmu_load_gating_prod)(struct gk20a *g, bool prod); 637 void (*blcg_xbar_load_gating_prod)(struct gk20a *g, bool prod); 638 void (*blcg_hshub_load_gating_prod)(struct gk20a *g, bool prod); 639 void 
(*pg_gr_load_gating_prod)(struct gk20a *g, bool prod); 640 } clock_gating; 641 struct { 642 void (*post_events)(struct channel_gk20a *ch); 643 } debugger; 644 struct { 645 int (*setup_sw)(struct gk20a *g); 646 int (*init_fifo_setup_hw)(struct gk20a *g); 647 void (*bind_channel)(struct channel_gk20a *ch_gk20a); 648 void (*unbind_channel)(struct channel_gk20a *ch_gk20a); 649 void (*disable_channel)(struct channel_gk20a *ch); 650 void (*enable_channel)(struct channel_gk20a *ch); 651 int (*alloc_inst)(struct gk20a *g, struct channel_gk20a *ch); 652 void (*free_inst)(struct gk20a *g, struct channel_gk20a *ch); 653 int (*setup_ramfc)(struct channel_gk20a *c, u64 gpfifo_base, 654 u32 gpfifo_entries, 655 unsigned long acquire_timeout, 656 u32 flags); 657 int (*resetup_ramfc)(struct channel_gk20a *c); 658 int (*preempt_channel)(struct gk20a *g, struct channel_gk20a *ch); 659 int (*preempt_tsg)(struct gk20a *g, struct tsg_gk20a *tsg); 660 int (*enable_tsg)(struct tsg_gk20a *tsg); 661 int (*disable_tsg)(struct tsg_gk20a *tsg); 662 int (*tsg_verify_channel_status)(struct channel_gk20a *ch); 663 void (*tsg_verify_status_ctx_reload)(struct channel_gk20a *ch); 664 void (*tsg_verify_status_faulted)(struct channel_gk20a *ch); 665 int (*reschedule_runlist)(struct channel_gk20a *ch, 666 bool preempt_next); 667 int (*update_runlist)(struct gk20a *g, u32 runlist_id, 668 u32 chid, bool add, 669 bool wait_for_finish); 670 void (*trigger_mmu_fault)(struct gk20a *g, 671 unsigned long engine_ids); 672 void (*get_mmu_fault_info)(struct gk20a *g, u32 mmu_fault_id, 673 struct mmu_fault_info *mmfault); 674 void (*get_mmu_fault_desc)(struct mmu_fault_info *mmfault); 675 void (*get_mmu_fault_client_desc)( 676 struct mmu_fault_info *mmfault); 677 void (*get_mmu_fault_gpc_desc)(struct mmu_fault_info *mmfault); 678 void (*apply_pb_timeout)(struct gk20a *g); 679 void (*apply_ctxsw_timeout_intr)(struct gk20a *g); 680 int (*wait_engine_idle)(struct gk20a *g); 681 u32 (*get_num_fifos)(struct gk20a *g); 
682 u32 (*get_pbdma_signature)(struct gk20a *g); 683 int (*set_runlist_interleave)(struct gk20a *g, u32 id, 684 u32 runlist_id, 685 u32 new_level); 686 int (*tsg_set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice); 687 u32 (*default_timeslice_us)(struct gk20a *); 688 int (*force_reset_ch)(struct channel_gk20a *ch, 689 u32 err_code, bool verbose); 690 int (*engine_enum_from_type)(struct gk20a *g, u32 engine_type, 691 u32 *inst_id); 692 void (*device_info_data_parse)(struct gk20a *g, 693 u32 table_entry, u32 *inst_id, 694 u32 *pri_base, u32 *fault_id); 695 u32 (*device_info_fault_id)(u32 table_entry); 696 int (*tsg_bind_channel)(struct tsg_gk20a *tsg, 697 struct channel_gk20a *ch); 698 int (*tsg_unbind_channel)(struct channel_gk20a *ch); 699 int (*tsg_open)(struct tsg_gk20a *tsg); 700 void (*tsg_release)(struct tsg_gk20a *tsg); 701 u32 (*eng_runlist_base_size)(void); 702 int (*init_engine_info)(struct fifo_gk20a *f); 703 u32 (*runlist_entry_size)(void); 704 void (*get_tsg_runlist_entry)(struct tsg_gk20a *tsg, 705 u32 *runlist); 706 void (*get_ch_runlist_entry)(struct channel_gk20a *ch, 707 u32 *runlist); 708 u32 (*userd_gp_get)(struct gk20a *g, struct channel_gk20a *ch); 709 void (*userd_gp_put)(struct gk20a *g, struct channel_gk20a *ch); 710 u64 (*userd_pb_get)(struct gk20a *g, struct channel_gk20a *ch); 711 void (*free_channel_ctx_header)(struct channel_gk20a *ch); 712 bool (*is_fault_engine_subid_gpc)(struct gk20a *g, 713 u32 engine_subid); 714 void (*dump_pbdma_status)(struct gk20a *g, 715 struct gk20a_debug_output *o); 716 void (*dump_eng_status)(struct gk20a *g, 717 struct gk20a_debug_output *o); 718 void (*dump_channel_status_ramfc)(struct gk20a *g, 719 struct gk20a_debug_output *o, u32 chid, 720 struct ch_state *ch_state); 721 u32 (*intr_0_error_mask)(struct gk20a *g); 722 int (*is_preempt_pending)(struct gk20a *g, u32 id, 723 unsigned int id_type, bool preempt_retries_left); 724 void (*init_pbdma_intr_descs)(struct fifo_gk20a *f); 725 int 
(*reset_enable_hw)(struct gk20a *g); 726 int (*setup_userd)(struct channel_gk20a *c); 727 u32 (*pbdma_acquire_val)(u64 timeout); 728 void (*teardown_ch_tsg)(struct gk20a *g, u32 act_eng_bitmask, 729 u32 id, unsigned int id_type, unsigned int rc_type, 730 struct mmu_fault_info *mmfault); 731 void (*teardown_mask_intr)(struct gk20a *g); 732 void (*teardown_unmask_intr)(struct gk20a *g); 733 bool (*handle_sched_error)(struct gk20a *g); 734 bool (*handle_ctxsw_timeout)(struct gk20a *g, u32 fifo_intr); 735 unsigned int (*handle_pbdma_intr_0)(struct gk20a *g, 736 u32 pbdma_id, u32 pbdma_intr_0, 737 u32 *handled, u32 *error_notifier); 738 unsigned int (*handle_pbdma_intr_1)(struct gk20a *g, 739 u32 pbdma_id, u32 pbdma_intr_1, 740 u32 *handled, u32 *error_notifier); 741 void (*init_eng_method_buffers)(struct gk20a *g, 742 struct tsg_gk20a *tsg); 743 void (*deinit_eng_method_buffers)(struct gk20a *g, 744 struct tsg_gk20a *tsg); 745 u32 (*get_preempt_timeout)(struct gk20a *g); 746 void (*post_event_id)(struct tsg_gk20a *tsg, int event_id); 747 void (*ch_abort_clean_up)(struct channel_gk20a *ch); 748 bool (*check_tsg_ctxsw_timeout)(struct tsg_gk20a *tsg, 749 bool *verbose, u32 *ms); 750 bool (*check_ch_ctxsw_timeout)(struct channel_gk20a *ch, 751 bool *verbose, u32 *ms); 752 int (*channel_suspend)(struct gk20a *g); 753 int (*channel_resume)(struct gk20a *g); 754 void (*set_error_notifier)(struct channel_gk20a *ch, u32 error); 755#ifdef CONFIG_TEGRA_GK20A_NVHOST 756 int (*alloc_syncpt_buf)(struct channel_gk20a *c, 757 u32 syncpt_id, struct nvgpu_mem *syncpt_buf); 758 void (*free_syncpt_buf)(struct channel_gk20a *c, 759 struct nvgpu_mem *syncpt_buf); 760 void (*add_syncpt_wait_cmd)(struct gk20a *g, 761 struct priv_cmd_entry *cmd, u32 off, 762 u32 id, u32 thresh, u64 gpu_va); 763 u32 (*get_syncpt_wait_cmd_size)(void); 764 void (*add_syncpt_incr_cmd)(struct gk20a *g, 765 bool wfi_cmd, struct priv_cmd_entry *cmd, 766 u32 id, u64 gpu_va); 767 u32 (*get_syncpt_incr_cmd_size)(bool 
wfi_cmd); 768 int (*get_sync_ro_map)(struct vm_gk20a *vm, 769 u64 *base_gpuva, u32 *sync_size); 770 u32 (*get_syncpt_incr_per_release)(void); 771#endif 772 void (*runlist_hw_submit)(struct gk20a *g, u32 runlist_id, 773 u32 count, u32 buffer_index); 774 int (*runlist_wait_pending)(struct gk20a *g, u32 runlist_id); 775 void (*ring_channel_doorbell)(struct channel_gk20a *c); 776 u64 (*usermode_base)(struct gk20a *g); 777 u32 (*get_sema_wait_cmd_size)(void); 778 u32 (*get_sema_incr_cmd_size)(void); 779 void (*add_sema_cmd)(struct gk20a *g, 780 struct nvgpu_semaphore *s, u64 sema_va, 781 struct priv_cmd_entry *cmd, 782 u32 off, bool acquire, bool wfi); 783 int (*init_pdb_cache_war)(struct gk20a *g); 784 void (*deinit_pdb_cache_war)(struct gk20a *g); 785 } fifo; 786 struct pmu_v { 787 u32 (*get_pmu_cmdline_args_size)(struct nvgpu_pmu *pmu); 788 void (*set_pmu_cmdline_args_cpu_freq)(struct nvgpu_pmu *pmu, 789 u32 freq); 790 void (*set_pmu_cmdline_args_trace_size)(struct nvgpu_pmu *pmu, 791 u32 size); 792 void (*set_pmu_cmdline_args_trace_dma_base)( 793 struct nvgpu_pmu *pmu); 794 void (*config_pmu_cmdline_args_super_surface)( 795 struct nvgpu_pmu *pmu); 796 void (*set_pmu_cmdline_args_trace_dma_idx)( 797 struct nvgpu_pmu *pmu, u32 idx); 798 void * (*get_pmu_cmdline_args_ptr)(struct nvgpu_pmu *pmu); 799 u32 (*get_pmu_allocation_struct_size)(struct nvgpu_pmu *pmu); 800 void (*set_pmu_allocation_ptr)(struct nvgpu_pmu *pmu, 801 void **pmu_alloc_ptr, void *assign_ptr); 802 void (*pmu_allocation_set_dmem_size)(struct nvgpu_pmu *pmu, 803 void *pmu_alloc_ptr, u16 size); 804 u16 (*pmu_allocation_get_dmem_size)(struct nvgpu_pmu *pmu, 805 void *pmu_alloc_ptr); 806 u32 (*pmu_allocation_get_dmem_offset)(struct nvgpu_pmu *pmu, 807 void *pmu_alloc_ptr); 808 u32 * (*pmu_allocation_get_dmem_offset_addr)( 809 struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); 810 void (*pmu_allocation_set_dmem_offset)(struct nvgpu_pmu *pmu, 811 void *pmu_alloc_ptr, u32 offset); 812 void * 
(*pmu_allocation_get_fb_addr)( 813 struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); 814 u32 (*pmu_allocation_get_fb_size)( 815 struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); 816 void (*get_pmu_init_msg_pmu_queue_params)( 817 struct nvgpu_falcon_queue *queue, u32 id, 818 void *pmu_init_msg); 819 void *(*get_pmu_msg_pmu_init_msg_ptr)( 820 struct pmu_init_msg *init); 821 u16 (*get_pmu_init_msg_pmu_sw_mg_off)( 822 union pmu_init_msg_pmu *init_msg); 823 u16 (*get_pmu_init_msg_pmu_sw_mg_size)( 824 union pmu_init_msg_pmu *init_msg); 825 u32 (*get_pmu_perfmon_cmd_start_size)(void); 826 int (*get_perfmon_cmd_start_offsetofvar)( 827 enum pmu_perfmon_cmd_start_fields field); 828 void (*perfmon_start_set_cmd_type)(struct pmu_perfmon_cmd *pc, 829 u8 value); 830 void (*perfmon_start_set_group_id)(struct pmu_perfmon_cmd *pc, 831 u8 value); 832 void (*perfmon_start_set_state_id)(struct pmu_perfmon_cmd *pc, 833 u8 value); 834 void (*perfmon_start_set_flags)(struct pmu_perfmon_cmd *pc, 835 u8 value); 836 u8 (*perfmon_start_get_flags)(struct pmu_perfmon_cmd *pc); 837 u32 (*get_pmu_perfmon_cmd_init_size)(void); 838 int (*get_perfmon_cmd_init_offsetofvar)( 839 enum pmu_perfmon_cmd_start_fields field); 840 void (*perfmon_cmd_init_set_sample_buffer)( 841 struct pmu_perfmon_cmd *pc, u16 value); 842 void (*perfmon_cmd_init_set_dec_cnt)( 843 struct pmu_perfmon_cmd *pc, u8 value); 844 void (*perfmon_cmd_init_set_base_cnt_id)( 845 struct pmu_perfmon_cmd *pc, u8 value); 846 void (*perfmon_cmd_init_set_samp_period_us)( 847 struct pmu_perfmon_cmd *pc, u32 value); 848 void (*perfmon_cmd_init_set_num_cnt)(struct pmu_perfmon_cmd *pc, 849 u8 value); 850 void (*perfmon_cmd_init_set_mov_avg)(struct pmu_perfmon_cmd *pc, 851 u8 value); 852 void *(*get_pmu_seq_in_a_ptr)( 853 struct pmu_sequence *seq); 854 void *(*get_pmu_seq_out_a_ptr)( 855 struct pmu_sequence *seq); 856 void (*set_pmu_cmdline_args_secure_mode)(struct nvgpu_pmu *pmu, 857 u32 val); 858 u32 (*get_perfmon_cntr_sz)(struct nvgpu_pmu *pmu); 859 void 
* (*get_perfmon_cntr_ptr)(struct nvgpu_pmu *pmu); 860 void (*set_perfmon_cntr_ut)(struct nvgpu_pmu *pmu, u16 ut); 861 void (*set_perfmon_cntr_lt)(struct nvgpu_pmu *pmu, u16 lt); 862 void (*set_perfmon_cntr_valid)(struct nvgpu_pmu *pmu, u8 val); 863 void (*set_perfmon_cntr_index)(struct nvgpu_pmu *pmu, u8 val); 864 void (*set_perfmon_cntr_group_id)(struct nvgpu_pmu *pmu, 865 u8 gid); 866 867 u8 (*pg_cmd_eng_buf_load_size)(struct pmu_pg_cmd *pg); 868 void (*pg_cmd_eng_buf_load_set_cmd_type)(struct pmu_pg_cmd *pg, 869 u8 value); 870 void (*pg_cmd_eng_buf_load_set_engine_id)(struct pmu_pg_cmd *pg, 871 u8 value); 872 void (*pg_cmd_eng_buf_load_set_buf_idx)(struct pmu_pg_cmd *pg, 873 u8 value); 874 void (*pg_cmd_eng_buf_load_set_pad)(struct pmu_pg_cmd *pg, 875 u8 value); 876 void (*pg_cmd_eng_buf_load_set_buf_size)(struct pmu_pg_cmd *pg, 877 u16 value); 878 void (*pg_cmd_eng_buf_load_set_dma_base)(struct pmu_pg_cmd *pg, 879 u32 value); 880 void (*pg_cmd_eng_buf_load_set_dma_offset)(struct pmu_pg_cmd *pg, 881 u8 value); 882 void (*pg_cmd_eng_buf_load_set_dma_idx)(struct pmu_pg_cmd *pg, 883 u8 value); 884 struct { 885 int (*boardobjgrp_pmucmd_construct_impl) 886 (struct gk20a *g, 887 struct boardobjgrp *pboardobjgrp, 888 struct boardobjgrp_pmu_cmd *cmd, u8 id, u8 msgid, 889 u16 hdrsize, u16 entrysize, u16 fbsize, u32 ss_offset, 890 u8 rpc_func_id); 891 int (*boardobjgrp_pmuset_impl)(struct gk20a *g, 892 struct boardobjgrp *pboardobjgrp); 893 int (*boardobjgrp_pmugetstatus_impl)(struct gk20a *g, 894 struct boardobjgrp *pboardobjgrp, 895 struct boardobjgrpmask *mask); 896 int (*is_boardobjgrp_pmucmd_id_valid)(struct gk20a *g, 897 struct boardobjgrp *pboardobjgrp, 898 struct boardobjgrp_pmu_cmd *cmd); 899 } boardobj; 900 struct { 901 u32 (*volt_set_voltage)(struct gk20a *g, 902 u32 logic_voltage_uv, u32 sram_voltage_uv); 903 u32 (*volt_get_voltage)(struct gk20a *g, 904 u8 volt_domain, u32 *pvoltage_uv); 905 u32 (*volt_send_load_cmd_to_pmu)(struct gk20a *g); 906 } volt; 907 
struct { 908 u32 (*get_vbios_clk_domain)(u32 vbios_domain); 909 u32 (*clk_avfs_get_vin_cal_data)(struct gk20a *g, 910 struct avfsvinobjs *pvinobjs, 911 struct vin_device_v20 *pvindev); 912 u32 (*clk_vf_change_inject_data_fill)(struct gk20a *g, 913 struct nv_pmu_clk_rpc *rpccall, 914 struct set_fll_clk *setfllclk); 915 u32 (*clk_set_boot_clk)(struct gk20a *g); 916 }clk; 917 } pmu_ver; 918 struct { 919 int (*get_netlist_name)(struct gk20a *g, int index, char *name); 920 bool (*is_fw_defined)(void); 921 } gr_ctx; 922#ifdef CONFIG_GK20A_CTXSW_TRACE 923 /* 924 * Currently only supported on Linux due to the extremely tight 925 * integration with Linux device driver structure (in particular 926 * mmap). 927 */ 928 struct { 929 int (*init)(struct gk20a *g); 930 int (*max_entries)(struct gk20a *, 931 struct nvgpu_gpu_ctxsw_trace_filter *filter); 932 int (*flush)(struct gk20a *g); 933 int (*poll)(struct gk20a *g); 934 int (*enable)(struct gk20a *g); 935 int (*disable)(struct gk20a *g); 936 bool (*is_enabled)(struct gk20a *g); 937 int (*reset)(struct gk20a *g); 938 int (*bind_channel)(struct gk20a *g, struct channel_gk20a *ch); 939 int (*unbind_channel)(struct gk20a *g, 940 struct channel_gk20a *ch); 941 int (*deinit)(struct gk20a *g); 942 int (*alloc_user_buffer)(struct gk20a *g, 943 void **buf, size_t *size); 944 int (*free_user_buffer)(struct gk20a *g); 945 int (*mmap_user_buffer)(struct gk20a *g, 946 struct vm_area_struct *vma); 947 int (*set_filter)(struct gk20a *g, 948 struct nvgpu_gpu_ctxsw_trace_filter *filter); 949 } fecs_trace; 950#endif 951 struct { 952 bool (*support_sparse)(struct gk20a *g); 953 u64 (*gmmu_map)(struct vm_gk20a *vm, 954 u64 map_offset, 955 struct nvgpu_sgt *sgt, 956 u64 buffer_offset, 957 u64 size, 958 u32 pgsz_idx, 959 u8 kind_v, 960 u32 ctag_offset, 961 u32 flags, 962 enum gk20a_mem_rw_flag rw_flag, 963 bool clear_ctags, 964 bool sparse, 965 bool priv, 966 struct vm_gk20a_mapping_batch *batch, 967 enum nvgpu_aperture aperture); 968 void 
(*gmmu_unmap)(struct vm_gk20a *vm, 969 u64 vaddr, 970 u64 size, 971 u32 pgsz_idx, 972 bool va_allocated, 973 enum gk20a_mem_rw_flag rw_flag, 974 bool sparse, 975 struct vm_gk20a_mapping_batch *batch); 976 int (*vm_bind_channel)(struct vm_gk20a *vm, 977 struct channel_gk20a *ch); 978 int (*fb_flush)(struct gk20a *g); 979 void (*l2_invalidate)(struct gk20a *g); 980 void (*l2_flush)(struct gk20a *g, bool invalidate); 981 void (*cbc_clean)(struct gk20a *g); 982 void (*set_big_page_size)(struct gk20a *g, 983 struct nvgpu_mem *mem, int size); 984 u32 (*get_big_page_sizes)(void); 985 u32 (*get_default_big_page_size)(void); 986 u32 (*get_iommu_bit)(struct gk20a *g); 987 int (*init_mm_setup_hw)(struct gk20a *g); 988 bool (*is_bar1_supported)(struct gk20a *g); 989 int (*init_bar2_vm)(struct gk20a *g); 990 void (*remove_bar2_vm)(struct gk20a *g); 991 const struct gk20a_mmu_level * 992 (*get_mmu_levels)(struct gk20a *g, u32 big_page_size); 993 void (*init_pdb)(struct gk20a *g, struct nvgpu_mem *inst_block, 994 struct vm_gk20a *vm); 995 u64 (*gpu_phys_addr)(struct gk20a *g, 996 struct nvgpu_gmmu_attrs *attrs, u64 phys); 997 int (*alloc_inst_block)(struct gk20a *g, 998 struct nvgpu_mem *inst_block); 999 void (*init_inst_block)(struct nvgpu_mem *inst_block, 1000 struct vm_gk20a *vm, u32 big_page_size); 1001 bool (*mmu_fault_pending)(struct gk20a *g); 1002 void (*fault_info_mem_destroy)(struct gk20a *g); 1003 void (*mmu_fault_disable_hw)(struct gk20a *g); 1004 u32 (*get_kind_invalid)(void); 1005 u32 (*get_kind_pitch)(void); 1006 u32 (*get_flush_retries)(struct gk20a *g, 1007 enum nvgpu_flush_op op); 1008 } mm; 1009 /* 1010 * This function is called to allocate secure memory (memory 1011 * that the CPU cannot see). The function should fill the 1012 * context buffer descriptor (especially fields destroy, sgt, 1013 * size). 
1014 */ 1015 int (*secure_alloc)(struct gk20a *g, 1016 struct gr_ctx_buffer_desc *desc, 1017 size_t size); 1018 struct { 1019 void (*exit)(struct gk20a *g, struct nvgpu_mem *mem, 1020 struct nvgpu_sgl *sgl); 1021 u32 (*data032_r)(u32 i); 1022 } pramin; 1023 struct { 1024 int (*init_therm_setup_hw)(struct gk20a *g); 1025 void (*init_elcg_mode)(struct gk20a *g, u32 mode, u32 engine); 1026 void (*init_blcg_mode)(struct gk20a *g, u32 mode, u32 engine); 1027 int (*elcg_init_idle_filters)(struct gk20a *g); 1028#ifdef CONFIG_DEBUG_FS 1029 void (*therm_debugfs_init)(struct gk20a *g); 1030#endif 1031 int (*get_internal_sensor_curr_temp)(struct gk20a *g, u32 *temp_f24_8); 1032 void (*get_internal_sensor_limits)(s32 *max_24_8, 1033 s32 *min_24_8); 1034 u32 (*configure_therm_alert)(struct gk20a *g, s32 curr_warn_temp); 1035 } therm; 1036 struct { 1037 bool (*is_pmu_supported)(struct gk20a *g); 1038 int (*prepare_ucode)(struct gk20a *g); 1039 int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g); 1040 int (*pmu_nsbootstrap)(struct nvgpu_pmu *pmu); 1041 int (*pmu_init_perfmon)(struct nvgpu_pmu *pmu); 1042 int (*pmu_perfmon_start_sampling)(struct nvgpu_pmu *pmu); 1043 int (*pmu_perfmon_stop_sampling)(struct nvgpu_pmu *pmu); 1044 int (*pmu_perfmon_get_samples_rpc)(struct nvgpu_pmu *pmu); 1045 int (*pmu_setup_elpg)(struct gk20a *g); 1046 u32 (*pmu_get_queue_head)(u32 i); 1047 u32 (*pmu_get_queue_head_size)(void); 1048 u32 (*pmu_get_queue_tail_size)(void); 1049 u32 (*pmu_get_queue_tail)(u32 i); 1050 int (*pmu_queue_head)(struct gk20a *g, 1051 struct nvgpu_falcon_queue *queue, u32 *head, bool set); 1052 int (*pmu_queue_tail)(struct gk20a *g, 1053 struct nvgpu_falcon_queue *queue, u32 *tail, bool set); 1054 void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu, 1055 u32 *tail, bool set); 1056 u32 (*pmu_mutex_size)(void); 1057 int (*pmu_mutex_acquire)(struct nvgpu_pmu *pmu, 1058 u32 id, u32 *token); 1059 int (*pmu_mutex_release)(struct nvgpu_pmu *pmu, 1060 u32 id, u32 *token); 1061 bool 
(*pmu_is_interrupted)(struct nvgpu_pmu *pmu); 1062 void (*pmu_isr)(struct gk20a *g); 1063 void (*pmu_init_perfmon_counter)(struct gk20a *g); 1064 void (*pmu_pg_idle_counter_config)(struct gk20a *g, u32 pg_engine_id); 1065 u32 (*pmu_read_idle_counter)(struct gk20a *g, u32 counter_id); 1066 u32 (*pmu_read_idle_intr_status)(struct gk20a *g); 1067 void (*pmu_clear_idle_intr_status)(struct gk20a *g); 1068 void (*pmu_reset_idle_counter)(struct gk20a *g, u32 counter_id); 1069 void (*pmu_dump_elpg_stats)(struct nvgpu_pmu *pmu); 1070 void (*pmu_dump_falcon_stats)(struct nvgpu_pmu *pmu); 1071 void (*pmu_enable_irq)(struct nvgpu_pmu *pmu, bool enable); 1072 int (*init_wpr_region)(struct gk20a *g); 1073 int (*load_lsfalcon_ucode)(struct gk20a *g, u32 falconidmask); 1074 void (*write_dmatrfbase)(struct gk20a *g, u32 addr); 1075 void (*pmu_elpg_statistics)(struct gk20a *g, u32 pg_engine_id, 1076 struct pmu_pg_stats_data *pg_stat_data); 1077 int (*pmu_pg_init_param)(struct gk20a *g, u32 pg_engine_id); 1078 int (*pmu_pg_set_sub_feature_mask)(struct gk20a *g, 1079 u32 pg_engine_id); 1080 u32 (*pmu_pg_supported_engines_list)(struct gk20a *g); 1081 u32 (*pmu_pg_engines_feature_list)(struct gk20a *g, 1082 u32 pg_engine_id); 1083 int (*pmu_process_pg_event)(struct gk20a *g, void *pmumsg); 1084 bool (*pmu_is_lpwr_feature_supported)(struct gk20a *g, 1085 u32 feature_id); 1086 int (*pmu_lpwr_enable_pg)(struct gk20a *g, bool pstate_lock); 1087 int (*pmu_lpwr_disable_pg)(struct gk20a *g, bool pstate_lock); 1088 u32 (*pmu_pg_param_post_init)(struct gk20a *g); 1089 void (*dump_secure_fuses)(struct gk20a *g); 1090 int (*reset_engine)(struct gk20a *g, bool do_reset); 1091 bool (*is_engine_in_reset)(struct gk20a *g); 1092 bool (*is_lazy_bootstrap)(u32 falcon_id); 1093 bool (*is_priv_load)(u32 falcon_id); 1094 int (*pmu_populate_loader_cfg)(struct gk20a *g, 1095 void *lsfm, u32 *p_bl_gen_desc_size); 1096 int (*flcn_populate_bl_dmem_desc)(struct gk20a *g, 1097 void *lsfm, u32 *p_bl_gen_desc_size, 
u32 falconid); 1098 void (*handle_ext_irq)(struct gk20a *g, u32 intr); 1099 void (*set_irqmask)(struct gk20a *g); 1100 void (*update_lspmu_cmdline_args)(struct gk20a *g); 1101 void (*setup_apertures)(struct gk20a *g); 1102 u32 (*get_irqdest)(struct gk20a *g); 1103 int (*alloc_super_surface)(struct gk20a *g, 1104 struct nvgpu_mem *super_surface, u32 size); 1105 bool (*is_debug_mode_enabled)(struct gk20a *g); 1106 void (*secured_pmu_start)(struct gk20a *g); 1107 } pmu; 1108 struct { 1109 int (*init_debugfs)(struct gk20a *g); 1110 void (*disable_slowboot)(struct gk20a *g); 1111 int (*init_clk_support)(struct gk20a *g); 1112 int (*suspend_clk_support)(struct gk20a *g); 1113 u32 (*get_crystal_clk_hz)(struct gk20a *g); 1114 int (*clk_domain_get_f_points)(struct gk20a *g, 1115 u32 clkapidomain, u32 *pfpointscount, 1116 u16 *pfreqpointsinmhz); 1117 int (*clk_get_round_rate)(struct gk20a *g, u32 api_domain, 1118 unsigned long rate_target, unsigned long *rounded_rate); 1119 int (*get_clk_range)(struct gk20a *g, u32 api_domain, 1120 u16 *min_mhz, u16 *max_mhz); 1121 unsigned long (*measure_freq)(struct gk20a *g, u32 api_domain); 1122 u32 (*get_rate_cntr)(struct gk20a *g, struct namemap_cfg *c); 1123 unsigned long (*get_rate)(struct gk20a *g, u32 api_domain); 1124 int (*set_rate)(struct gk20a *g, u32 api_domain, unsigned long rate); 1125 unsigned long (*get_fmax_at_vmin_safe)(struct gk20a *g); 1126 u32 (*get_ref_clock_rate)(struct gk20a *g); 1127 int (*predict_mv_at_hz_cur_tfloor)(struct clk_gk20a *clk, 1128 unsigned long rate); 1129 unsigned long (*get_maxrate)(struct gk20a *g, u32 api_domain); 1130 int (*prepare_enable)(struct clk_gk20a *clk); 1131 void (*disable_unprepare)(struct clk_gk20a *clk); 1132 int (*get_voltage)(struct clk_gk20a *clk, u64 *val); 1133 int (*get_gpcclk_clock_counter)(struct clk_gk20a *clk, u64 *val); 1134 int (*pll_reg_write)(struct gk20a *g, u32 reg, u32 val); 1135 int (*get_pll_debug_data)(struct gk20a *g, 1136 struct nvgpu_clk_pll_debug_data *d); 
1137 int (*mclk_init)(struct gk20a *g); 1138 void (*mclk_deinit)(struct gk20a *g); 1139 int (*mclk_change)(struct gk20a *g, u16 val); 1140 bool split_rail_support; 1141 bool support_clk_freq_controller; 1142 bool support_pmgr_domain; 1143 bool support_lpwr_pg; 1144 u32 (*perf_pmu_vfe_load)(struct gk20a *g); 1145 u32 lut_num_entries; 1146 } clk; 1147 struct { 1148 int (*arbiter_clk_init)(struct gk20a *g); 1149 u32 (*get_arbiter_clk_domains)(struct gk20a *g); 1150 int (*get_arbiter_f_points)(struct gk20a *g,u32 api_domain, 1151 u32 *num_points, u16 *freqs_in_mhz); 1152 int (*get_arbiter_clk_range)(struct gk20a *g, u32 api_domain, 1153 u16 *min_mhz, u16 *max_mhz); 1154 int (*get_arbiter_clk_default)(struct gk20a *g, u32 api_domain, 1155 u16 *default_mhz); 1156 void (*clk_arb_run_arbiter_cb)(struct nvgpu_clk_arb *arb); 1157 /* This function is inherently unsafe to call while 1158 * arbiter is running arbiter must be blocked 1159 * before calling this function */ 1160 int (*get_current_pstate)(struct gk20a *g); 1161 void (*clk_arb_cleanup)(struct nvgpu_clk_arb *arb); 1162 } clk_arb; 1163 struct { 1164 int (*handle_pmu_perf_event)(struct gk20a *g, void *pmu_msg); 1165 } pmu_perf; 1166 struct { 1167 int (*exec_regops)(struct dbg_session_gk20a *dbg_s, 1168 struct nvgpu_dbg_reg_op *ops, 1169 u64 num_ops, 1170 bool *is_current_ctx); 1171 const struct regop_offset_range* ( 1172 *get_global_whitelist_ranges)(void); 1173 u64 (*get_global_whitelist_ranges_count)(void); 1174 const struct regop_offset_range* ( 1175 *get_context_whitelist_ranges)(void); 1176 u64 (*get_context_whitelist_ranges_count)(void); 1177 const u32* (*get_runcontrol_whitelist)(void); 1178 u64 (*get_runcontrol_whitelist_count)(void); 1179 const u32* (*get_qctl_whitelist)(void); 1180 u64 (*get_qctl_whitelist_count)(void); 1181 } regops; 1182 struct { 1183 void (*intr_mask)(struct gk20a *g); 1184 void (*intr_enable)(struct gk20a *g); 1185 void (*intr_unit_config)(struct gk20a *g, 1186 bool enable, bool 
is_stalling, u32 mask); 1187 void (*isr_stall)(struct gk20a *g); 1188 bool (*is_intr_hub_pending)(struct gk20a *g, u32 mc_intr); 1189 bool (*is_intr_nvlink_pending)(struct gk20a *g, u32 mc_intr); 1190 bool (*is_stall_and_eng_intr_pending)(struct gk20a *g, 1191 u32 act_eng_id, u32 *eng_intr_pending); 1192 u32 (*intr_stall)(struct gk20a *g); 1193 void (*intr_stall_pause)(struct gk20a *g); 1194 void (*intr_stall_resume)(struct gk20a *g); 1195 u32 (*intr_nonstall)(struct gk20a *g); 1196 void (*intr_nonstall_pause)(struct gk20a *g); 1197 void (*intr_nonstall_resume)(struct gk20a *g); 1198 u32 (*isr_nonstall)(struct gk20a *g); 1199 void (*enable)(struct gk20a *g, u32 units); 1200 void (*disable)(struct gk20a *g, u32 units); 1201 void (*reset)(struct gk20a *g, u32 units); 1202 bool (*is_enabled)(struct gk20a *g, enum nvgpu_unit unit); 1203 bool (*is_intr1_pending)(struct gk20a *g, enum nvgpu_unit unit, u32 mc_intr_1); 1204 void (*log_pending_intrs)(struct gk20a *g); 1205 void (*fbpa_isr)(struct gk20a *g); 1206 u32 (*reset_mask)(struct gk20a *g, enum nvgpu_unit unit); 1207 void (*fb_reset)(struct gk20a *g); 1208 } mc; 1209 struct { 1210 void (*show_dump)(struct gk20a *g, 1211 struct gk20a_debug_output *o); 1212 } debug; 1213 struct { 1214 int (*dbg_set_powergate)(struct dbg_session_gk20a *dbg_s, 1215 bool disable_powergate); 1216 bool (*check_and_set_global_reservation)( 1217 struct dbg_session_gk20a *dbg_s, 1218 struct dbg_profiler_object_data *prof_obj); 1219 bool (*check_and_set_context_reservation)( 1220 struct dbg_session_gk20a *dbg_s, 1221 struct dbg_profiler_object_data *prof_obj); 1222 void (*release_profiler_reservation)( 1223 struct dbg_session_gk20a *dbg_s, 1224 struct dbg_profiler_object_data *prof_obj); 1225 int (*perfbuffer_enable)(struct gk20a *g, u64 offset, u32 size); 1226 int (*perfbuffer_disable)(struct gk20a *g); 1227 } dbg_session_ops; 1228 1229 u32 (*get_litter_value)(struct gk20a *g, int value); 1230 int (*chip_init_gpu_characteristics)(struct gk20a 
*g); 1231 1232 struct { 1233 void (*init_hw)(struct gk20a *g); 1234 void (*isr)(struct gk20a *g); 1235 int (*bar1_bind)(struct gk20a *g, struct nvgpu_mem *bar1_inst); 1236 int (*bar2_bind)(struct gk20a *g, struct nvgpu_mem *bar1_inst); 1237 u32 (*set_bar0_window)(struct gk20a *g, struct nvgpu_mem *mem, 1238 struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, 1239 u32 w); 1240 u32 (*read_sw_scratch)(struct gk20a *g, u32 index); 1241 void (*write_sw_scratch)(struct gk20a *g, u32 index, u32 val); 1242 } bus; 1243 1244 struct { 1245 void (*isr)(struct gk20a *g); 1246 int (*read_ptimer)(struct gk20a *g, u64 *value); 1247 int (*get_timestamps_zipper)(struct gk20a *g, 1248 u32 source_id, u32 count, 1249 struct nvgpu_cpu_time_correlation_sample *); 1250 } ptimer; 1251 1252 struct { 1253 int (*init)(struct gk20a *g); 1254 int (*preos_wait_for_halt)(struct gk20a *g); 1255 void (*preos_reload_check)(struct gk20a *g); 1256 int (*devinit)(struct gk20a *g); 1257 int (*preos)(struct gk20a *g); 1258 int (*verify_devinit)(struct gk20a *g); 1259 } bios; 1260 1261#if defined(CONFIG_GK20A_CYCLE_STATS) 1262 struct { 1263 int (*enable_snapshot)(struct channel_gk20a *ch, 1264 struct gk20a_cs_snapshot_client *client); 1265 void (*disable_snapshot)(struct gr_gk20a *gr); 1266 int (*check_data_available)(struct channel_gk20a *ch, 1267 u32 *pending, 1268 bool *hw_overflow); 1269 void (*set_handled_snapshots)(struct gk20a *g, u32 num); 1270 u32 (*allocate_perfmon_ids)(struct gk20a_cs_snapshot *data, 1271 u32 count); 1272 u32 (*release_perfmon_ids)(struct gk20a_cs_snapshot *data, 1273 u32 start, 1274 u32 count); 1275 int (*detach_snapshot)(struct channel_gk20a *ch, 1276 struct gk20a_cs_snapshot_client *client); 1277 bool (*get_overflow_status)(struct gk20a *g); 1278 u32 (*get_pending_snapshots)(struct gk20a *g); 1279 } css; 1280#endif 1281 struct { 1282 int (*get_speed)(struct gk20a *g, u32 *xve_link_speed); 1283 int (*set_speed)(struct gk20a *g, u32 xve_link_speed); 1284 void 
(*available_speeds)(struct gk20a *g, u32 *speed_mask); 1285 u32 (*xve_readl)(struct gk20a *g, u32 reg); 1286 void (*xve_writel)(struct gk20a *g, u32 reg, u32 val); 1287 void (*disable_aspm)(struct gk20a *g); 1288 void (*reset_gpu)(struct gk20a *g); 1289#if defined(CONFIG_PCI_MSI) 1290 void (*rearm_msi)(struct gk20a *g); 1291#endif 1292 void (*enable_shadow_rom)(struct gk20a *g); 1293 void (*disable_shadow_rom)(struct gk20a *g); 1294 u32 (*get_link_control_status)(struct gk20a *g); 1295 } xve; 1296 struct { 1297 int (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn); 1298 } falcon; 1299 struct { 1300 void (*enable_priv_ring)(struct gk20a *g); 1301 void (*isr)(struct gk20a *g); 1302 void (*decode_error_code)(struct gk20a *g, u32 error_code); 1303 void (*set_ppriv_timeout_settings)(struct gk20a *g); 1304 u32 (*enum_ltc)(struct gk20a *g); 1305 } priv_ring; 1306 struct { 1307 int (*check_priv_security)(struct gk20a *g); 1308 bool (*is_opt_ecc_enable)(struct gk20a *g); 1309 bool (*is_opt_feature_override_disable)(struct gk20a *g); 1310 u32 (*fuse_status_opt_fbio)(struct gk20a *g); 1311 u32 (*fuse_status_opt_fbp)(struct gk20a *g); 1312 u32 (*fuse_status_opt_rop_l2_fbp)(struct gk20a *g, u32 fbp); 1313 u32 (*fuse_status_opt_gpc)(struct gk20a *g); 1314 u32 (*fuse_status_opt_tpc_gpc)(struct gk20a *g, u32 gpc); 1315 void (*fuse_ctrl_opt_tpc_gpc)(struct gk20a *g, u32 gpc, u32 val); 1316 u32 (*fuse_opt_sec_debug_en)(struct gk20a *g); 1317 u32 (*fuse_opt_priv_sec_en)(struct gk20a *g); 1318 u32 (*read_vin_cal_fuse_rev)(struct gk20a *g); 1319 u32 (*read_vin_cal_slope_intercept_fuse)(struct gk20a *g, 1320 u32 vin_id, u32 *slope, 1321 u32 *intercept); 1322 u32 (*read_vin_cal_gain_offset_fuse)(struct gk20a *g, 1323 u32 vin_id, s8 *gain, 1324 s8 *offset); 1325 } fuse; 1326 struct { 1327 int (*init)(struct gk20a *g); 1328 int (*discover_ioctrl)(struct gk20a *g); 1329 int (*discover_link)(struct gk20a *g); 1330 int (*isr)(struct gk20a *g); 1331 int (*rxdet)(struct gk20a *g, u32 link_id); 
	int (*setup_pll)(struct gk20a *g, unsigned long link_mask);
	int (*minion_data_ready_en)(struct gk20a *g,
			unsigned long link_mask, bool sync);
	void (*get_connected_link_mask)(u32 *link_mask);
	void (*set_sw_war)(struct gk20a *g, u32 link_id);
	/* API */
	int (*link_early_init)(struct gk20a *g, unsigned long mask);
	u32 (*link_get_mode)(struct gk20a *g, u32 link_id);
	u32 (*link_get_state)(struct gk20a *g, u32 link_id);
	int (*link_set_mode)(struct gk20a *g, u32 link_id, u32 mode);
	u32 (*get_sublink_mode)(struct gk20a *g, u32 link_id,
		bool is_rx_sublink);
	u32 (*get_rx_sublink_state)(struct gk20a *g, u32 link_id);
	u32 (*get_tx_sublink_state)(struct gk20a *g, u32 link_id);
	int (*set_sublink_mode)(struct gk20a *g, u32 link_id,
		bool is_rx_sublink, u32 mode);
	int (*interface_init)(struct gk20a *g);
	int (*interface_disable)(struct gk20a *g);
	int (*reg_init)(struct gk20a *g);
	int (*shutdown)(struct gk20a *g);
	int (*early_init)(struct gk20a *g);
} nvlink;
/* Chip-specific TOP unit register accessors (NVHS clock controls). */
struct {
	u32 (*get_nvhsclk_ctrl_e_clk_nvl)(struct gk20a *g);
	void (*set_nvhsclk_ctrl_e_clk_nvl)(struct gk20a *g, u32 val);
	u32 (*get_nvhsclk_ctrl_swap_clk_nvl)(struct gk20a *g);
	void (*set_nvhsclk_ctrl_swap_clk_nvl)(struct gk20a *g, u32 val);
} top;
/* ACR (Access Controlled Region) software setup hook. */
struct {
	void (*acr_sw_init)(struct gk20a *g, struct nvgpu_acr *acr);
} acr;
/* TPC power-gating hook; takes the fuse status mask as input. */
struct {
	int (*tpc_powergate)(struct gk20a *g, u32 fuse_status);
} tpc;
void (*semaphore_wakeup)(struct gk20a *g, bool post_events);
};

/*
 * One ucode image carved out of the VBIOS: a bootloader segment, a code
 * segment and a DMEM (data) segment, each with its size and physical base
 * as reported by the VBIOS tables.
 */
struct nvgpu_bios_ucode {
	u8 *bootloader;
	u32 bootloader_phys_base;
	u32 bootloader_size;
	u8 *ucode;
	u32 phys_base;
	u32 size;
	u8 *dmem;
	u32 dmem_phys_base;
	u32 dmem_size;
	/* Entry point offset within the code segment. */
	u32 code_entry_point;
};

/*
 * Parsed VBIOS state: the raw ROM image plus pointers/offsets into the
 * tables and scripts extracted from it (devinit, preos, mem straps,
 * BIT tokens, NVLINK config).
 */
struct nvgpu_bios {
	u32 vbios_version;
	u8 vbios_oem_version;

	/* Raw VBIOS image and its length in bytes. */
	u8 *data;
	size_t size;

	struct
	 */
	struct nvgpu_rwsem deterministic_busy;

	/* Falcon microcontroller instances present on the GPU. */
	struct nvgpu_falcon pmu_flcn;
	struct nvgpu_falcon sec2_flcn;
	struct nvgpu_falcon fecs_flcn;
	struct nvgpu_falcon gpccs_flcn;
	struct nvgpu_falcon nvdec_flcn;
	struct nvgpu_falcon minion_flcn;
	struct nvgpu_falcon gsp_flcn;
	/* Per-unit driver state embedded in the device struct. */
	struct clk_gk20a clk;
	struct fifo_gk20a fifo;
	struct nvgpu_nvlink_dev nvlink;
	struct gr_gk20a gr;
	struct sim_nvgpu *sim;
	struct mm_gk20a mm;
	struct nvgpu_pmu pmu;
	struct nvgpu_acr acr;
	struct nvgpu_ecc ecc;
	struct clk_pmupstate clk_pmu;
	struct perf_pmupstate perf_pmu;
	struct pmgr_pmupstate pmgr_pmu;
	struct therm_pmupstate therm_pmu;
	struct nvgpu_sec2 sec2;
	struct nvgpu_sched_ctrl sched_ctrl;

#ifdef CONFIG_DEBUG_FS
	struct railgate_stats pstats;
#endif
	/* Timeout knobs; see nvgpu_is_timeouts_enabled() below. */
	u32 gr_idle_timeout_default;
	bool timeouts_disabled_by_user;
	unsigned int ch_wdt_timeout_ms;
	u32 fifo_eng_timeout_us;

	struct nvgpu_mutex power_lock;

	/* Channel priorities */
	u32 timeslice_low_priority_us;
	u32 timeslice_medium_priority_us;
	u32 timeslice_high_priority_us;
	u32 min_timeslice_us;
	u32 max_timeslice_us;
	bool runlist_interleave;

	/* Clock-gating / power-gating feature flags, guarded by cg_pg_lock. */
	struct nvgpu_mutex cg_pg_lock;
	bool slcg_enabled;
	bool blcg_enabled;
	bool elcg_enabled;
	bool elpg_enabled;
	bool aelpg_enabled;
	bool can_elpg;
	bool mscg_enabled;
	bool forced_idle;
	bool forced_reset;
	bool allow_all;

	u32 ptimer_src_freq;

	int railgate_delay;
	u8 ldiv_slowdown_factor;
	unsigned int aggressive_sync_destroy_thresh;
	bool aggressive_sync_destroy;

	/* Debugfs knob for forcing syncpt support off in runtime.
	 */
	u32 disable_syncpoints;

	bool support_pmu;

	bool is_virtual;

	bool has_cde;

	u32 emc3d_ratio;

	struct nvgpu_spinlock ltc_enabled_lock;

	struct gk20a_ctxsw_ucode_info ctxsw_ucode_info;

	/*
	 * A group of semaphore pools. One for each channel.
	 */
	struct nvgpu_semaphore_sea *sema_sea;

	/* held while manipulating # of debug/profiler sessions present */
	/* also prevents debug sessions from attaching until released */
	struct nvgpu_mutex dbg_sessions_lock;
	int dbg_powergating_disabled_refcount; /*refcount for pg disable */
	/*refcount for timeout disable */
	nvgpu_atomic_t timeouts_disabled_refcount;

	/* must have dbg_sessions_lock before use */
	struct nvgpu_dbg_reg_op *dbg_regops_tmp_buf;
	u32 dbg_regops_tmp_buf_ops;

	/* For perfbuf mapping */
	struct {
		struct dbg_session_gk20a *owner;
		u64 offset;
	} perfbuf;

	/* For profiler reservations */
	struct nvgpu_list_node profiler_objects;
	bool global_profiler_reservation_held;
	int profiler_reservation_count;

	/* Teardown callback installed by the platform/OS layer. */
	void (*remove_support)(struct gk20a *);

	/* Power-gating statistics. */
	u64 pg_ingating_time_us;
	u64 pg_ungating_time_us;
	u32 pg_gating_cnt;

	struct nvgpu_spinlock mc_enable_lock;

	struct gk20a_as as;

	struct nvgpu_mutex client_lock;
	int client_refcount; /* open channels and ctrl nodes */

	/* Chip-specific HAL function-pointer table. */
	struct gpu_ops ops;
	u32 mc_intr_mask_restore[4];
	/*used for change of enum zbc update cmd id from ver 0 to ver1*/
	u32 pmu_ver_cmd_id_zbc_table_update;
	u32 pmu_lsf_pmu_wpr_init_done;
	u32 pmu_lsf_loaded_falcon_id;

	int irqs_enabled;
	int irq_stall; /* can be same as irq_nonstall in case of PCI */
	int irq_nonstall;
	u32 max_ltc_count;
	u32 ltc_count;
	u32 ltc_streamid;

	/* Worker-thread context shared by the channel and clk_arb workers. */
	struct gk20a_worker {
		struct nvgpu_thread poll_task;
		nvgpu_atomic_t put;
		struct nvgpu_cond wq;
		struct nvgpu_list_node items;
		struct nvgpu_spinlock items_lock;
		struct nvgpu_mutex start_lock;
	} channel_worker, clk_arb_worker;

	/* OS-specific channel callbacks, filled in by the OS layer. */
	struct {
		void (*open)(struct channel_gk20a *ch);
		void (*close)(struct channel_gk20a *ch);
		void (*work_completion_signal)(struct channel_gk20a *ch);
		void (*work_completion_cancel_sync)(struct channel_gk20a *ch);
		bool (*os_fence_framework_inst_exists)(struct channel_gk20a *ch);
		int (*init_os_fence_framework)(
			struct channel_gk20a *ch, const char *fmt, ...);
		void (*signal_os_fence_framework)(struct channel_gk20a *ch);
		void (*destroy_os_fence_framework)(struct channel_gk20a *ch);
		int (*copy_user_gpfifo)(struct nvgpu_gpfifo_entry *dest,
				struct nvgpu_gpfifo_userdata userdata,
				u32 start, u32 length);
		int (*alloc_usermode_buffers)(struct channel_gk20a *c,
			struct nvgpu_setup_bind_args *args);
		void (*free_usermode_buffers)(struct channel_gk20a *c);
	} os_channel;

	struct gk20a_scale_profile *scale_profile;
	unsigned long last_freq;

	struct gk20a_ctxsw_trace *ctxsw_trace;
	struct gk20a_fecs_trace *fecs_trace;

	bool mmu_debug_ctrl;
	u32 mmu_debug_mode_refcnt;

	u32 tpc_fs_mask_user;

	/* TPC power-gating configuration. */
	u32 tpc_pg_mask;
	u32 tpc_count;
	bool can_tpc_powergate;

	u32 valid_tpc_mask[MAX_TPC_PG_CONFIGS];

	struct nvgpu_bios bios;
	bool bios_is_init;

	struct nvgpu_clk_arb *clk_arb;

	struct nvgpu_mutex clk_arb_enable_lock;

	nvgpu_atomic_t clk_arb_global_nr;

	struct gk20a_ce_app ce_app;

	bool ltc_intr_en_illegal_compstat;

	/* PCI device identifier */
	u16 pci_vendor_id, pci_device_id;
	u16 pci_subsystem_vendor_id, pci_subsystem_device_id;
	u16 pci_class;
	u8 pci_revision;

	/*
	 * PCI power management: i2c device index, port and address for
	 * INA3221.
	 */
	u32 ina3221_dcb_index;
	u32 ina3221_i2c_address;
	u32 ina3221_i2c_port;
	bool hardcode_sw_threshold;

	/* PCIe power states. */
	bool xve_l0s;
	bool xve_l1;

	/* Current warning temp in sfxp24.8 */
	s32 curr_warn_temp;

#if defined(CONFIG_PCI_MSI)
	/* Check if msi is enabled */
	bool msi_enabled;
#endif
#ifdef CONFIG_NVGPU_TRACK_MEM_USAGE
	struct nvgpu_mem_alloc_tracker *vmallocs;
	struct nvgpu_mem_alloc_tracker *kmallocs;
#endif

	/* The minimum VBIOS version supported */
	u32 vbios_min_version;

	/* memory training sequence and mclk switch scripts */
	u32 mem_config_idx;

	u64 dma_memory_used;

#if defined(CONFIG_TEGRA_GK20A_NVHOST)
	u64 syncpt_unit_base;
	size_t syncpt_unit_size;
	u32 syncpt_size;
#endif
	struct nvgpu_mem syncpt_mem;

	struct nvgpu_list_node boardobj_head;
	struct nvgpu_list_node boardobjgrp_head;

	struct nvgpu_mem pdb_cache_war_mem;
};

/*
 * Timeouts are considered enabled while no caller holds a disable
 * refcount (see timeouts_disabled_refcount above).
 */
static inline bool nvgpu_is_timeouts_enabled(struct gk20a *g)
{
	return nvgpu_atomic_read(&g->timeouts_disabled_refcount) == 0;
}

/* NOTE(review): definition continues past this excerpt. */
static inline u32 gk20a_get_gr_idle_timeout(struct gk20a *g)
{
	return nvgpu_is_timeouts_enabled(g) ?
1714 g->gr_idle_timeout_default : UINT_MAX; 1715} 1716 1717#define MULTICHAR_TAG(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) 1718enum BAR0_DEBUG_OPERATION { 1719 BARO_ZERO_NOP = 0, 1720 OP_END = MULTICHAR_TAG('D', 'O', 'N', 'E'), 1721 BAR0_READ32 = MULTICHAR_TAG('0', 'R', '3', '2'), 1722 BAR0_WRITE32 = MULTICHAR_TAG('0', 'W', '3', '2'), 1723}; 1724 1725struct share_buffer_head { 1726 enum BAR0_DEBUG_OPERATION operation; 1727/* size of the operation item */ 1728 u32 size; 1729 u32 completed; 1730 u32 failed; 1731 u64 context; 1732 u64 completion_callback; 1733}; 1734 1735struct gk20a_cyclestate_buffer_elem { 1736 struct share_buffer_head head; 1737/* in */ 1738 u64 p_data; 1739 u64 p_done; 1740 u32 offset_bar0; 1741 u16 first_bit; 1742 u16 last_bit; 1743/* out */ 1744/* keep 64 bits to be consistent */ 1745 u64 data; 1746}; 1747 1748/* operations that will need to be executed on non stall workqueue */ 1749#define GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE BIT32(0) 1750#define GK20A_NONSTALL_OPS_POST_EVENTS BIT32(1) 1751 1752/* register accessors */ 1753void __nvgpu_check_gpu_state(struct gk20a *g); 1754void __gk20a_warn_on_no_regs(void); 1755 1756/* classes that the device supports */ 1757/* TBD: get these from an open-sourced SDK? 
*/ 1758enum { 1759 FERMI_TWOD_A = 0x902D, 1760 KEPLER_INLINE_TO_MEMORY_A = 0xA040, 1761 KEPLER_DMA_COPY_A = 0xA0B5, 1762}; 1763 1764#define GK20A_BAR0_IORESOURCE_MEM 0 1765#define GK20A_BAR1_IORESOURCE_MEM 1 1766#define GK20A_SIM_IORESOURCE_MEM 2 1767 1768void gk20a_busy_noresume(struct gk20a *g); 1769void gk20a_idle_nosuspend(struct gk20a *g); 1770int __must_check gk20a_busy(struct gk20a *g); 1771void gk20a_idle(struct gk20a *g); 1772int __gk20a_do_idle(struct gk20a *g, bool force_reset); 1773int __gk20a_do_unidle(struct gk20a *g); 1774 1775int gk20a_wait_for_idle(struct gk20a *g); 1776 1777#define NVGPU_GPU_ARCHITECTURE_SHIFT 4 1778 1779/* constructs unique and compact GPUID from nvgpu_gpu_characteristics 1780 * arch/impl fields */ 1781#define GK20A_GPUID(arch, impl) ((u32) ((arch) | (impl))) 1782 1783#define GK20A_GPUID_GK20A 0x000000EA 1784#define GK20A_GPUID_GM20B 0x0000012B 1785#define GK20A_GPUID_GM20B_B 0x0000012E 1786#define NVGPU_GPUID_GP10B 0x0000013B 1787#define NVGPU_GPUID_GP104 0x00000134 1788#define NVGPU_GPUID_GP106 0x00000136 1789#define NVGPU_GPUID_GV11B 0x0000015B 1790#define NVGPU_GPUID_GV100 0x00000140 1791 1792int gk20a_init_gpu_characteristics(struct gk20a *g); 1793 1794bool gk20a_check_poweron(struct gk20a *g); 1795int gk20a_prepare_poweroff(struct gk20a *g); 1796int gk20a_finalize_poweron(struct gk20a *g); 1797 1798int nvgpu_wait_for_stall_interrupts(struct gk20a *g, u32 timeout); 1799int nvgpu_wait_for_nonstall_interrupts(struct gk20a *g, u32 timeout); 1800void nvgpu_wait_for_deferred_interrupts(struct gk20a *g); 1801 1802struct gk20a * __must_check gk20a_get(struct gk20a *g); 1803void gk20a_put(struct gk20a *g); 1804 1805bool nvgpu_has_syncpoints(struct gk20a *g); 1806 1807#endif /* GK20A_H */
diff --git a/include/nvgpu/gmmu.h b/include/nvgpu/gmmu.h
deleted file mode 100644
index 2fc0d44..0000000
--- a/include/nvgpu/gmmu.h
+++ /dev/null
/*
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_GMMU_H
#define NVGPU_GMMU_H

#include <nvgpu/types.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/list.h>
#include <nvgpu/rbtree.h>
#include <nvgpu/lock.h>
#include <nvgpu/bitops.h>
#include <nvgpu/mm.h>

/*
 * This is the GMMU API visible to blocks outside of the GMMU. Basically this
 * API supports all the different types of mappings that might be done in the
 * GMMU.
 */

struct vm_gk20a;
struct nvgpu_mem;

/* Indices into the per-VM page size table. */
#define GMMU_PAGE_SIZE_SMALL	0U
#define GMMU_PAGE_SIZE_BIG	1U
#define GMMU_PAGE_SIZE_KERNEL	2U
#define GMMU_NR_PAGE_SIZES	3U

enum gk20a_mem_rw_flag {
	gk20a_mem_flag_none = 0,	/* RW */
	gk20a_mem_flag_read_only = 1,	/* RO */
	gk20a_mem_flag_write_only = 2,	/* WO */
};

/*
 * Minimum size of a cache. The number of different caches in the nvgpu_pd_cache
 * structure is of course depending on this. The MIN_SHIFT define is the right
 * number of bits to shift to determine which list to use in the array of lists.
 *
 * For Linux, limit the use of the cache to entries less than the page size, to
 * avoid potential problems with running out of CMA memory when allocating large,
 * contiguous slabs, as would be required for non-iommmuable chips.
 */
#define NVGPU_PD_CACHE_MIN		256U
#define NVGPU_PD_CACHE_MIN_SHIFT	9U

#ifdef __KERNEL__

#if PAGE_SIZE == 4096
#define NVGPU_PD_CACHE_COUNT		4U
#elif PAGE_SIZE == 65536
#define NVGPU_PD_CACHE_COUNT		8U
#else
#error "Unsupported page size."
#endif

#else
#define NVGPU_PD_CACHE_COUNT		8U
#endif

#define NVGPU_PD_CACHE_SIZE	(NVGPU_PD_CACHE_MIN * (1U << NVGPU_PD_CACHE_COUNT))

/* One slab of DMA memory carved into power-of-two sized PD allocations. */
struct nvgpu_pd_mem_entry {
	struct nvgpu_mem mem;

	/*
	 * Size of the page directories (not the mem). alloc_map is a bitmap
	 * showing which PDs have been allocated.
	 *
	 * The size of mem will be NVGPU_PD_CACHE_SIZE
	 * and pd_size will always be a power of 2.
	 */
	u32 pd_size;
	DECLARE_BITMAP(alloc_map, NVGPU_PD_CACHE_SIZE / NVGPU_PD_CACHE_MIN);

	/* Total number of allocations in this PD. */
	u32 allocs;

	struct nvgpu_list_node list_entry;
	struct nvgpu_rbtree_node tree_entry;
};

/* Recover the nvgpu_pd_mem_entry embedding @node as its list_entry. */
static inline struct nvgpu_pd_mem_entry *
nvgpu_pd_mem_entry_from_list_entry(struct nvgpu_list_node *node)
{
	return (struct nvgpu_pd_mem_entry *)
		((uintptr_t)node -
		 offsetof(struct nvgpu_pd_mem_entry, list_entry));
};

/* Recover the nvgpu_pd_mem_entry embedding @node as its tree_entry. */
static inline struct nvgpu_pd_mem_entry *
nvgpu_pd_mem_entry_from_tree_entry(struct nvgpu_rbtree_node *node)
{
	return (struct nvgpu_pd_mem_entry *)
		((uintptr_t)node -
		 offsetof(struct nvgpu_pd_mem_entry, tree_entry));
};

/*
 * A cache for allocating PD memory from. This enables smaller PDs to be packed
 * into single pages.
 *
 * This is fairly complex so see the documentation in pd_cache.c for a full
 * description of how this is organized.
 */
struct nvgpu_pd_cache {
	/*
	 * Array of lists of full nvgpu_pd_mem_entries and partially full (or
	 * empty) nvgpu_pd_mem_entries.
	 */
	struct nvgpu_list_node full[NVGPU_PD_CACHE_COUNT];
	struct nvgpu_list_node partial[NVGPU_PD_CACHE_COUNT];

	/*
	 * Tree of all allocated struct nvgpu_mem's for fast look up.
	 */
	struct nvgpu_rbtree_node *mem_tree;

	/*
	 * All access to the cache must be locked. This protects the lists and
	 * the rb tree.
	 */
	struct nvgpu_mutex lock;
};

/*
 * GMMU page directory. This is the kernel's tracking of a list of PDEs or PTEs
 * in the GMMU.
 */
struct nvgpu_gmmu_pd {
	/*
	 * DMA memory describing the PTEs or PDEs. @mem_offs describes the
	 * offset of the PDE table in @mem. @cached specifies if this PD is
	 * using pd_cache memory.
	 */
	struct nvgpu_mem *mem;
	u32 mem_offs;
	bool cached;

	/*
	 * List of pointers to the next level of page tables. Does not
	 * need to be populated when this PD is pointing to PTEs.
	 */
	struct nvgpu_gmmu_pd *entries;
	int num_entries;
};

/*
 * Reduce the number of arguments getting passed through the various levels of
 * GMMU mapping functions.
 *
 * The following fields are set statically and do not change throughout the
 * mapping call:
 *
 *   pgsz:            Index into the page size table.
 *   kind_v:          Kind attributes for mapping.
 *   cacheable:       Cacheability of the mapping.
 *   rw_flag:         Flag from enum gk20a_mem_rw_flag
 *   sparse:          Set if the mapping should be sparse.
 *   priv:            Privileged mapping.
 *   coherent:        Set if the mapping should be IO coherent.
 *   valid:           Set if the PTE should be marked valid.
 *   aperture:        VIDMEM or SYSMEM.
 *   debug:           When set print debugging info.
 *   platform_atomic: True if platform_atomic flag is valid.
 *
 * These fields are dynamically updated as necessary during the map:
 *
 *   ctag:            Comptag line in the comptag cache;
 *                    updated every time we write a PTE.
 */
struct nvgpu_gmmu_attrs {
	u32 pgsz;
	u32 kind_v;
	u64 ctag;
	bool cacheable;
	enum gk20a_mem_rw_flag rw_flag;
	bool sparse;
	bool priv;
	bool valid;
	enum nvgpu_aperture aperture;
	bool debug;
	bool l3_alloc;
	bool platform_atomic;
};

/* Describes one level of the GMMU page table hierarchy for a chip. */
struct gk20a_mmu_level {
	int hi_bit[2];
	int lo_bit[2];

	/*
	 * Build map from virt_addr -> phys_addr.
	 */
	void (*update_entry)(struct vm_gk20a *vm,
			     const struct gk20a_mmu_level *l,
			     struct nvgpu_gmmu_pd *pd,
			     u32 pd_idx,
			     u64 phys_addr,
			     u64 virt_addr,
			     struct nvgpu_gmmu_attrs *attrs);
	u32 entry_size;
	/*
	 * Get pde page size
	 */
	u32 (*get_pgsz)(struct gk20a *g, const struct gk20a_mmu_level *l,
			struct nvgpu_gmmu_pd *pd, u32 pd_idx);
};

/* Human-readable name for a gk20a_mem_rw_flag (for debug output). */
static inline const char *nvgpu_gmmu_perm_str(enum gk20a_mem_rw_flag p)
{
	switch (p) {
	case gk20a_mem_flag_none:
		return "RW";
	case gk20a_mem_flag_write_only:
		return "WO";
	case gk20a_mem_flag_read_only:
		return "RO";
	default:
		return "??";
	}
}

int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm);

/**
 * nvgpu_gmmu_map - Map memory into the GMMU.
 *
 * Kernel space.
 */
u64 nvgpu_gmmu_map(struct vm_gk20a *vm,
		   struct nvgpu_mem *mem,
		   u64 size,
		   u32 flags,
		   enum gk20a_mem_rw_flag rw_flag,
		   bool priv,
		   enum nvgpu_aperture aperture);

/**
 * nvgpu_gmmu_map_fixed - Map memory into the GMMU.
 *
 * Kernel space.
 */
u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
			 struct nvgpu_mem *mem,
			 u64 addr,
			 u64 size,
			 u32 flags,
			 enum gk20a_mem_rw_flag rw_flag,
			 bool priv,
			 enum nvgpu_aperture aperture);

/**
 * nvgpu_gmmu_unmap - Unmap a buffer.
 *
 * Kernel space.
 */
void nvgpu_gmmu_unmap(struct vm_gk20a *vm,
		      struct nvgpu_mem *mem,
		      u64 gpu_va);

int nvgpu_pd_alloc(struct vm_gk20a *vm,
		   struct nvgpu_gmmu_pd *pd,
		   u32 bytes);

void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
				struct nvgpu_gmmu_pd *pd, u32 bytes);
void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
int nvgpu_pd_cache_init(struct gk20a *g);
void nvgpu_pd_cache_fini(struct gk20a *g);

/*
 * Some useful routines that are shared across chips.
 */

/* Word offset of PDE @pd_idx within its PD table. */
static inline u32 pd_offset_from_index(const struct gk20a_mmu_level *l,
				       u32 pd_idx)
{
	return (pd_idx * l->entry_size) / sizeof(u32);
}

/* Write one 32-bit word @w of a PD, relative to the PD's offset in its mem. */
static inline void pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
			    size_t w, size_t data)
{
	nvgpu_mem_wr32(g, pd->mem, (pd->mem_offs / sizeof(u32)) + w, data);
}

/**
 * __nvgpu_pte_words - Compute number of words in a PTE.
 *
 * @g - The GPU.
 *
 * This computes and returns the size of a PTE for the passed chip.
 */
u32 __nvgpu_pte_words(struct gk20a *g);

/**
 * __nvgpu_get_pte - Get the contents of a PTE by virtual address
 *
 * @g     - The GPU.
 * @vm    - VM to look in.
 * @vaddr - GPU virtual address.
 * @pte   - [out] Set to the contents of the PTE.
 *
 * Find a PTE in the passed VM based on the passed GPU virtual address. This
 * will @pte with a copy of the contents of the PTE. @pte must be an array of
 * u32s large enough to contain the PTE. This can be computed using
 * __nvgpu_pte_words().
 *
 * If you wish to write to this PTE then you may modify @pte and then use the
 * __nvgpu_set_pte().
 *
 * This function returns 0 if the PTE is found and -EINVAL otherwise.
 */
int __nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);

/**
 * __nvgpu_set_pte - Set a PTE based on virtual address
 *
 * @g     - The GPU.
 * @vm    - VM to look in.
 * @vaddr - GPU virtual address.
 * @pte   - The contents of the PTE to write.
 *
 * Find a PTE and overwrite the contents of that PTE with the passed in data
 * located in @pte. If the PTE does not exist then no writing will happen. That
 * is this function will not fill out the page tables for you. The expectation
 * is that the passed @vaddr has already been mapped and this is just modifying
 * the mapping (for instance changing invalid to valid).
 *
 * @pte must contain at least the required words for the PTE. See
 * __nvgpu_pte_words().
 *
 * This function returns 0 on success and -EINVAL otherwise.
 */
int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);


/*
 * Internal debugging routines. Probably not something you want to use.
 */
#define pte_dbg(g, attrs, fmt, args...)					\
	do {								\
		if ((attrs != NULL) && (attrs->debug))			\
			nvgpu_info(g, fmt, ##args);			\
		else							\
			nvgpu_log(g, gpu_dbg_pte, fmt, ##args);		\
	} while (0)

#endif /* NVGPU_GMMU_H */
diff --git a/include/nvgpu/hal_init.h b/include/nvgpu/hal_init.h
deleted file mode 100644
index 06e58e7..0000000
--- a/include/nvgpu/hal_init.h
+++ /dev/null
/*
 * NVIDIA GPU Hardware Abstraction Layer functions definitions.
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_HAL_INIT_H
#define NVGPU_HAL_INIT_H

struct gk20a;

/*
 * Install the per-chip HAL for @g.
 * NOTE(review): implementation not visible here; presumably fills in
 * g->ops based on the detected chip — confirm against hal init code.
 */
int nvgpu_init_hal(struct gk20a *g);

/*
 * Identify the GPU chip for @g.
 * NOTE(review): implementation not visible here — confirm semantics
 * (reads boot/ID registers?) against the defining .c file.
 */
int nvgpu_detect_chip(struct gk20a *g);

#endif /* NVGPU_HAL_INIT_H */
diff --git a/include/nvgpu/hashtable.h b/include/nvgpu/hashtable.h
deleted file mode 100644
index 5ce56f0..0000000
--- a/include/nvgpu/hashtable.h
+++ /dev/null
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * OS abstraction wrapper for hash tables: under the Linux kernel this pulls
 * in <linux/hashtable.h>; other builds currently provide nothing.
 *
 * Fix: the original include guard was __NVGPU_SORT_H__ — a copy-paste from
 * the sort wrapper header. That name (a) collides with the sort header's
 * guard, so including both headers would silently drop one of them, and
 * (b) uses a leading double underscore, which is reserved for the
 * implementation (C11 7.1.3). Renamed to match this file.
 */
#ifndef NVGPU_HASHTABLE_H
#define NVGPU_HASHTABLE_H

#ifdef __KERNEL__
#include <linux/hashtable.h>
#endif

#endif /* NVGPU_HASHTABLE_H */
diff --git a/include/nvgpu/hw/gk20a/hw_bus_gk20a.h b/include/nvgpu/hw/gk20a/hw_bus_gk20a.h
deleted file mode 100644
index d3bb9e9..0000000
--- a/include/nvgpu/hw/gk20a/hw_bus_gk20a.h
+++ /dev/null
/*
 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Function naming determines intended use:
 *
 *     <x>_r(void) : Returns the offset for register <x>.
 *
 *     <x>_o(void) : Returns the offset for element <x>.
 *
 *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
 *
 *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
 *
 *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
 *         and masked to place it at field <y> of register <x>.  This value
 *         can be |'d with others to produce a full register value for
 *         register <x>.
 *
 *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
 *         value can be ~'d and then &'d to clear the value of field <y> for
 *         register <x>.
 *
 *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
 *         to place it at field <y> of register <x>.  This value can be |'d
 *         with others to produce a full register value for <x>.
 *
 *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
 *         <x> value 'r' after being shifted to place its LSB at bit 0.
 *         This value is suitable for direct comparison with other unshifted
 *         values appropriate for use in field <y> of register <x>.
 *
 *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
 *         field <y> of register <x>.  This value is suitable for direct
 *         comparison with unshifted values appropriate for use in field <y>
 *         of register <x>.
 */
#ifndef _hw_bus_gk20a_h_
#define _hw_bus_gk20a_h_

/* BAR0 window: relocatable window used for indirect BAR0 access. */
static inline u32 bus_bar0_window_r(void)
{
	return 0x00001700U;
}
static inline u32 bus_bar0_window_base_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 bus_bar0_window_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void)
{
	return 0x2000000U;
}
static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void)
{
	return 0x3000000U;
}
static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void)
{
	return 0x00000010U;
}
/* BAR1 block: instance block binding for the BAR1 aperture. */
static inline u32 bus_bar1_block_r(void)
{
	return 0x00001704U;
}
static inline u32 bus_bar1_block_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 bus_bar1_block_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar1_block_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 bus_bar1_block_mode_virtual_f(void)
{
	return 0x80000000U;
}
/* BAR2 block: instance block binding for the BAR2 aperture. */
static inline u32 bus_bar2_block_r(void)
{
	return 0x00001714U;
}
static inline u32 bus_bar2_block_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 bus_bar2_block_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar2_block_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 bus_bar2_block_mode_virtual_f(void)
{
	return 0x80000000U;
}
static inline u32 bus_bar1_block_ptr_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 bus_bar2_block_ptr_shift_v(void)
{
	return 0x0000000cU;
}
/* Bus interrupt status and enable registers. */
static inline u32 bus_intr_0_r(void)
{
	return 0x00001100U;
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
	return 0x1U << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
	return 0x1U << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
	return 0x1U << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
	return 0x00001140U;
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
	return 0x1U << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
	return 0x1U << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
	return 0x1U << 3U;
}
#endif
diff --git a/include/nvgpu/hw/gk20a/hw_ccsr_gk20a.h b/include/nvgpu/hw/gk20a/hw_ccsr_gk20a.h
deleted file mode 100644
index 95151f6..0000000
--- a/include/nvgpu/hw/gk20a/hw_ccsr_gk20a.h
+++ /dev/null
/*
 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Function naming determines intended use:
 *
 *     <x>_r(void) : Returns the offset for register <x>.
 *
 *     <x>_o(void) : Returns the offset for element <x>.
 *
 *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
 *
 *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
 *
 *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
 *         and masked to place it at field <y> of register <x>.  This value
 *         can be |'d with others to produce a full register value for
 *         register <x>.
 *
 *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
 *         value can be ~'d and then &'d to clear the value of field <y> for
 *         register <x>.
 *
 *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
 *         to place it at field <y> of register <x>.  This value can be |'d
 *         with others to produce a full register value for <x>.
 *
 *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
 *         <x> value 'r' after being shifted to place its LSB at bit 0.
 *         This value is suitable for direct comparison with other unshifted
 *         values appropriate for use in field <y> of register <x>.
 *
 *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
 *         field <y> of register <x>.  This value is suitable for direct
 *         comparison with unshifted values appropriate for use in field <y>
 *         of register <x>.
 */
#ifndef _hw_ccsr_gk20a_h_
#define _hw_ccsr_gk20a_h_

/* Per-channel instance block register; i is the channel id, stride 8. */
static inline u32 ccsr_channel_inst_r(u32 i)
{
	return 0x00800000U + i*8U;
}
static inline u32 ccsr_channel_inst__size_1_v(void)
{
	return 0x00000080U;
}
static inline u32 ccsr_channel_inst_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 ccsr_channel_inst_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 ccsr_channel_inst_bind_false_f(void)
{
	return 0x0U;
}
static inline u32 ccsr_channel_inst_bind_true_f(void)
{
	return 0x80000000U;
}
/* Per-channel control/status register; i is the channel id, stride 8. */
static inline u32 ccsr_channel_r(u32 i)
{
	return 0x00800004U + i*8U;
}
static inline u32 ccsr_channel__size_1_v(void)
{
	return 0x00000080U;
}
static inline u32 ccsr_channel_enable_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v)
{
	return (v & 0x1U) << 10U;
}
static inline u32 ccsr_channel_enable_set_true_f(void)
{
	return 0x400U;
}
static inline u32 ccsr_channel_enable_clr_true_f(void)
{
	return 0x800U;
}
static inline u32 ccsr_channel_runlist_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
/* Channel status field (bits 24..27) and its named values. */
static inline u32 ccsr_channel_status_v(u32 r)
{
	return (r >> 24U) & 0xfU;
}
static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
	return 0x00000002U;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
	return 0x00000004U;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
	return 0x0000000aU;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
	return 0x0000000bU;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
	return 0x0000000cU;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
	return 0x0000000dU;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
	return 0x0000000eU;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ccsr_channel_next_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
#endif
diff --git a/include/nvgpu/hw/gk20a/hw_ce2_gk20a.h b/include/nvgpu/hw/gk20a/hw_ce2_gk20a.h
deleted file mode 100644
index 87481cd..0000000
--- a/include/nvgpu/hw/gk20a/hw_ce2_gk20a.h
+++ /dev/null
@@ -1,87 +0,0 @@ 1/* 2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ce2_gk20a_h_ 57#define _hw_ce2_gk20a_h_ 58 59static inline u32 ce2_intr_status_r(void) 60{ 61 return 0x00106908U; 62} 63static inline u32 ce2_intr_status_blockpipe_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 ce2_intr_status_blockpipe_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 ce2_intr_status_nonblockpipe_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 ce2_intr_status_nonblockpipe_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 ce2_intr_status_launcherr_pending_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 ce2_intr_status_launcherr_reset_f(void) 84{ 85 return 0x4U; 86} 87#endif
diff --git a/include/nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h b/include/nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h
deleted file mode 100644
index 131fd12..0000000
--- a/include/nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h
+++ /dev/null
@@ -1,447 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ctxsw_prog_gk20a_h_ 57#define _hw_ctxsw_prog_gk20a_h_ 58 59static inline u32 ctxsw_prog_fecs_header_v(void) 60{ 61 return 0x00000100U; 62} 63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ctxsw_prog_main_image_patch_count_o(void) 68{ 69 return 0x00000010U; 70} 71static inline u32 ctxsw_prog_main_image_context_id_o(void) 72{ 73 return 0x000000f0U; 74} 75static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) 76{ 77 return 0x00000014U; 78} 79static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) 80{ 81 return 0x00000018U; 82} 83static inline u32 ctxsw_prog_main_image_zcull_o(void) 84{ 85 return 0x0000001cU; 86} 87static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) 88{ 89 return 0x00000001U; 90} 91static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) 92{ 93 return 0x00000002U; 94} 95static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) 96{ 97 return 0x00000020U; 98} 99static inline u32 ctxsw_prog_main_image_pm_o(void) 100{ 101 return 0x00000028U; 102} 103static inline u32 ctxsw_prog_main_image_pm_mode_m(void) 104{ 105 return 0x7U << 0U; 106} 107static inline u32 ctxsw_prog_main_image_pm_mode_ctxsw_f(void) 108{ 109 return 
0x1U; 110} 111static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) 112{ 113 return 0x0U; 114} 115static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) 116{ 117 return 0x7U << 3U; 118} 119static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) 120{ 121 return 0x8U; 122} 123static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) 128{ 129 return 0x0000002cU; 130} 131static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) 132{ 133 return 0x000000f4U; 134} 135static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) 136{ 137 return 0x000000f8U; 138} 139static inline u32 ctxsw_prog_main_image_magic_value_o(void) 140{ 141 return 0x000000fcU; 142} 143static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) 144{ 145 return 0x600dc0deU; 146} 147static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) 148{ 149 return 0x0000000cU; 150} 151static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) 152{ 153 return (r >> 0U) & 0xffffU; 154} 155static inline u32 ctxsw_prog_local_image_ppc_info_o(void) 156{ 157 return 0x000000f4U; 158} 159static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) 160{ 161 return (r >> 0U) & 0xffffU; 162} 163static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) 164{ 165 return (r >> 16U) & 0xffffU; 166} 167static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) 168{ 169 return 0x000000f8U; 170} 171static inline u32 ctxsw_prog_local_magic_value_o(void) 172{ 173 return 0x000000fcU; 174} 175static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) 176{ 177 return 0xad0becabU; 178} 179static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) 180{ 181 return 0x000000ecU; 182} 183static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) 184{ 185 return (r >> 0U) & 0xffffU; 186} 187static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) 
188{ 189 return (r >> 16U) & 0xffU; 190} 191static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) 192{ 193 return 0x00000100U; 194} 195static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) 196{ 197 return 0x00000004U; 198} 199static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) 200{ 201 return 0x00000005U; 202} 203static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) 204{ 205 return 0x00000004U; 206} 207static inline u32 ctxsw_prog_extended_num_smpc_quadrants_v(void) 208{ 209 return 0x00000004U; 210} 211static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) 212{ 213 return 0x000000a0U; 214} 215static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) 216{ 217 return 2U; 218} 219static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) 220{ 221 return (v & 0x3U) << 0U; 222} 223static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) 224{ 225 return 0x3U << 0U; 226} 227static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) 228{ 229 return (r >> 0U) & 0x3U; 230} 231static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) 232{ 233 return 0x0U; 234} 235static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) 236{ 237 return 0x2U; 238} 239static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) 240{ 241 return 0x000000a4U; 242} 243static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) 244{ 245 return 0x000000a8U; 246} 247static inline u32 ctxsw_prog_main_image_misc_options_o(void) 248{ 249 return 0x0000003cU; 250} 251static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) 252{ 253 return 0x1U << 3U; 254} 255static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) 256{ 257 return 0x0U; 258} 259static inline u32 
ctxsw_prog_main_image_context_timestamp_buffer_control_o(void) 260{ 261 return 0x000000acU; 262} 263static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_control_num_records_f(u32 v) 264{ 265 return (v & 0xffffU) << 0U; 266} 267static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(void) 268{ 269 return 0x000000b0U; 270} 271static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_v_m(void) 272{ 273 return 0xfffffffU << 0U; 274} 275static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_m(void) 276{ 277 return 0x3U << 28U; 278} 279static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f(void) 280{ 281 return 0x0U; 282} 283static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_coherent_f(void) 284{ 285 return 0x20000000U; 286} 287static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(void) 288{ 289 return 0x30000000U; 290} 291static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(void) 292{ 293 return 0x000000b4U; 294} 295static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_v_f(u32 v) 296{ 297 return (v & 0xffffffffU) << 0U; 298} 299static inline u32 ctxsw_prog_record_timestamp_record_size_in_bytes_v(void) 300{ 301 return 0x00000080U; 302} 303static inline u32 ctxsw_prog_record_timestamp_record_size_in_words_v(void) 304{ 305 return 0x00000020U; 306} 307static inline u32 ctxsw_prog_record_timestamp_magic_value_lo_o(void) 308{ 309 return 0x00000000U; 310} 311static inline u32 ctxsw_prog_record_timestamp_magic_value_lo_v_value_v(void) 312{ 313 return 0x00000000U; 314} 315static inline u32 ctxsw_prog_record_timestamp_magic_value_hi_o(void) 316{ 317 return 0x00000004U; 318} 319static inline u32 ctxsw_prog_record_timestamp_magic_value_hi_v_value_v(void) 320{ 321 return 0x600dbeefU; 322} 323static inline u32 ctxsw_prog_record_timestamp_context_id_o(void) 324{ 325 
return 0x00000008U; 326} 327static inline u32 ctxsw_prog_record_timestamp_context_ptr_o(void) 328{ 329 return 0x0000000cU; 330} 331static inline u32 ctxsw_prog_record_timestamp_new_context_id_o(void) 332{ 333 return 0x00000010U; 334} 335static inline u32 ctxsw_prog_record_timestamp_new_context_ptr_o(void) 336{ 337 return 0x00000014U; 338} 339static inline u32 ctxsw_prog_record_timestamp_timestamp_lo_o(void) 340{ 341 return 0x00000018U; 342} 343static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_o(void) 344{ 345 return 0x0000001cU; 346} 347static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_v_f(u32 v) 348{ 349 return (v & 0xffffffU) << 0U; 350} 351static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_v_v(u32 r) 352{ 353 return (r >> 0U) & 0xffffffU; 354} 355static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_f(u32 v) 356{ 357 return (v & 0xffU) << 24U; 358} 359static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_m(void) 360{ 361 return 0xffU << 24U; 362} 363static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_v(u32 r) 364{ 365 return (r >> 24U) & 0xffU; 366} 367static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_ctxsw_req_by_host_v(void) 368{ 369 return 0x00000001U; 370} 371static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_ctxsw_req_by_host_f(void) 372{ 373 return 0x1000000U; 374} 375static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_v(void) 376{ 377 return 0x00000002U; 378} 379static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_f(void) 380{ 381 return 0x2000000U; 382} 383static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_wfi_v(void) 384{ 385 return 0x0000000aU; 386} 387static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_wfi_f(void) 388{ 389 return 0xa000000U; 390} 391static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_gfxp_v(void) 392{ 393 return 0x0000000bU; 394} 395static inline u32 
ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_gfxp_f(void) 396{ 397 return 0xb000000U; 398} 399static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_ctap_v(void) 400{ 401 return 0x0000000cU; 402} 403static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_ctap_f(void) 404{ 405 return 0xc000000U; 406} 407static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_cilp_v(void) 408{ 409 return 0x0000000dU; 410} 411static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_cilp_f(void) 412{ 413 return 0xd000000U; 414} 415static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_save_end_v(void) 416{ 417 return 0x00000003U; 418} 419static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_save_end_f(void) 420{ 421 return 0x3000000U; 422} 423static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_restore_start_v(void) 424{ 425 return 0x00000004U; 426} 427static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_restore_start_f(void) 428{ 429 return 0x4000000U; 430} 431static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_context_start_v(void) 432{ 433 return 0x00000005U; 434} 435static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_context_start_f(void) 436{ 437 return 0x5000000U; 438} 439static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_v(void) 440{ 441 return 0x000000ffU; 442} 443static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_f(void) 444{ 445 return 0xff000000U; 446} 447#endif
diff --git a/include/nvgpu/hw/gk20a/hw_falcon_gk20a.h b/include/nvgpu/hw/gk20a/hw_falcon_gk20a.h
deleted file mode 100644
index 7b4d87b..0000000
--- a/include/nvgpu/hw/gk20a/hw_falcon_gk20a.h
+++ /dev/null
@@ -1,559 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_falcon_gk20a_h_ 57#define _hw_falcon_gk20a_h_ 58 59static inline u32 falcon_falcon_irqsset_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 falcon_falcon_irqsclr_r(void) 68{ 69 return 0x00000004U; 70} 71static inline u32 falcon_falcon_irqstat_r(void) 72{ 73 return 0x00000008U; 74} 75static inline u32 falcon_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 falcon_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 falcon_falcon_irqmode_r(void) 88{ 89 return 0x0000000cU; 90} 91static inline u32 falcon_falcon_irqmset_r(void) 92{ 93 return 0x00000010U; 94} 95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 falcon_falcon_irqmset_halt_f(u32 v) 112{ 113 
return (v & 0x1U) << 4U; 114} 115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 falcon_falcon_irqmclr_r(void) 128{ 129 return 0x00000014U; 130} 131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 falcon_falcon_irqmask_r(void) 168{ 169 return 0x00000018U; 170} 171static inline u32 falcon_falcon_irqdest_r(void) 172{ 173 return 0x0000001cU; 174} 175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 
falcon_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 falcon_falcon_curctx_r(void) 248{ 249 return 0x00000050U; 250} 251static inline u32 falcon_falcon_nxtctx_r(void) 252{ 253 return 0x00000054U; 254} 255static inline u32 falcon_falcon_mailbox0_r(void) 256{ 257 return 0x00000040U; 258} 259static inline u32 falcon_falcon_mailbox1_r(void) 260{ 261 return 0x00000044U; 262} 263static inline u32 falcon_falcon_itfen_r(void) 264{ 265 return 0x00000048U; 266} 267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 falcon_falcon_idlestate_r(void) 272{ 273 return 0x0000004cU; 274} 275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) 
276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 falcon_falcon_os_r(void) 284{ 285 return 0x00000080U; 286} 287static inline u32 falcon_falcon_engctl_r(void) 288{ 289 return 0x000000a4U; 290} 291static inline u32 falcon_falcon_cpuctl_r(void) 292{ 293 return 0x00000100U; 294} 295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) 300{ 301 return (v & 0x1U) << 2U; 302} 303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) 304{ 305 return (v & 0x1U) << 3U; 306} 307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) 308{ 309 return (v & 0x1U) << 4U; 310} 311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) 312{ 313 return 0x1U << 4U; 314} 315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) 316{ 317 return (r >> 4U) & 0x1U; 318} 319static inline u32 falcon_falcon_cpuctl_stopped_m(void) 320{ 321 return 0x1U << 5U; 322} 323static inline u32 falcon_falcon_imemc_r(u32 i) 324{ 325 return 0x00000180U + i*16U; 326} 327static inline u32 falcon_falcon_imemc_offs_f(u32 v) 328{ 329 return (v & 0x3fU) << 2U; 330} 331static inline u32 falcon_falcon_imemc_blk_f(u32 v) 332{ 333 return (v & 0xffU) << 8U; 334} 335static inline u32 falcon_falcon_imemc_aincw_f(u32 v) 336{ 337 return (v & 0x1U) << 24U; 338} 339static inline u32 falcon_falcon_imemc_secure_f(u32 v) 340{ 341 return (v & 0x1U) << 28U; 342} 343static inline u32 falcon_falcon_imemd_r(u32 i) 344{ 345 return 0x00000184U + i*16U; 346} 347static inline u32 falcon_falcon_imemt_r(u32 i) 348{ 349 return 0x00000188U + i*16U; 350} 351static inline u32 falcon_falcon_bootvec_r(void) 352{ 353 return 0x00000104U; 354} 355static inline u32 falcon_falcon_bootvec_vec_f(u32 v) 356{ 357 return (v & 0xffffffffU) << 0U; 358} 359static inline u32 falcon_falcon_dmactl_r(void) 360{ 361 return 0x0000010cU; 362} 
363static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) 364{ 365 return 0x1U << 1U; 366} 367static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) 368{ 369 return 0x1U << 2U; 370} 371static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) 372{ 373 return (v & 0x1U) << 0U; 374} 375static inline u32 falcon_falcon_hwcfg_r(void) 376{ 377 return 0x00000108U; 378} 379static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) 380{ 381 return (r >> 0U) & 0x1ffU; 382} 383static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) 384{ 385 return (r >> 9U) & 0x1ffU; 386} 387static inline u32 falcon_falcon_dmatrfbase_r(void) 388{ 389 return 0x00000110U; 390} 391static inline u32 falcon_falcon_dmatrfmoffs_r(void) 392{ 393 return 0x00000114U; 394} 395static inline u32 falcon_falcon_dmatrfcmd_r(void) 396{ 397 return 0x00000118U; 398} 399static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) 400{ 401 return (v & 0x1U) << 4U; 402} 403static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) 404{ 405 return (v & 0x1U) << 5U; 406} 407static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) 408{ 409 return (v & 0x7U) << 8U; 410} 411static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) 412{ 413 return (v & 0x7U) << 12U; 414} 415static inline u32 falcon_falcon_dmatrffboffs_r(void) 416{ 417 return 0x0000011cU; 418} 419static inline u32 falcon_falcon_imstat_r(void) 420{ 421 return 0x00000144U; 422} 423static inline u32 falcon_falcon_traceidx_r(void) 424{ 425 return 0x00000148U; 426} 427static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) 428{ 429 return (r >> 16U) & 0xffU; 430} 431static inline u32 falcon_falcon_traceidx_idx_v(u32 r) 432{ 433 return (r >> 0U) & 0xffU; 434} 435static inline u32 falcon_falcon_tracepc_r(void) 436{ 437 return 0x0000014cU; 438} 439static inline u32 falcon_falcon_tracepc_pc_v(u32 r) 440{ 441 return (r >> 0U) & 0xffffffU; 442} 443static inline u32 falcon_falcon_exterraddr_r(void) 444{ 445 return 0x00000168U; 446} 447static inline u32 
falcon_falcon_exterrstat_r(void) 448{ 449 return 0x0000016cU; 450} 451static inline u32 falcon_falcon_exterrstat_valid_m(void) 452{ 453 return 0x1U << 31U; 454} 455static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) 456{ 457 return (r >> 31U) & 0x1U; 458} 459static inline u32 falcon_falcon_exterrstat_valid_true_v(void) 460{ 461 return 0x00000001U; 462} 463static inline u32 falcon_falcon_icd_cmd_r(void) 464{ 465 return 0x00000200U; 466} 467static inline u32 falcon_falcon_icd_cmd_opc_s(void) 468{ 469 return 4U; 470} 471static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 472{ 473 return (v & 0xfU) << 0U; 474} 475static inline u32 falcon_falcon_icd_cmd_opc_m(void) 476{ 477 return 0xfU << 0U; 478} 479static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) 480{ 481 return (r >> 0U) & 0xfU; 482} 483static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) 484{ 485 return 0x8U; 486} 487static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void) 488{ 489 return 0xeU; 490} 491static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) 492{ 493 return (v & 0x1fU) << 8U; 494} 495static inline u32 falcon_falcon_icd_rdata_r(void) 496{ 497 return 0x0000020cU; 498} 499static inline u32 falcon_falcon_dmemc_r(u32 i) 500{ 501 return 0x000001c0U + i*8U; 502} 503static inline u32 falcon_falcon_dmemc_offs_f(u32 v) 504{ 505 return (v & 0x3fU) << 2U; 506} 507static inline u32 falcon_falcon_dmemc_offs_m(void) 508{ 509 return 0x3fU << 2U; 510} 511static inline u32 falcon_falcon_dmemc_blk_f(u32 v) 512{ 513 return (v & 0xffU) << 8U; 514} 515static inline u32 falcon_falcon_dmemc_blk_m(void) 516{ 517 return 0xffU << 8U; 518} 519static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) 520{ 521 return (v & 0x1U) << 24U; 522} 523static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) 524{ 525 return (v & 0x1U) << 25U; 526} 527static inline u32 falcon_falcon_dmemd_r(u32 i) 528{ 529 return 0x000001c4U + i*8U; 530} 531static inline u32 falcon_falcon_debug1_r(void) 532{ 533 return 0x00000090U; 534} 535static 
inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) 536{ 537 return 1U; 538} 539static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) 540{ 541 return (v & 0x1U) << 16U; 542} 543static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) 544{ 545 return 0x1U << 16U; 546} 547static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) 548{ 549 return (r >> 16U) & 0x1U; 550} 551static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) 552{ 553 return 0x0U; 554} 555static inline u32 falcon_falcon_debuginfo_r(void) 556{ 557 return 0x00000094U; 558} 559#endif
diff --git a/include/nvgpu/hw/gk20a/hw_fb_gk20a.h b/include/nvgpu/hw/gk20a/hw_fb_gk20a.h
deleted file mode 100644
index 42df4f5..0000000
--- a/include/nvgpu/hw/gk20a/hw_fb_gk20a.h
+++ /dev/null
@@ -1,263 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fb_gk20a_h_ 57#define _hw_fb_gk20a_h_ 58 59static inline u32 fb_mmu_ctrl_r(void) 60{ 61 return 0x00100c80U; 62} 63static inline u32 fb_mmu_ctrl_vm_pg_size_f(u32 v) 64{ 65 return (v & 0x1U) << 0U; 66} 67static inline u32 fb_mmu_ctrl_vm_pg_size_128kb_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 fb_mmu_ctrl_vm_pg_size_64kb_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) 76{ 77 return (r >> 15U) & 0x1U; 78} 79static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) 80{ 81 return 0x0U; 82} 83static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) 84{ 85 return (r >> 16U) & 0xffU; 86} 87static inline u32 fb_mmu_invalidate_pdb_r(void) 88{ 89 return 0x00100cb8U; 90} 91static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) 96{ 97 return 0x2U; 98} 99static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) 100{ 101 return (v & 0xfffffffU) << 4U; 102} 103static inline u32 fb_mmu_invalidate_r(void) 104{ 105 return 0x00100cbcU; 106} 107static inline u32 fb_mmu_invalidate_all_va_true_f(void) 108{ 109 return 0x1U; 110} 111static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) 112{ 113 return 0x2U; 114} 
115static inline u32 fb_mmu_invalidate_trigger_s(void) 116{ 117 return 1U; 118} 119static inline u32 fb_mmu_invalidate_trigger_f(u32 v) 120{ 121 return (v & 0x1U) << 31U; 122} 123static inline u32 fb_mmu_invalidate_trigger_m(void) 124{ 125 return 0x1U << 31U; 126} 127static inline u32 fb_mmu_invalidate_trigger_v(u32 r) 128{ 129 return (r >> 31U) & 0x1U; 130} 131static inline u32 fb_mmu_invalidate_trigger_true_f(void) 132{ 133 return 0x80000000U; 134} 135static inline u32 fb_mmu_debug_wr_r(void) 136{ 137 return 0x00100cc8U; 138} 139static inline u32 fb_mmu_debug_wr_aperture_s(void) 140{ 141 return 2U; 142} 143static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) 144{ 145 return (v & 0x3U) << 0U; 146} 147static inline u32 fb_mmu_debug_wr_aperture_m(void) 148{ 149 return 0x3U << 0U; 150} 151static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) 152{ 153 return (r >> 0U) & 0x3U; 154} 155static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) 164{ 165 return 0x3U; 166} 167static inline u32 fb_mmu_debug_wr_vol_false_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 fb_mmu_debug_wr_vol_true_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 fb_mmu_debug_wr_vol_true_f(void) 176{ 177 return 0x4U; 178} 179static inline u32 fb_mmu_debug_wr_addr_f(u32 v) 180{ 181 return (v & 0xfffffffU) << 4U; 182} 183static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) 184{ 185 return 0x0000000cU; 186} 187static inline u32 fb_mmu_debug_rd_r(void) 188{ 189 return 0x00100cccU; 190} 191static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) 192{ 193 return 0x0U; 194} 195static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) 196{ 197 return 0x2U; 198} 199static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) 200{ 201 return 0x3U; 202} 203static inline u32 
fb_mmu_debug_rd_vol_false_f(void) 204{ 205 return 0x0U; 206} 207static inline u32 fb_mmu_debug_rd_addr_f(u32 v) 208{ 209 return (v & 0xfffffffU) << 4U; 210} 211static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) 212{ 213 return 0x0000000cU; 214} 215static inline u32 fb_mmu_debug_ctrl_r(void) 216{ 217 return 0x00100cc4U; 218} 219static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) 220{ 221 return (r >> 16U) & 0x1U; 222} 223static inline u32 fb_mmu_debug_ctrl_debug_m(void) 224{ 225 return 0x1U << 16U; 226} 227static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) 228{ 229 return 0x00000001U; 230} 231static inline u32 fb_mmu_debug_ctrl_debug_enabled_f(void) 232{ 233 return 0x10000U; 234} 235static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) 236{ 237 return 0x00000000U; 238} 239static inline u32 fb_mmu_debug_ctrl_debug_disabled_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 fb_mmu_vpr_info_r(void) 244{ 245 return 0x00100cd0U; 246} 247static inline u32 fb_mmu_vpr_info_fetch_v(u32 r) 248{ 249 return (r >> 2U) & 0x1U; 250} 251static inline u32 fb_mmu_vpr_info_fetch_false_v(void) 252{ 253 return 0x00000000U; 254} 255static inline u32 fb_mmu_vpr_info_fetch_true_v(void) 256{ 257 return 0x00000001U; 258} 259static inline u32 fb_niso_flush_sysmem_addr_r(void) 260{ 261 return 0x00100c10U; 262} 263#endif
diff --git a/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h b/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h
deleted file mode 100644
index e61e386..0000000
--- a/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h
+++ /dev/null
@@ -1,619 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fifo_gk20a_h_ 57#define _hw_fifo_gk20a_h_ 58 59static inline u32 fifo_bar1_base_r(void) 60{ 61 return 0x00002254U; 62} 63static inline u32 fifo_bar1_base_ptr_f(u32 v) 64{ 65 return (v & 0xfffffffU) << 0U; 66} 67static inline u32 fifo_bar1_base_ptr_align_shift_v(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 fifo_bar1_base_valid_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fifo_bar1_base_valid_true_f(void) 76{ 77 return 0x10000000U; 78} 79static inline u32 fifo_runlist_base_r(void) 80{ 81 return 0x00002270U; 82} 83static inline u32 fifo_runlist_base_ptr_f(u32 v) 84{ 85 return (v & 0xfffffffU) << 0U; 86} 87static inline u32 fifo_runlist_base_target_vid_mem_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) 92{ 93 return 0x20000000U; 94} 95static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) 96{ 97 return 0x30000000U; 98} 99static inline u32 fifo_runlist_r(void) 100{ 101 return 0x00002274U; 102} 103static inline u32 fifo_runlist_engine_f(u32 v) 104{ 105 return (v & 0xfU) << 20U; 106} 107static inline u32 fifo_eng_runlist_base_r(u32 i) 108{ 109 return 0x00002280U + i*8U; 110} 111static inline u32 fifo_eng_runlist_base__size_1_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 fifo_eng_runlist_r(u32 i) 116{ 117 return 0x00002284U + i*8U; 118} 119static inline u32 fifo_eng_runlist__size_1_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 fifo_eng_runlist_length_f(u32 v) 124{ 125 return (v & 0xffffU) << 0U; 126} 127static inline u32 fifo_eng_runlist_length_max_v(void) 128{ 129 return 0x0000ffffU; 130} 131static inline u32 fifo_eng_runlist_pending_true_f(void) 132{ 133 return 0x100000U; 134} 135static inline u32 fifo_runlist_timeslice_r(u32 i) 136{ 137 return 0x00002310U + i*4U; 138} 139static inline u32 fifo_runlist_timeslice_timeout_128_f(void) 140{ 141 return 0x80U; 142} 143static inline u32 fifo_runlist_timeslice_timescale_3_f(void) 144{ 145 return 0x3000U; 146} 147static inline u32 fifo_runlist_timeslice_enable_true_f(void) 148{ 149 return 0x10000000U; 150} 151static inline u32 fifo_eng_timeout_r(void) 152{ 153 return 0x00002a0cU; 154} 155static inline u32 fifo_eng_timeout_period_max_f(void) 156{ 157 return 0x7fffffffU; 158} 159static inline u32 fifo_eng_timeout_detection_enabled_f(void) 160{ 161 return 0x80000000U; 162} 163static inline u32 fifo_eng_timeout_detection_disabled_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 fifo_pb_timeslice_r(u32 i) 168{ 169 return 0x00002350U + i*4U; 170} 171static inline u32 fifo_pb_timeslice_timeout_16_f(void) 172{ 173 return 0x10U; 174} 175static inline u32 fifo_pb_timeslice_timescale_0_f(void) 176{ 177 return 0x0U; 178} 179static inline u32 fifo_pb_timeslice_enable_true_f(void) 180{ 181 return 0x10000000U; 182} 183static inline u32 fifo_pbdma_map_r(u32 i) 184{ 185 return 0x00002390U + i*4U; 186} 187static inline u32 fifo_intr_0_r(void) 188{ 189 return 0x00002100U; 190} 191static inline u32 fifo_intr_0_bind_error_pending_f(void) 192{ 193 return 0x1U; 194} 195static inline u32 fifo_intr_0_bind_error_reset_f(void) 196{ 197 return 0x1U; 198} 199static inline u32 fifo_intr_0_pio_error_pending_f(void) 200{ 201 return 0x10U; 202} 203static 
inline u32 fifo_intr_0_pio_error_reset_f(void) 204{ 205 return 0x10U; 206} 207static inline u32 fifo_intr_0_sched_error_pending_f(void) 208{ 209 return 0x100U; 210} 211static inline u32 fifo_intr_0_sched_error_reset_f(void) 212{ 213 return 0x100U; 214} 215static inline u32 fifo_intr_0_chsw_error_pending_f(void) 216{ 217 return 0x10000U; 218} 219static inline u32 fifo_intr_0_chsw_error_reset_f(void) 220{ 221 return 0x10000U; 222} 223static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) 224{ 225 return 0x800000U; 226} 227static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) 228{ 229 return 0x800000U; 230} 231static inline u32 fifo_intr_0_lb_error_pending_f(void) 232{ 233 return 0x1000000U; 234} 235static inline u32 fifo_intr_0_lb_error_reset_f(void) 236{ 237 return 0x1000000U; 238} 239static inline u32 fifo_intr_0_dropped_mmu_fault_pending_f(void) 240{ 241 return 0x8000000U; 242} 243static inline u32 fifo_intr_0_dropped_mmu_fault_reset_f(void) 244{ 245 return 0x8000000U; 246} 247static inline u32 fifo_intr_0_mmu_fault_pending_f(void) 248{ 249 return 0x10000000U; 250} 251static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) 252{ 253 return 0x20000000U; 254} 255static inline u32 fifo_intr_0_runlist_event_pending_f(void) 256{ 257 return 0x40000000U; 258} 259static inline u32 fifo_intr_0_channel_intr_pending_f(void) 260{ 261 return 0x80000000U; 262} 263static inline u32 fifo_intr_en_0_r(void) 264{ 265 return 0x00002140U; 266} 267static inline u32 fifo_intr_en_0_sched_error_f(u32 v) 268{ 269 return (v & 0x1U) << 8U; 270} 271static inline u32 fifo_intr_en_0_sched_error_m(void) 272{ 273 return 0x1U << 8U; 274} 275static inline u32 fifo_intr_en_0_mmu_fault_f(u32 v) 276{ 277 return (v & 0x1U) << 28U; 278} 279static inline u32 fifo_intr_en_0_mmu_fault_m(void) 280{ 281 return 0x1U << 28U; 282} 283static inline u32 fifo_intr_en_1_r(void) 284{ 285 return 0x00002528U; 286} 287static inline u32 fifo_intr_bind_error_r(void) 288{ 289 return 0x0000252cU; 290} 
291static inline u32 fifo_intr_sched_error_r(void) 292{ 293 return 0x0000254cU; 294} 295static inline u32 fifo_intr_sched_error_code_f(u32 v) 296{ 297 return (v & 0xffU) << 0U; 298} 299static inline u32 fifo_intr_sched_error_code_ctxsw_timeout_v(void) 300{ 301 return 0x0000000aU; 302} 303static inline u32 fifo_intr_chsw_error_r(void) 304{ 305 return 0x0000256cU; 306} 307static inline u32 fifo_intr_mmu_fault_id_r(void) 308{ 309 return 0x0000259cU; 310} 311static inline u32 fifo_intr_mmu_fault_eng_id_graphics_v(void) 312{ 313 return 0x00000000U; 314} 315static inline u32 fifo_intr_mmu_fault_eng_id_graphics_f(void) 316{ 317 return 0x0U; 318} 319static inline u32 fifo_intr_mmu_fault_inst_r(u32 i) 320{ 321 return 0x00002800U + i*16U; 322} 323static inline u32 fifo_intr_mmu_fault_inst_ptr_v(u32 r) 324{ 325 return (r >> 0U) & 0xfffffffU; 326} 327static inline u32 fifo_intr_mmu_fault_inst_ptr_align_shift_v(void) 328{ 329 return 0x0000000cU; 330} 331static inline u32 fifo_intr_mmu_fault_lo_r(u32 i) 332{ 333 return 0x00002804U + i*16U; 334} 335static inline u32 fifo_intr_mmu_fault_hi_r(u32 i) 336{ 337 return 0x00002808U + i*16U; 338} 339static inline u32 fifo_intr_mmu_fault_info_r(u32 i) 340{ 341 return 0x0000280cU + i*16U; 342} 343static inline u32 fifo_intr_mmu_fault_info_type_v(u32 r) 344{ 345 return (r >> 0U) & 0xfU; 346} 347static inline u32 fifo_intr_mmu_fault_info_write_v(u32 r) 348{ 349 return (r >> 7U) & 0x1U; 350} 351static inline u32 fifo_intr_mmu_fault_info_engine_subid_v(u32 r) 352{ 353 return (r >> 6U) & 0x1U; 354} 355static inline u32 fifo_intr_mmu_fault_info_engine_subid_gpc_v(void) 356{ 357 return 0x00000000U; 358} 359static inline u32 fifo_intr_mmu_fault_info_engine_subid_hub_v(void) 360{ 361 return 0x00000001U; 362} 363static inline u32 fifo_intr_mmu_fault_info_client_v(u32 r) 364{ 365 return (r >> 8U) & 0x1fU; 366} 367static inline u32 fifo_intr_pbdma_id_r(void) 368{ 369 return 0x000025a0U; 370} 371static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 
i) 372{ 373 return (v & 0x1U) << (0U + i*1U); 374} 375static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) 376{ 377 return (r >> (0U + i*1U)) & 0x1U; 378} 379static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 fifo_intr_runlist_r(void) 384{ 385 return 0x00002a00U; 386} 387static inline u32 fifo_fb_timeout_r(void) 388{ 389 return 0x00002a04U; 390} 391static inline u32 fifo_fb_timeout_period_m(void) 392{ 393 return 0x3fffffffU << 0U; 394} 395static inline u32 fifo_fb_timeout_period_max_f(void) 396{ 397 return 0x3fffffffU; 398} 399static inline u32 fifo_pb_timeout_r(void) 400{ 401 return 0x00002a08U; 402} 403static inline u32 fifo_pb_timeout_detection_enabled_f(void) 404{ 405 return 0x80000000U; 406} 407static inline u32 fifo_error_sched_disable_r(void) 408{ 409 return 0x0000262cU; 410} 411static inline u32 fifo_sched_disable_r(void) 412{ 413 return 0x00002630U; 414} 415static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) 416{ 417 return (v & 0x1U) << (0U + i*1U); 418} 419static inline u32 fifo_sched_disable_runlist_m(u32 i) 420{ 421 return 0x1U << (0U + i*1U); 422} 423static inline u32 fifo_sched_disable_true_v(void) 424{ 425 return 0x00000001U; 426} 427static inline u32 fifo_preempt_r(void) 428{ 429 return 0x00002634U; 430} 431static inline u32 fifo_preempt_pending_true_f(void) 432{ 433 return 0x100000U; 434} 435static inline u32 fifo_preempt_type_channel_f(void) 436{ 437 return 0x0U; 438} 439static inline u32 fifo_preempt_type_tsg_f(void) 440{ 441 return 0x1000000U; 442} 443static inline u32 fifo_preempt_chid_f(u32 v) 444{ 445 return (v & 0xfffU) << 0U; 446} 447static inline u32 fifo_preempt_id_f(u32 v) 448{ 449 return (v & 0xfffU) << 0U; 450} 451static inline u32 fifo_trigger_mmu_fault_r(u32 i) 452{ 453 return 0x00002a30U + i*4U; 454} 455static inline u32 fifo_trigger_mmu_fault_id_f(u32 v) 456{ 457 return (v & 0x1fU) << 0U; 458} 459static inline u32 fifo_trigger_mmu_fault_enable_f(u32 v) 
460{ 461 return (v & 0x1U) << 8U; 462} 463static inline u32 fifo_engine_status_r(u32 i) 464{ 465 return 0x00002640U + i*8U; 466} 467static inline u32 fifo_engine_status__size_1_v(void) 468{ 469 return 0x00000002U; 470} 471static inline u32 fifo_engine_status_id_v(u32 r) 472{ 473 return (r >> 0U) & 0xfffU; 474} 475static inline u32 fifo_engine_status_id_type_v(u32 r) 476{ 477 return (r >> 12U) & 0x1U; 478} 479static inline u32 fifo_engine_status_id_type_chid_v(void) 480{ 481 return 0x00000000U; 482} 483static inline u32 fifo_engine_status_id_type_tsgid_v(void) 484{ 485 return 0x00000001U; 486} 487static inline u32 fifo_engine_status_ctx_status_v(u32 r) 488{ 489 return (r >> 13U) & 0x7U; 490} 491static inline u32 fifo_engine_status_ctx_status_invalid_v(void) 492{ 493 return 0x00000000U; 494} 495static inline u32 fifo_engine_status_ctx_status_valid_v(void) 496{ 497 return 0x00000001U; 498} 499static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) 500{ 501 return 0x00000005U; 502} 503static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) 504{ 505 return 0x00000006U; 506} 507static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) 508{ 509 return 0x00000007U; 510} 511static inline u32 fifo_engine_status_next_id_v(u32 r) 512{ 513 return (r >> 16U) & 0xfffU; 514} 515static inline u32 fifo_engine_status_next_id_type_v(u32 r) 516{ 517 return (r >> 28U) & 0x1U; 518} 519static inline u32 fifo_engine_status_next_id_type_chid_v(void) 520{ 521 return 0x00000000U; 522} 523static inline u32 fifo_engine_status_faulted_v(u32 r) 524{ 525 return (r >> 30U) & 0x1U; 526} 527static inline u32 fifo_engine_status_faulted_true_v(void) 528{ 529 return 0x00000001U; 530} 531static inline u32 fifo_engine_status_engine_v(u32 r) 532{ 533 return (r >> 31U) & 0x1U; 534} 535static inline u32 fifo_engine_status_engine_idle_v(void) 536{ 537 return 0x00000000U; 538} 539static inline u32 fifo_engine_status_engine_busy_v(void) 540{ 541 return 0x00000001U; 542} 
543static inline u32 fifo_engine_status_ctxsw_v(u32 r) 544{ 545 return (r >> 15U) & 0x1U; 546} 547static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) 548{ 549 return 0x00000001U; 550} 551static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) 552{ 553 return 0x8000U; 554} 555static inline u32 fifo_pbdma_status_r(u32 i) 556{ 557 return 0x00003080U + i*4U; 558} 559static inline u32 fifo_pbdma_status__size_1_v(void) 560{ 561 return 0x00000001U; 562} 563static inline u32 fifo_pbdma_status_id_v(u32 r) 564{ 565 return (r >> 0U) & 0xfffU; 566} 567static inline u32 fifo_pbdma_status_id_type_v(u32 r) 568{ 569 return (r >> 12U) & 0x1U; 570} 571static inline u32 fifo_pbdma_status_id_type_chid_v(void) 572{ 573 return 0x00000000U; 574} 575static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) 576{ 577 return 0x00000001U; 578} 579static inline u32 fifo_pbdma_status_chan_status_v(u32 r) 580{ 581 return (r >> 13U) & 0x7U; 582} 583static inline u32 fifo_pbdma_status_chan_status_valid_v(void) 584{ 585 return 0x00000001U; 586} 587static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) 588{ 589 return 0x00000005U; 590} 591static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) 592{ 593 return 0x00000006U; 594} 595static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) 596{ 597 return 0x00000007U; 598} 599static inline u32 fifo_pbdma_status_next_id_v(u32 r) 600{ 601 return (r >> 16U) & 0xfffU; 602} 603static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) 604{ 605 return (r >> 28U) & 0x1U; 606} 607static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) 608{ 609 return 0x00000000U; 610} 611static inline u32 fifo_pbdma_status_chsw_v(u32 r) 612{ 613 return (r >> 15U) & 0x1U; 614} 615static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) 616{ 617 return 0x00000001U; 618} 619#endif
diff --git a/include/nvgpu/hw/gk20a/hw_flush_gk20a.h b/include/nvgpu/hw/gk20a/hw_flush_gk20a.h
deleted file mode 100644
index d270b5f..0000000
--- a/include/nvgpu/hw/gk20a/hw_flush_gk20a.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_flush_gk20a_h_ 57#define _hw_flush_gk20a_h_ 58 59static inline u32 flush_l2_system_invalidate_r(void) 60{ 61 return 0x00070004U; 62} 63static inline u32 flush_l2_system_invalidate_pending_v(u32 r) 64{ 65 return (r >> 0U) & 0x1U; 66} 67static inline u32 flush_l2_system_invalidate_pending_busy_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 flush_l2_system_invalidate_pending_busy_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) 76{ 77 return (r >> 1U) & 0x1U; 78} 79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 flush_l2_flush_dirty_r(void) 84{ 85 return 0x00070010U; 86} 87static inline u32 flush_l2_flush_dirty_pending_v(u32 r) 88{ 89 return (r >> 0U) & 0x1U; 90} 91static inline u32 flush_l2_flush_dirty_pending_empty_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 flush_l2_flush_dirty_pending_empty_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 flush_l2_flush_dirty_pending_busy_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 flush_l2_flush_dirty_pending_busy_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) 108{ 109 return 
(r >> 1U) & 0x1U; 110} 111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 flush_l2_clean_comptags_r(void) 124{ 125 return 0x0007000cU; 126} 127static inline u32 flush_l2_clean_comptags_pending_v(u32 r) 128{ 129 return (r >> 0U) & 0x1U; 130} 131static inline u32 flush_l2_clean_comptags_pending_empty_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 flush_l2_clean_comptags_pending_empty_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 flush_l2_clean_comptags_pending_busy_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 flush_l2_clean_comptags_pending_busy_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 flush_fb_flush_r(void) 164{ 165 return 0x00070000U; 166} 167static inline u32 flush_fb_flush_pending_v(u32 r) 168{ 169 return (r >> 0U) & 0x1U; 170} 171static inline u32 flush_fb_flush_pending_busy_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 flush_fb_flush_pending_busy_f(void) 176{ 177 return 0x1U; 178} 179static inline u32 flush_fb_flush_outstanding_v(u32 r) 180{ 181 return (r >> 1U) & 0x1U; 182} 183static inline u32 flush_fb_flush_outstanding_true_v(void) 184{ 185 return 0x00000001U; 186} 187#endif
diff --git a/include/nvgpu/hw/gk20a/hw_gmmu_gk20a.h b/include/nvgpu/hw/gk20a/hw_gmmu_gk20a.h
deleted file mode 100644
index a788d1d..0000000
--- a/include/nvgpu/hw/gk20a/hw_gmmu_gk20a.h
+++ /dev/null
@@ -1,283 +0,0 @@ 1/* 2 * Copyright (c) 2012-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gmmu_gk20a_h_ 57#define _hw_gmmu_gk20a_h_ 58 59static inline u32 gmmu_pde_aperture_big_w(void) 60{ 61 return 0U; 62} 63static inline u32 gmmu_pde_aperture_big_invalid_f(void) 64{ 65 return 0x0U; 66} 67static inline u32 gmmu_pde_aperture_big_video_memory_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gmmu_pde_aperture_big_sys_mem_coh_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gmmu_pde_aperture_big_sys_mem_ncoh_f(void) 76{ 77 return 0x3U; 78} 79static inline u32 gmmu_pde_size_w(void) 80{ 81 return 0U; 82} 83static inline u32 gmmu_pde_size_full_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 gmmu_pde_address_big_sys_f(u32 v) 88{ 89 return (v & 0xfffffffU) << 4U; 90} 91static inline u32 gmmu_pde_address_big_sys_w(void) 92{ 93 return 0U; 94} 95static inline u32 gmmu_pde_aperture_small_w(void) 96{ 97 return 1U; 98} 99static inline u32 gmmu_pde_aperture_small_invalid_f(void) 100{ 101 return 0x0U; 102} 103static inline u32 gmmu_pde_aperture_small_video_memory_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 gmmu_pde_aperture_small_sys_mem_coh_f(void) 108{ 109 return 0x2U; 110} 111static inline u32 gmmu_pde_aperture_small_sys_mem_ncoh_f(void) 112{ 113 return 0x3U; 114} 115static inline u32 
gmmu_pde_vol_small_w(void) 116{ 117 return 1U; 118} 119static inline u32 gmmu_pde_vol_small_true_f(void) 120{ 121 return 0x4U; 122} 123static inline u32 gmmu_pde_vol_small_false_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 gmmu_pde_vol_big_w(void) 128{ 129 return 1U; 130} 131static inline u32 gmmu_pde_vol_big_true_f(void) 132{ 133 return 0x8U; 134} 135static inline u32 gmmu_pde_vol_big_false_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 gmmu_pde_address_small_sys_f(u32 v) 140{ 141 return (v & 0xfffffffU) << 4U; 142} 143static inline u32 gmmu_pde_address_small_sys_w(void) 144{ 145 return 1U; 146} 147static inline u32 gmmu_pde_address_shift_v(void) 148{ 149 return 0x0000000cU; 150} 151static inline u32 gmmu_pde__size_v(void) 152{ 153 return 0x00000008U; 154} 155static inline u32 gmmu_pte__size_v(void) 156{ 157 return 0x00000008U; 158} 159static inline u32 gmmu_pte_valid_w(void) 160{ 161 return 0U; 162} 163static inline u32 gmmu_pte_valid_true_f(void) 164{ 165 return 0x1U; 166} 167static inline u32 gmmu_pte_valid_false_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 gmmu_pte_privilege_w(void) 172{ 173 return 0U; 174} 175static inline u32 gmmu_pte_privilege_true_f(void) 176{ 177 return 0x2U; 178} 179static inline u32 gmmu_pte_privilege_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 gmmu_pte_address_sys_f(u32 v) 184{ 185 return (v & 0xfffffffU) << 4U; 186} 187static inline u32 gmmu_pte_address_sys_w(void) 188{ 189 return 0U; 190} 191static inline u32 gmmu_pte_address_vid_f(u32 v) 192{ 193 return (v & 0x1ffffffU) << 4U; 194} 195static inline u32 gmmu_pte_address_vid_w(void) 196{ 197 return 0U; 198} 199static inline u32 gmmu_pte_vol_w(void) 200{ 201 return 1U; 202} 203static inline u32 gmmu_pte_vol_true_f(void) 204{ 205 return 0x1U; 206} 207static inline u32 gmmu_pte_vol_false_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 gmmu_pte_aperture_w(void) 212{ 213 return 1U; 214} 215static inline u32 
gmmu_pte_aperture_video_memory_f(void) 216{ 217 return 0x0U; 218} 219static inline u32 gmmu_pte_aperture_sys_mem_coh_f(void) 220{ 221 return 0x4U; 222} 223static inline u32 gmmu_pte_aperture_sys_mem_ncoh_f(void) 224{ 225 return 0x6U; 226} 227static inline u32 gmmu_pte_read_only_w(void) 228{ 229 return 0U; 230} 231static inline u32 gmmu_pte_read_only_true_f(void) 232{ 233 return 0x4U; 234} 235static inline u32 gmmu_pte_write_disable_w(void) 236{ 237 return 1U; 238} 239static inline u32 gmmu_pte_write_disable_true_f(void) 240{ 241 return 0x80000000U; 242} 243static inline u32 gmmu_pte_read_disable_w(void) 244{ 245 return 1U; 246} 247static inline u32 gmmu_pte_read_disable_true_f(void) 248{ 249 return 0x40000000U; 250} 251static inline u32 gmmu_pte_comptagline_s(void) 252{ 253 return 17U; 254} 255static inline u32 gmmu_pte_comptagline_f(u32 v) 256{ 257 return (v & 0x1ffffU) << 12U; 258} 259static inline u32 gmmu_pte_comptagline_w(void) 260{ 261 return 1U; 262} 263static inline u32 gmmu_pte_address_shift_v(void) 264{ 265 return 0x0000000cU; 266} 267static inline u32 gmmu_pte_kind_f(u32 v) 268{ 269 return (v & 0xffU) << 4U; 270} 271static inline u32 gmmu_pte_kind_w(void) 272{ 273 return 1U; 274} 275static inline u32 gmmu_pte_kind_invalid_v(void) 276{ 277 return 0x000000ffU; 278} 279static inline u32 gmmu_pte_kind_pitch_v(void) 280{ 281 return 0x00000000U; 282} 283#endif
diff --git a/include/nvgpu/hw/gk20a/hw_gr_gk20a.h b/include/nvgpu/hw/gk20a/hw_gr_gk20a.h
deleted file mode 100644
index 376cc8f..0000000
--- a/include/nvgpu/hw/gk20a/hw_gr_gk20a.h
+++ /dev/null
@@ -1,3868 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gr_gk20a_h_ 57#define _hw_gr_gk20a_h_ 58 59static inline u32 gr_intr_r(void) 60{ 61 return 0x00400100U; 62} 63static inline u32 gr_intr_notify_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 gr_intr_notify_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gr_intr_semaphore_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gr_intr_semaphore_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gr_intr_semaphore_timeout_not_pending_f(void) 80{ 81 return 0x0U; 82} 83static inline u32 gr_intr_semaphore_timeout_pending_f(void) 84{ 85 return 0x4U; 86} 87static inline u32 gr_intr_semaphore_timeout_reset_f(void) 88{ 89 return 0x4U; 90} 91static inline u32 gr_intr_illegal_method_pending_f(void) 92{ 93 return 0x10U; 94} 95static inline u32 gr_intr_illegal_method_reset_f(void) 96{ 97 return 0x10U; 98} 99static inline u32 gr_intr_illegal_notify_pending_f(void) 100{ 101 return 0x40U; 102} 103static inline u32 gr_intr_illegal_notify_reset_f(void) 104{ 105 return 0x40U; 106} 107static inline u32 gr_intr_firmware_method_f(u32 v) 108{ 109 return (v & 0x1U) << 8U; 110} 111static inline u32 gr_intr_firmware_method_pending_f(void) 112{ 113 return 0x100U; 114} 115static inline u32 gr_intr_firmware_method_reset_f(void) 
116{ 117 return 0x100U; 118} 119static inline u32 gr_intr_illegal_class_pending_f(void) 120{ 121 return 0x20U; 122} 123static inline u32 gr_intr_illegal_class_reset_f(void) 124{ 125 return 0x20U; 126} 127static inline u32 gr_intr_fecs_error_pending_f(void) 128{ 129 return 0x80000U; 130} 131static inline u32 gr_intr_fecs_error_reset_f(void) 132{ 133 return 0x80000U; 134} 135static inline u32 gr_intr_class_error_pending_f(void) 136{ 137 return 0x100000U; 138} 139static inline u32 gr_intr_class_error_reset_f(void) 140{ 141 return 0x100000U; 142} 143static inline u32 gr_intr_exception_pending_f(void) 144{ 145 return 0x200000U; 146} 147static inline u32 gr_intr_exception_reset_f(void) 148{ 149 return 0x200000U; 150} 151static inline u32 gr_fecs_intr_r(void) 152{ 153 return 0x00400144U; 154} 155static inline u32 gr_class_error_r(void) 156{ 157 return 0x00400110U; 158} 159static inline u32 gr_class_error_code_v(u32 r) 160{ 161 return (r >> 0U) & 0xffffU; 162} 163static inline u32 gr_intr_nonstall_r(void) 164{ 165 return 0x00400120U; 166} 167static inline u32 gr_intr_nonstall_trap_pending_f(void) 168{ 169 return 0x2U; 170} 171static inline u32 gr_intr_en_r(void) 172{ 173 return 0x0040013cU; 174} 175static inline u32 gr_exception_r(void) 176{ 177 return 0x00400108U; 178} 179static inline u32 gr_exception_fe_m(void) 180{ 181 return 0x1U << 0U; 182} 183static inline u32 gr_exception_gpc_m(void) 184{ 185 return 0x1U << 24U; 186} 187static inline u32 gr_exception_memfmt_m(void) 188{ 189 return 0x1U << 1U; 190} 191static inline u32 gr_exception_ds_m(void) 192{ 193 return 0x1U << 4U; 194} 195static inline u32 gr_exception_sked_m(void) 196{ 197 return 0x1U << 8U; 198} 199static inline u32 gr_exception_pd_m(void) 200{ 201 return 0x1U << 2U; 202} 203static inline u32 gr_exception_scc_m(void) 204{ 205 return 0x1U << 3U; 206} 207static inline u32 gr_exception_ssync_m(void) 208{ 209 return 0x1U << 5U; 210} 211static inline u32 gr_exception_mme_m(void) 212{ 213 return 0x1U << 7U; 214} 
215static inline u32 gr_exception1_r(void) 216{ 217 return 0x00400118U; 218} 219static inline u32 gr_exception1_gpc_0_pending_f(void) 220{ 221 return 0x1U; 222} 223static inline u32 gr_exception2_r(void) 224{ 225 return 0x0040011cU; 226} 227static inline u32 gr_exception_en_r(void) 228{ 229 return 0x00400138U; 230} 231static inline u32 gr_exception_en_fe_m(void) 232{ 233 return 0x1U << 0U; 234} 235static inline u32 gr_exception1_en_r(void) 236{ 237 return 0x00400130U; 238} 239static inline u32 gr_exception2_en_r(void) 240{ 241 return 0x00400134U; 242} 243static inline u32 gr_gpfifo_ctl_r(void) 244{ 245 return 0x00400500U; 246} 247static inline u32 gr_gpfifo_ctl_access_f(u32 v) 248{ 249 return (v & 0x1U) << 0U; 250} 251static inline u32 gr_gpfifo_ctl_access_disabled_f(void) 252{ 253 return 0x0U; 254} 255static inline u32 gr_gpfifo_ctl_access_enabled_f(void) 256{ 257 return 0x1U; 258} 259static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) 260{ 261 return (v & 0x1U) << 16U; 262} 263static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) 264{ 265 return 0x00000001U; 266} 267static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) 268{ 269 return 0x10000U; 270} 271static inline u32 gr_gpfifo_status_r(void) 272{ 273 return 0x00400504U; 274} 275static inline u32 gr_trapped_addr_r(void) 276{ 277 return 0x00400704U; 278} 279static inline u32 gr_trapped_addr_mthd_v(u32 r) 280{ 281 return (r >> 2U) & 0xfffU; 282} 283static inline u32 gr_trapped_addr_subch_v(u32 r) 284{ 285 return (r >> 16U) & 0x7U; 286} 287static inline u32 gr_trapped_addr_mme_generated_v(u32 r) 288{ 289 return (r >> 20U) & 0x1U; 290} 291static inline u32 gr_trapped_addr_datahigh_v(u32 r) 292{ 293 return (r >> 24U) & 0x1U; 294} 295static inline u32 gr_trapped_addr_priv_v(u32 r) 296{ 297 return (r >> 28U) & 0x1U; 298} 299static inline u32 gr_trapped_addr_status_v(u32 r) 300{ 301 return (r >> 31U) & 0x1U; 302} 303static inline u32 gr_trapped_data_lo_r(void) 304{ 305 return 0x00400708U; 
306} 307static inline u32 gr_trapped_data_hi_r(void) 308{ 309 return 0x0040070cU; 310} 311static inline u32 gr_trapped_data_mme_r(void) 312{ 313 return 0x00400710U; 314} 315static inline u32 gr_trapped_data_mme_pc_v(u32 r) 316{ 317 return (r >> 0U) & 0x7ffU; 318} 319static inline u32 gr_status_r(void) 320{ 321 return 0x00400700U; 322} 323static inline u32 gr_status_fe_method_upper_v(u32 r) 324{ 325 return (r >> 1U) & 0x1U; 326} 327static inline u32 gr_status_fe_method_lower_v(u32 r) 328{ 329 return (r >> 2U) & 0x1U; 330} 331static inline u32 gr_status_fe_method_lower_idle_v(void) 332{ 333 return 0x00000000U; 334} 335static inline u32 gr_status_fe_gi_v(u32 r) 336{ 337 return (r >> 21U) & 0x1U; 338} 339static inline u32 gr_status_mask_r(void) 340{ 341 return 0x00400610U; 342} 343static inline u32 gr_status_1_r(void) 344{ 345 return 0x00400604U; 346} 347static inline u32 gr_status_2_r(void) 348{ 349 return 0x00400608U; 350} 351static inline u32 gr_engine_status_r(void) 352{ 353 return 0x0040060cU; 354} 355static inline u32 gr_engine_status_value_busy_f(void) 356{ 357 return 0x1U; 358} 359static inline u32 gr_pri_be0_becs_be_exception_r(void) 360{ 361 return 0x00410204U; 362} 363static inline u32 gr_pri_be0_becs_be_exception_en_r(void) 364{ 365 return 0x00410208U; 366} 367static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) 368{ 369 return 0x00502c90U; 370} 371static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) 372{ 373 return 0x00502c94U; 374} 375static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) 376{ 377 return 0x00504508U; 378} 379static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 380{ 381 return 0x0050450cU; 382} 383static inline u32 gr_activity_0_r(void) 384{ 385 return 0x00400380U; 386} 387static inline u32 gr_activity_1_r(void) 388{ 389 return 0x00400384U; 390} 391static inline u32 gr_activity_2_r(void) 392{ 393 return 0x00400388U; 394} 395static inline u32 gr_activity_4_r(void) 396{ 397 return 0x00400390U; 398} 399static 
inline u32 gr_pri_gpc0_gcc_dbg_r(void) 400{ 401 return 0x00501000U; 402} 403static inline u32 gr_pri_gpcs_gcc_dbg_r(void) 404{ 405 return 0x00419000U; 406} 407static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) 408{ 409 return 0x1U << 1U; 410} 411static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) 412{ 413 return 0x005046a4U; 414} 415static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) 416{ 417 return 0x00419ea4U; 418} 419static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) 420{ 421 return 0x1U << 0U; 422} 423static inline u32 gr_pri_sked_activity_r(void) 424{ 425 return 0x00407054U; 426} 427static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) 428{ 429 return 0x00502c80U; 430} 431static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) 432{ 433 return 0x00502c84U; 434} 435static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) 436{ 437 return 0x00502c88U; 438} 439static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) 440{ 441 return 0x00502c8cU; 442} 443static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) 444{ 445 return 0x00504500U; 446} 447static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) 448{ 449 return 0x00501d00U; 450} 451static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) 452{ 453 return 0x0041ac80U; 454} 455static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) 456{ 457 return 0x0041ac84U; 458} 459static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) 460{ 461 return 0x0041ac88U; 462} 463static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) 464{ 465 return 0x0041ac8cU; 466} 467static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) 468{ 469 return 0x0041c500U; 470} 471static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) 472{ 473 return 0x00419d00U; 474} 475static inline u32 gr_pri_be0_becs_be_activity0_r(void) 476{ 477 return 0x00410200U; 478} 479static inline u32 gr_pri_bes_becs_be_activity0_r(void) 480{ 481 return 0x00408a00U; 482} 
483static inline u32 gr_pri_ds_mpipe_status_r(void) 484{ 485 return 0x00405858U; 486} 487static inline u32 gr_pri_fe_go_idle_on_status_r(void) 488{ 489 return 0x00404150U; 490} 491static inline u32 gr_pri_fe_go_idle_check_r(void) 492{ 493 return 0x00404158U; 494} 495static inline u32 gr_pri_fe_go_idle_info_r(void) 496{ 497 return 0x00404194U; 498} 499static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) 500{ 501 return 0x00504238U; 502} 503static inline u32 gr_pri_be0_crop_status1_r(void) 504{ 505 return 0x00410134U; 506} 507static inline u32 gr_pri_bes_crop_status1_r(void) 508{ 509 return 0x00408934U; 510} 511static inline u32 gr_pri_be0_zrop_status_r(void) 512{ 513 return 0x00410048U; 514} 515static inline u32 gr_pri_be0_zrop_status2_r(void) 516{ 517 return 0x0041004cU; 518} 519static inline u32 gr_pri_bes_zrop_status_r(void) 520{ 521 return 0x00408848U; 522} 523static inline u32 gr_pri_bes_zrop_status2_r(void) 524{ 525 return 0x0040884cU; 526} 527static inline u32 gr_pipe_bundle_address_r(void) 528{ 529 return 0x00400200U; 530} 531static inline u32 gr_pipe_bundle_address_value_v(u32 r) 532{ 533 return (r >> 0U) & 0xffffU; 534} 535static inline u32 gr_pipe_bundle_data_r(void) 536{ 537 return 0x00400204U; 538} 539static inline u32 gr_pipe_bundle_config_r(void) 540{ 541 return 0x00400208U; 542} 543static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) 544{ 545 return 0x0U; 546} 547static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) 548{ 549 return 0x80000000U; 550} 551static inline u32 gr_fe_hww_esr_r(void) 552{ 553 return 0x00404000U; 554} 555static inline u32 gr_fe_hww_esr_reset_active_f(void) 556{ 557 return 0x40000000U; 558} 559static inline u32 gr_fe_hww_esr_en_enable_f(void) 560{ 561 return 0x80000000U; 562} 563static inline u32 gr_fe_hww_esr_info_r(void) 564{ 565 return 0x004041b0U; 566} 567static inline u32 gr_fe_go_idle_timeout_r(void) 568{ 569 return 0x00404154U; 570} 571static inline u32 
gr_fe_go_idle_timeout_count_f(u32 v) 572{ 573 return (v & 0xffffffffU) << 0U; 574} 575static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) 576{ 577 return 0x0U; 578} 579static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) 580{ 581 return 0x800U; 582} 583static inline u32 gr_fe_object_table_r(u32 i) 584{ 585 return 0x00404200U + i*4U; 586} 587static inline u32 gr_fe_object_table_nvclass_v(u32 r) 588{ 589 return (r >> 0U) & 0xffffU; 590} 591static inline u32 gr_pri_mme_shadow_raw_index_r(void) 592{ 593 return 0x00404488U; 594} 595static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) 596{ 597 return 0x80000000U; 598} 599static inline u32 gr_pri_mme_shadow_raw_data_r(void) 600{ 601 return 0x0040448cU; 602} 603static inline u32 gr_mme_hww_esr_r(void) 604{ 605 return 0x00404490U; 606} 607static inline u32 gr_mme_hww_esr_reset_active_f(void) 608{ 609 return 0x40000000U; 610} 611static inline u32 gr_mme_hww_esr_en_enable_f(void) 612{ 613 return 0x80000000U; 614} 615static inline u32 gr_mme_hww_esr_info_r(void) 616{ 617 return 0x00404494U; 618} 619static inline u32 gr_memfmt_hww_esr_r(void) 620{ 621 return 0x00404600U; 622} 623static inline u32 gr_memfmt_hww_esr_reset_active_f(void) 624{ 625 return 0x40000000U; 626} 627static inline u32 gr_memfmt_hww_esr_en_enable_f(void) 628{ 629 return 0x80000000U; 630} 631static inline u32 gr_fecs_cpuctl_r(void) 632{ 633 return 0x00409100U; 634} 635static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) 636{ 637 return (v & 0x1U) << 1U; 638} 639static inline u32 gr_fecs_dmactl_r(void) 640{ 641 return 0x0040910cU; 642} 643static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) 644{ 645 return (v & 0x1U) << 0U; 646} 647static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) 648{ 649 return 0x1U << 1U; 650} 651static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) 652{ 653 return 0x1U << 2U; 654} 655static inline u32 gr_fecs_os_r(void) 656{ 657 return 0x00409080U; 658} 659static inline u32 gr_fecs_idlestate_r(void) 
660{ 661 return 0x0040904cU; 662} 663static inline u32 gr_fecs_mailbox0_r(void) 664{ 665 return 0x00409040U; 666} 667static inline u32 gr_fecs_mailbox1_r(void) 668{ 669 return 0x00409044U; 670} 671static inline u32 gr_fecs_irqstat_r(void) 672{ 673 return 0x00409008U; 674} 675static inline u32 gr_fecs_irqmode_r(void) 676{ 677 return 0x0040900cU; 678} 679static inline u32 gr_fecs_irqmask_r(void) 680{ 681 return 0x00409018U; 682} 683static inline u32 gr_fecs_irqdest_r(void) 684{ 685 return 0x0040901cU; 686} 687static inline u32 gr_fecs_curctx_r(void) 688{ 689 return 0x00409050U; 690} 691static inline u32 gr_fecs_nxtctx_r(void) 692{ 693 return 0x00409054U; 694} 695static inline u32 gr_fecs_engctl_r(void) 696{ 697 return 0x004090a4U; 698} 699static inline u32 gr_fecs_debug1_r(void) 700{ 701 return 0x00409090U; 702} 703static inline u32 gr_fecs_debuginfo_r(void) 704{ 705 return 0x00409094U; 706} 707static inline u32 gr_fecs_icd_cmd_r(void) 708{ 709 return 0x00409200U; 710} 711static inline u32 gr_fecs_icd_cmd_opc_s(void) 712{ 713 return 4U; 714} 715static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) 716{ 717 return (v & 0xfU) << 0U; 718} 719static inline u32 gr_fecs_icd_cmd_opc_m(void) 720{ 721 return 0xfU << 0U; 722} 723static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) 724{ 725 return (r >> 0U) & 0xfU; 726} 727static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) 728{ 729 return 0x8U; 730} 731static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) 732{ 733 return 0xeU; 734} 735static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) 736{ 737 return (v & 0x1fU) << 8U; 738} 739static inline u32 gr_fecs_icd_rdata_r(void) 740{ 741 return 0x0040920cU; 742} 743static inline u32 gr_fecs_imemc_r(u32 i) 744{ 745 return 0x00409180U + i*16U; 746} 747static inline u32 gr_fecs_imemc_offs_f(u32 v) 748{ 749 return (v & 0x3fU) << 2U; 750} 751static inline u32 gr_fecs_imemc_blk_f(u32 v) 752{ 753 return (v & 0xffU) << 8U; 754} 755static inline u32 gr_fecs_imemc_aincw_f(u32 v) 756{ 757 return (v & 0x1U) << 24U; 
758} 759static inline u32 gr_fecs_imemd_r(u32 i) 760{ 761 return 0x00409184U + i*16U; 762} 763static inline u32 gr_fecs_imemt_r(u32 i) 764{ 765 return 0x00409188U + i*16U; 766} 767static inline u32 gr_fecs_imemt_tag_f(u32 v) 768{ 769 return (v & 0xffffU) << 0U; 770} 771static inline u32 gr_fecs_dmemc_r(u32 i) 772{ 773 return 0x004091c0U + i*8U; 774} 775static inline u32 gr_fecs_dmemc_offs_s(void) 776{ 777 return 6U; 778} 779static inline u32 gr_fecs_dmemc_offs_f(u32 v) 780{ 781 return (v & 0x3fU) << 2U; 782} 783static inline u32 gr_fecs_dmemc_offs_m(void) 784{ 785 return 0x3fU << 2U; 786} 787static inline u32 gr_fecs_dmemc_offs_v(u32 r) 788{ 789 return (r >> 2U) & 0x3fU; 790} 791static inline u32 gr_fecs_dmemc_blk_f(u32 v) 792{ 793 return (v & 0xffU) << 8U; 794} 795static inline u32 gr_fecs_dmemc_aincw_f(u32 v) 796{ 797 return (v & 0x1U) << 24U; 798} 799static inline u32 gr_fecs_dmemd_r(u32 i) 800{ 801 return 0x004091c4U + i*8U; 802} 803static inline u32 gr_fecs_dmatrfbase_r(void) 804{ 805 return 0x00409110U; 806} 807static inline u32 gr_fecs_dmatrfmoffs_r(void) 808{ 809 return 0x00409114U; 810} 811static inline u32 gr_fecs_dmatrffboffs_r(void) 812{ 813 return 0x0040911cU; 814} 815static inline u32 gr_fecs_dmatrfcmd_r(void) 816{ 817 return 0x00409118U; 818} 819static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) 820{ 821 return (v & 0x1U) << 4U; 822} 823static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) 824{ 825 return (v & 0x1U) << 5U; 826} 827static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) 828{ 829 return (v & 0x7U) << 8U; 830} 831static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) 832{ 833 return (v & 0x7U) << 12U; 834} 835static inline u32 gr_fecs_bootvec_r(void) 836{ 837 return 0x00409104U; 838} 839static inline u32 gr_fecs_bootvec_vec_f(u32 v) 840{ 841 return (v & 0xffffffffU) << 0U; 842} 843static inline u32 gr_fecs_falcon_hwcfg_r(void) 844{ 845 return 0x00409108U; 846} 847static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) 848{ 849 return 0x0041a108U; 850} 
851static inline u32 gr_fecs_falcon_rm_r(void) 852{ 853 return 0x00409084U; 854} 855static inline u32 gr_fecs_current_ctx_r(void) 856{ 857 return 0x00409b00U; 858} 859static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) 860{ 861 return (v & 0xfffffffU) << 0U; 862} 863static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) 864{ 865 return (r >> 0U) & 0xfffffffU; 866} 867static inline u32 gr_fecs_current_ctx_target_s(void) 868{ 869 return 2U; 870} 871static inline u32 gr_fecs_current_ctx_target_f(u32 v) 872{ 873 return (v & 0x3U) << 28U; 874} 875static inline u32 gr_fecs_current_ctx_target_m(void) 876{ 877 return 0x3U << 28U; 878} 879static inline u32 gr_fecs_current_ctx_target_v(u32 r) 880{ 881 return (r >> 28U) & 0x3U; 882} 883static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) 884{ 885 return 0x0U; 886} 887static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) 888{ 889 return 0x20000000U; 890} 891static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) 892{ 893 return 0x30000000U; 894} 895static inline u32 gr_fecs_current_ctx_valid_s(void) 896{ 897 return 1U; 898} 899static inline u32 gr_fecs_current_ctx_valid_f(u32 v) 900{ 901 return (v & 0x1U) << 31U; 902} 903static inline u32 gr_fecs_current_ctx_valid_m(void) 904{ 905 return 0x1U << 31U; 906} 907static inline u32 gr_fecs_current_ctx_valid_v(u32 r) 908{ 909 return (r >> 31U) & 0x1U; 910} 911static inline u32 gr_fecs_current_ctx_valid_false_f(void) 912{ 913 return 0x0U; 914} 915static inline u32 gr_fecs_method_data_r(void) 916{ 917 return 0x00409500U; 918} 919static inline u32 gr_fecs_method_push_r(void) 920{ 921 return 0x00409504U; 922} 923static inline u32 gr_fecs_method_push_adr_f(u32 v) 924{ 925 return (v & 0xfffU) << 0U; 926} 927static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) 928{ 929 return 0x00000003U; 930} 931static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) 932{ 933 return 0x3U; 934} 935static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) 
936{ 937 return 0x00000010U; 938} 939static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) 940{ 941 return 0x00000009U; 942} 943static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) 944{ 945 return 0x00000015U; 946} 947static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) 948{ 949 return 0x00000016U; 950} 951static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) 952{ 953 return 0x00000025U; 954} 955static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) 956{ 957 return 0x00000030U; 958} 959static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) 960{ 961 return 0x00000031U; 962} 963static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) 964{ 965 return 0x00000032U; 966} 967static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) 968{ 969 return 0x00000038U; 970} 971static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) 972{ 973 return 0x00000039U; 974} 975static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) 976{ 977 return 0x21U; 978} 979static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) 980{ 981 return 0x00000004U; 982} 983static inline u32 gr_fecs_host_int_status_r(void) 984{ 985 return 0x00409c18U; 986} 987static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) 988{ 989 return (v & 0x1U) << 16U; 990} 991static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) 992{ 993 return (v & 0x1U) << 17U; 994} 995static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) 996{ 997 return (v & 0x1U) << 18U; 998} 999static inline u32 gr_fecs_host_int_status_watchdog_active_f(void) 1000{ 1001 return 0x80000U; 1002} 1003static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) 1004{ 1005 return (v & 0xffffU) << 0U; 1006} 1007static inline u32 gr_fecs_host_int_clear_r(void) 1008{ 1009 return 0x00409c20U; 1010} 1011static inline u32 
/*
 * GR engine register accessors (generated style). Naming convention used
 * throughout: *_r() = register offset, *_f(v) = value shifted into its
 * field, *_m() = field mask, *_v(r) = field extracted from a register
 * value, *_s() = field width in bits (matches the corresponding mask).
 */
gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) { return 0x2U; }
/* FECS host interrupt enable register and its per-source enable bits. */
static inline u32 gr_fecs_host_int_enable_r(void) { return 0x00409c24U; }
static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) { return 0x2U; }
static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) { return 0x10000U; }
static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) { return 0x20000U; }
static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) { return 0x40000U; }
static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) { return 0x80000U; }
/* FECS context-switch reset control (halt / engine-reset / context-reset). */
static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) { return 0x00409614U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) { return 0x10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) { return 0x20U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) { return 0x40U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) { return 0x100U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) { return 0x200U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) { return 1U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) { return (v & 0x1U) << 10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) { return 0x1U << 10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) { return (r >> 10U) & 0x1U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) { return 0x400U; }
static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) { return 0x0040960cU; }
/* FECS ctxsw mailbox array (8 entries, 4-byte stride) plus set/clear aliases. */
static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) { return 0x00409800U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) { return 0x00000008U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) { return 0x00000001U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) { return 0x00000002U; }
static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) { return 0x00409820U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) { return 0x00409840U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
/* FECS floorsweep status: available GPC and FBP counts (5-bit fields). */
static inline u32 gr_fecs_fs_r(void) { return 0x00409604U; }
static inline u32 gr_fecs_fs_num_available_gpcs_s(void) { return 5U; }
static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_fecs_fs_num_available_gpcs_m(void) { return 0x1fU << 0U; }
static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_fecs_fs_num_available_fbps_s(void) { return 5U; }
static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) { return (v & 0x1fU) << 16U; }
static inline u32 gr_fecs_fs_num_available_fbps_m(void) { return 0x1fU << 16U; }
static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) { return (r >> 16U) & 0x1fU; }
static inline u32 gr_fecs_cfg_r(void) { return 0x00409620U; }
static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_fecs_rc_lanes_r(void) { return 0x00409880U; }
static inline u32 gr_fecs_rc_lanes_num_chains_s(void) { return 6U; }
static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_fecs_rc_lanes_num_chains_m(void) { return 0x3fU << 0U; }
static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_fecs_ctxsw_status_1_r(void) { return 0x00409400U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) { return 1U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) { return (v & 0x1U) << 12U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) { return 0x1U << 12U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) { return (r >> 12U) & 0x1U; }
static inline u32 gr_fecs_arb_ctx_adr_r(void) { return 0x00409a24U; }
/* FECS new-context register: 28-bit pointer, 2-bit aperture target, valid bit. */
static inline u32 gr_fecs_new_ctx_r(void) { return 0x00409b04U; }
static inline u32 gr_fecs_new_ctx_ptr_s(void) { return 28U; }
static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_new_ctx_ptr_m(void) { return 0xfffffffU << 0U; }
static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_new_ctx_target_s(void) { return 2U; }
static inline u32 gr_fecs_new_ctx_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_new_ctx_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_new_ctx_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_new_ctx_target_vid_mem_f(void) { return 0x0U; }
static inline u32 gr_fecs_new_ctx_target_sys_mem_ncoh_f(void) { return 0x30000000U; }
static inline u32 gr_fecs_new_ctx_target_sys_mem_coh_f(void) { return 0x20000000U; }
static inline u32 gr_fecs_new_ctx_valid_s(void) { return 1U; }
static inline u32 gr_fecs_new_ctx_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_fecs_new_ctx_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_fecs_new_ctx_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
/* FECS arbiter context pointer: same ptr/target layout as new_ctx above. */
static inline u32 gr_fecs_arb_ctx_ptr_r(void) { return 0x00409a0cU; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) { return 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) { return 0xfffffffU << 0U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) { return 2U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_vid_mem_f(void) { return 0x0U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(void) { return 0x30000000U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_coh_f(void) { return 0x20000000U; }
static inline u32 gr_fecs_arb_ctx_cmd_r(void) { return 0x00409a10U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) { return 5U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) { return 0x1fU << 0U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) { return 0x00409c00U; }
static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) { return 0x00502c04U; }
static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) { return 0x00502400U; }
static inline u32 gr_gpc0_gpccs_ctxsw_mailbox__size_1_v(void) { return 0x00000010U; }
static inline u32 gr_fecs_ctxsw_idlestate_r(void) { return 0x00409420U; }
static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) { return 0x00502420U; }
/* RSTR2D GPC map registers 0..5 and the map-table configuration. */
static inline u32 gr_rstr2d_gpc_map0_r(void) { return 0x0040780cU; }
static inline u32 gr_rstr2d_gpc_map1_r(void) { return 0x00407810U; }
static inline u32 gr_rstr2d_gpc_map2_r(void) { return 0x00407814U; }
static inline u32 gr_rstr2d_gpc_map3_r(void) { return 0x00407818U; }
static inline u32 gr_rstr2d_gpc_map4_r(void) { return 0x0040781cU; }
static inline u32 gr_rstr2d_gpc_map5_r(void) { return 0x00407820U; }
static inline u32 gr_rstr2d_map_table_cfg_r(void) { return 0x004078bcU; }
static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) { return (v & 0xffU) << 8U; }
/* PD (primitive distributor) error status and per-GPC TPC counts. */
static inline u32 gr_pd_hww_esr_r(void) { return 0x00406018U; }
static inline u32 gr_pd_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_pd_hww_esr_en_enable_f(void) { return 0x80000000U; }
static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) { return 0x00406028U + i*4U; }
static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) { return 0x00000004U; }
/* Eight packed 4-bit per-GPC counts per 32-bit entry. */
static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) { return (v & 0xfU) << 4U; }
static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) { return (v & 0xfU) << 12U; }
static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) { return (v & 0xfU) << 16U; }
static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) { return (v & 0xfU) << 20U; }
static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) { return (v & 0xfU) << 24U; }
static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) { return (v & 0xfU) << 28U; }
/* PD alpha/beta distribution configuration. */
static inline u32 gr_pd_ab_dist_cfg0_r(void) { return 0x004064c0U; }
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) { return 0x80000000U; }
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) { return 0x0U; }
static inline u32 gr_pd_ab_dist_cfg1_r(void) { return 0x004064c4U; }
static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) { return 0xffffU; }
static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) { return (v & 0x7ffU) << 16U; }
static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) { return 0x00000080U; }
static inline u32 gr_pd_ab_dist_cfg2_r(void) { return 0x004064c8U; }
static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) { return 0x00000100U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) { return (v & 0xfffU) << 16U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) { return 0x00000062U; }
static inline u32 gr_pd_pagepool_r(void) { return 0x004064ccU; }
static inline u32 gr_pd_pagepool_total_pages_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_pd_pagepool_valid_true_f(void) { return 0x80000000U; }
/* PD skip table: four 8-bit GPC masks packed per entry. */
static inline u32 gr_pd_dist_skip_table_r(u32 i) { return 0x004064d0U + i*4U; }
static inline u32 gr_pd_dist_skip_table__size_1_v(void) { return 0x00000008U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) { return (v & 0xffU) << 24U; }
/* PD alpha/beta ratio tables: 256 entries, same packed-mask layout. */
static inline u32 gr_pd_alpha_ratio_table_r(u32 i) { return 0x00406800U + i*4U; }
static inline u32 gr_pd_alpha_ratio_table__size_1_v(void) { return 0x00000100U; }
static inline u32 gr_pd_alpha_ratio_table_gpc_4n0_mask_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_pd_alpha_ratio_table_gpc_4n1_mask_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_pd_alpha_ratio_table_gpc_4n2_mask_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_pd_alpha_ratio_table_gpc_4n3_mask_f(u32 v) { return (v & 0xffU) << 24U; }
static inline u32 gr_pd_beta_ratio_table_r(u32 i) { return 0x00406c00U + i*4U; }
static inline u32 gr_pd_beta_ratio_table__size_1_v(void) { return 0x00000100U; }
static inline u32 gr_pd_beta_ratio_table_gpc_4n0_mask_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_pd_beta_ratio_table_gpc_4n1_mask_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_pd_beta_ratio_table_gpc_4n2_mask_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_pd_beta_ratio_table_gpc_4n3_mask_f(u32 v) { return (v & 0xffU) << 24U; }
/* DS debug and zero-bandwidth-clear (ZBC) color component registers. */
static inline u32 gr_ds_debug_r(void) { return 0x00405800U; }
static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) { return 0x0U; }
static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) { return 0x8000000U; }
static inline u32 gr_ds_zbc_color_r_r(void) { return 0x00405804U; }
static inline u32 gr_ds_zbc_color_r_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_g_r(void) { return 0x00405808U; }
static inline u32 gr_ds_zbc_color_g_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_b_r(void) { return 0x0040580cU; }
static inline u32 gr_ds_zbc_color_b_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
1655static inline u32 gr_ds_zbc_color_a_r(void) 1656{ 1657 return 0x00405810U; 1658} 1659static inline u32 gr_ds_zbc_color_a_val_f(u32 v) 1660{ 1661 return (v & 0xffffffffU) << 0U; 1662} 1663static inline u32 gr_ds_zbc_color_fmt_r(void) 1664{ 1665 return 0x00405814U; 1666} 1667static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) 1668{ 1669 return (v & 0x7fU) << 0U; 1670} 1671static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) 1672{ 1673 return 0x0U; 1674} 1675static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) 1676{ 1677 return 0x00000001U; 1678} 1679static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) 1680{ 1681 return 0x00000002U; 1682} 1683static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) 1684{ 1685 return 0x00000004U; 1686} 1687static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) 1688{ 1689 return 0x00000028U; 1690} 1691static inline u32 gr_ds_zbc_z_r(void) 1692{ 1693 return 0x00405818U; 1694} 1695static inline u32 gr_ds_zbc_z_val_s(void) 1696{ 1697 return 32U; 1698} 1699static inline u32 gr_ds_zbc_z_val_f(u32 v) 1700{ 1701 return (v & 0xffffffffU) << 0U; 1702} 1703static inline u32 gr_ds_zbc_z_val_m(void) 1704{ 1705 return 0xffffffffU << 0U; 1706} 1707static inline u32 gr_ds_zbc_z_val_v(u32 r) 1708{ 1709 return (r >> 0U) & 0xffffffffU; 1710} 1711static inline u32 gr_ds_zbc_z_val__init_v(void) 1712{ 1713 return 0x00000000U; 1714} 1715static inline u32 gr_ds_zbc_z_val__init_f(void) 1716{ 1717 return 0x0U; 1718} 1719static inline u32 gr_ds_zbc_z_fmt_r(void) 1720{ 1721 return 0x0040581cU; 1722} 1723static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) 1724{ 1725 return (v & 0x1U) << 0U; 1726} 1727static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) 1728{ 1729 return 0x0U; 1730} 1731static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) 1732{ 1733 return 0x00000001U; 1734} 1735static inline u32 gr_ds_zbc_tbl_index_r(void) 1736{ 1737 return 0x00405820U; 1738} 1739static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) 1740{ 1741 return (v 
& 0xfU) << 0U; 1742} 1743static inline u32 gr_ds_zbc_tbl_ld_r(void) 1744{ 1745 return 0x00405824U; 1746} 1747static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) 1748{ 1749 return 0x0U; 1750} 1751static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) 1752{ 1753 return 0x1U; 1754} 1755static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) 1756{ 1757 return 0x0U; 1758} 1759static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) 1760{ 1761 return 0x4U; 1762} 1763static inline u32 gr_ds_tga_constraintlogic_r(void) 1764{ 1765 return 0x00405830U; 1766} 1767static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) 1768{ 1769 return (v & 0xfffU) << 16U; 1770} 1771static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) 1772{ 1773 return (v & 0xfffU) << 0U; 1774} 1775static inline u32 gr_ds_hww_esr_r(void) 1776{ 1777 return 0x00405840U; 1778} 1779static inline u32 gr_ds_hww_esr_reset_s(void) 1780{ 1781 return 1U; 1782} 1783static inline u32 gr_ds_hww_esr_reset_f(u32 v) 1784{ 1785 return (v & 0x1U) << 30U; 1786} 1787static inline u32 gr_ds_hww_esr_reset_m(void) 1788{ 1789 return 0x1U << 30U; 1790} 1791static inline u32 gr_ds_hww_esr_reset_v(u32 r) 1792{ 1793 return (r >> 30U) & 0x1U; 1794} 1795static inline u32 gr_ds_hww_esr_reset_task_v(void) 1796{ 1797 return 0x00000001U; 1798} 1799static inline u32 gr_ds_hww_esr_reset_task_f(void) 1800{ 1801 return 0x40000000U; 1802} 1803static inline u32 gr_ds_hww_esr_en_enabled_f(void) 1804{ 1805 return 0x80000000U; 1806} 1807static inline u32 gr_ds_hww_report_mask_r(void) 1808{ 1809 return 0x00405844U; 1810} 1811static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) 1812{ 1813 return 0x1U; 1814} 1815static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) 1816{ 1817 return 0x2U; 1818} 1819static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) 1820{ 1821 return 0x4U; 1822} 1823static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) 1824{ 1825 return 0x8U; 1826} 1827static inline u32 
gr_ds_hww_report_mask_sph4_err_report_f(void) 1828{ 1829 return 0x10U; 1830} 1831static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) 1832{ 1833 return 0x20U; 1834} 1835static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) 1836{ 1837 return 0x40U; 1838} 1839static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) 1840{ 1841 return 0x80U; 1842} 1843static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) 1844{ 1845 return 0x100U; 1846} 1847static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) 1848{ 1849 return 0x200U; 1850} 1851static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) 1852{ 1853 return 0x400U; 1854} 1855static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) 1856{ 1857 return 0x800U; 1858} 1859static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) 1860{ 1861 return 0x1000U; 1862} 1863static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) 1864{ 1865 return 0x2000U; 1866} 1867static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) 1868{ 1869 return 0x4000U; 1870} 1871static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) 1872{ 1873 return 0x8000U; 1874} 1875static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) 1876{ 1877 return 0x10000U; 1878} 1879static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) 1880{ 1881 return 0x20000U; 1882} 1883static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) 1884{ 1885 return 0x40000U; 1886} 1887static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) 1888{ 1889 return 0x80000U; 1890} 1891static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) 1892{ 1893 return 0x100000U; 1894} 1895static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) 1896{ 1897 return 0x200000U; 1898} 1899static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) 1900{ 1901 return 0x400000U; 1902} 1903static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) 1904{ 1905 return 
0x800000U; 1906} 1907static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) 1908{ 1909 return 0x00405870U + i*4U; 1910} 1911static inline u32 gr_scc_bundle_cb_base_r(void) 1912{ 1913 return 0x00408004U; 1914} 1915static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) 1916{ 1917 return (v & 0xffffffffU) << 0U; 1918} 1919static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) 1920{ 1921 return 0x00000008U; 1922} 1923static inline u32 gr_scc_bundle_cb_size_r(void) 1924{ 1925 return 0x00408008U; 1926} 1927static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) 1928{ 1929 return (v & 0x7ffU) << 0U; 1930} 1931static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) 1932{ 1933 return 0x00000018U; 1934} 1935static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) 1936{ 1937 return 0x00000100U; 1938} 1939static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) 1940{ 1941 return 0x00000000U; 1942} 1943static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) 1944{ 1945 return 0x0U; 1946} 1947static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) 1948{ 1949 return 0x80000000U; 1950} 1951static inline u32 gr_scc_pagepool_base_r(void) 1952{ 1953 return 0x0040800cU; 1954} 1955static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) 1956{ 1957 return (v & 0xffffffffU) << 0U; 1958} 1959static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) 1960{ 1961 return 0x00000008U; 1962} 1963static inline u32 gr_scc_pagepool_r(void) 1964{ 1965 return 0x00408010U; 1966} 1967static inline u32 gr_scc_pagepool_total_pages_f(u32 v) 1968{ 1969 return (v & 0xffU) << 0U; 1970} 1971static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) 1972{ 1973 return 0x00000000U; 1974} 1975static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) 1976{ 1977 return 0x00000080U; 1978} 1979static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) 1980{ 1981 return 0x00000100U; 1982} 1983static inline u32 
gr_scc_pagepool_max_valid_pages_s(void) 1984{ 1985 return 8U; 1986} 1987static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) 1988{ 1989 return (v & 0xffU) << 8U; 1990} 1991static inline u32 gr_scc_pagepool_max_valid_pages_m(void) 1992{ 1993 return 0xffU << 8U; 1994} 1995static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) 1996{ 1997 return (r >> 8U) & 0xffU; 1998} 1999static inline u32 gr_scc_pagepool_valid_true_f(void) 2000{ 2001 return 0x80000000U; 2002} 2003static inline u32 gr_scc_init_r(void) 2004{ 2005 return 0x0040802cU; 2006} 2007static inline u32 gr_scc_init_ram_trigger_f(void) 2008{ 2009 return 0x1U; 2010} 2011static inline u32 gr_scc_hww_esr_r(void) 2012{ 2013 return 0x00408030U; 2014} 2015static inline u32 gr_scc_hww_esr_reset_active_f(void) 2016{ 2017 return 0x40000000U; 2018} 2019static inline u32 gr_scc_hww_esr_en_enable_f(void) 2020{ 2021 return 0x80000000U; 2022} 2023static inline u32 gr_sked_hww_esr_r(void) 2024{ 2025 return 0x00407020U; 2026} 2027static inline u32 gr_sked_hww_esr_reset_active_f(void) 2028{ 2029 return 0x40000000U; 2030} 2031static inline u32 gr_cwd_fs_r(void) 2032{ 2033 return 0x00405b00U; 2034} 2035static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) 2036{ 2037 return (v & 0xffU) << 0U; 2038} 2039static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) 2040{ 2041 return (v & 0xffU) << 8U; 2042} 2043static inline u32 gr_gpc0_fs_gpc_r(void) 2044{ 2045 return 0x00502608U; 2046} 2047static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) 2048{ 2049 return (r >> 0U) & 0x1fU; 2050} 2051static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) 2052{ 2053 return (r >> 16U) & 0x1fU; 2054} 2055static inline u32 gr_gpc0_cfg_r(void) 2056{ 2057 return 0x00502620U; 2058} 2059static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) 2060{ 2061 return (r >> 0U) & 0xffU; 2062} 2063static inline u32 gr_gpccs_rc_lanes_r(void) 2064{ 2065 return 0x00502880U; 2066} 2067static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) 2068{ 2069 return 6U; 2070} 
2071static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) 2072{ 2073 return (v & 0x3fU) << 0U; 2074} 2075static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) 2076{ 2077 return 0x3fU << 0U; 2078} 2079static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) 2080{ 2081 return (r >> 0U) & 0x3fU; 2082} 2083static inline u32 gr_gpccs_rc_lane_size_r(u32 i) 2084{ 2085 return 0x00502910U + i*0U; 2086} 2087static inline u32 gr_gpccs_rc_lane_size__size_1_v(void) 2088{ 2089 return 0x00000010U; 2090} 2091static inline u32 gr_gpccs_rc_lane_size_v_s(void) 2092{ 2093 return 24U; 2094} 2095static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) 2096{ 2097 return (v & 0xffffffU) << 0U; 2098} 2099static inline u32 gr_gpccs_rc_lane_size_v_m(void) 2100{ 2101 return 0xffffffU << 0U; 2102} 2103static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) 2104{ 2105 return (r >> 0U) & 0xffffffU; 2106} 2107static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) 2108{ 2109 return 0x00000000U; 2110} 2111static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) 2112{ 2113 return 0x0U; 2114} 2115static inline u32 gr_gpc0_zcull_fs_r(void) 2116{ 2117 return 0x00500910U; 2118} 2119static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) 2120{ 2121 return (v & 0x1ffU) << 0U; 2122} 2123static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) 2124{ 2125 return (v & 0xfU) << 16U; 2126} 2127static inline u32 gr_gpc0_zcull_ram_addr_r(void) 2128{ 2129 return 0x00500914U; 2130} 2131static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) 2132{ 2133 return (v & 0xfU) << 0U; 2134} 2135static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) 2136{ 2137 return (v & 0xfU) << 8U; 2138} 2139static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) 2140{ 2141 return 0x00500918U; 2142} 2143static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) 2144{ 2145 return (v & 0xffffffU) << 0U; 2146} 2147static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) 2148{ 2149 return 0x00800000U; 2150} 
2151static inline u32 gr_gpc0_zcull_total_ram_size_r(void) 2152{ 2153 return 0x00500920U; 2154} 2155static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) 2156{ 2157 return (v & 0xffffU) << 0U; 2158} 2159static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) 2160{ 2161 return 0x00500a04U + i*32U; 2162} 2163static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) 2164{ 2165 return 0x00000040U; 2166} 2167static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) 2168{ 2169 return 0x00000010U; 2170} 2171static inline u32 gr_gpc0_gpm_pd_active_tpcs_r(void) 2172{ 2173 return 0x00500c08U; 2174} 2175static inline u32 gr_gpc0_gpm_pd_active_tpcs_num_f(u32 v) 2176{ 2177 return (v & 0x7U) << 0U; 2178} 2179static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) 2180{ 2181 return 0x00500c10U + i*4U; 2182} 2183static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) 2184{ 2185 return (v & 0xffU) << 0U; 2186} 2187static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) 2188{ 2189 return 0x00500c30U + i*4U; 2190} 2191static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) 2192{ 2193 return (r >> 0U) & 0xffU; 2194} 2195static inline u32 gr_gpc0_gpm_sd_active_tpcs_r(void) 2196{ 2197 return 0x00500c8cU; 2198} 2199static inline u32 gr_gpc0_gpm_sd_active_tpcs_num_f(u32 v) 2200{ 2201 return (v & 0x7U) << 0U; 2202} 2203static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) 2204{ 2205 return 0x00504088U; 2206} 2207static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) 2208{ 2209 return (v & 0xffffU) << 0U; 2210} 2211static inline u32 gr_gpc0_tpc0_l1c_cfg_smid_r(void) 2212{ 2213 return 0x005044e8U; 2214} 2215static inline u32 gr_gpc0_tpc0_l1c_cfg_smid_value_f(u32 v) 2216{ 2217 return (v & 0xffffU) << 0U; 2218} 2219static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) 2220{ 2221 return 0x00504698U; 2222} 2223static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_f(u32 v) 2224{ 2225 return (v & 0xffffU) << 0U; 2226} 2227static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_v(u32 r) 2228{ 
2229 return (r >> 0U) & 0xffffU; 2230} 2231static inline u32 gr_gpc0_tpc0_sm_arch_r(void) 2232{ 2233 return 0x0050469cU; 2234} 2235static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) 2236{ 2237 return (r >> 0U) & 0xffU; 2238} 2239static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) 2240{ 2241 return (r >> 8U) & 0xfU; 2242} 2243static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v(void) 2244{ 2245 return 0x0000000cU; 2246} 2247static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) 2248{ 2249 return 0x00503018U; 2250} 2251static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) 2252{ 2253 return 0x1U << 0U; 2254} 2255static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) 2256{ 2257 return 0x1U; 2258} 2259static inline u32 gr_gpc0_ppc0_cbm_cfg_r(void) 2260{ 2261 return 0x005030c0U; 2262} 2263static inline u32 gr_gpc0_ppc0_cbm_cfg_start_offset_f(u32 v) 2264{ 2265 return (v & 0xffffU) << 0U; 2266} 2267static inline u32 gr_gpc0_ppc0_cbm_cfg_start_offset_m(void) 2268{ 2269 return 0xffffU << 0U; 2270} 2271static inline u32 gr_gpc0_ppc0_cbm_cfg_start_offset_v(u32 r) 2272{ 2273 return (r >> 0U) & 0xffffU; 2274} 2275static inline u32 gr_gpc0_ppc0_cbm_cfg_size_f(u32 v) 2276{ 2277 return (v & 0xfffU) << 16U; 2278} 2279static inline u32 gr_gpc0_ppc0_cbm_cfg_size_m(void) 2280{ 2281 return 0xfffU << 16U; 2282} 2283static inline u32 gr_gpc0_ppc0_cbm_cfg_size_v(u32 r) 2284{ 2285 return (r >> 16U) & 0xfffU; 2286} 2287static inline u32 gr_gpc0_ppc0_cbm_cfg_size_default_v(void) 2288{ 2289 return 0x00000240U; 2290} 2291static inline u32 gr_gpc0_ppc0_cbm_cfg_size_granularity_v(void) 2292{ 2293 return 0x00000020U; 2294} 2295static inline u32 gr_gpc0_ppc0_cbm_cfg_timeslice_mode_f(u32 v) 2296{ 2297 return (v & 0x1U) << 28U; 2298} 2299static inline u32 gr_gpc0_ppc0_cbm_cfg2_r(void) 2300{ 2301 return 0x005030e4U; 2302} 2303static inline u32 gr_gpc0_ppc0_cbm_cfg2_start_offset_f(u32 v) 2304{ 2305 return (v & 0xffffU) << 0U; 2306} 2307static inline u32 
gr_gpc0_ppc0_cbm_cfg2_size_f(u32 v) { return (v & 0xfffU) << 16U; }
/*
 * Auto-generated GR engine register accessors (nvgpu hw_gr style).
 * NOTE(review): the scraped source had blob display line numbers fused
 * into the code (e.g. "2308{"); they are stripped here to restore valid C.
 * Naming convention: _r() register offset, _f(v) value shifted into field,
 * _m() field mask, _v(r) field extracted from register, _s() field width.
 */
static inline u32 gr_gpc0_ppc0_cbm_cfg2_size_m(void) { return 0xfffU << 16U; }
static inline u32 gr_gpc0_ppc0_cbm_cfg2_size_v(u32 r) { return (r >> 16U) & 0xfffU; }
static inline u32 gr_gpc0_ppc0_cbm_cfg2_size_default_v(void) { return 0x00000648U; }
static inline u32 gr_gpc0_ppc0_cbm_cfg2_size_granularity_v(void) { return 0x00000020U; }
/* GPCCS falcon microcontroller: address, cpuctl, dmactl, IMEM/DMEM ports. */
static inline u32 gr_gpccs_falcon_addr_r(void) { return 0x0041a0acU; }
static inline u32 gr_gpccs_falcon_addr_lsb_s(void) { return 6U; }
static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_gpccs_falcon_addr_lsb_m(void) { return 0x3fU << 0U; }
static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) { return 0x0U; }
static inline u32 gr_gpccs_falcon_addr_msb_s(void) { return 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) { return (v & 0x3fU) << 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_m(void) { return 0x3fU << 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) { return (r >> 6U) & 0x3fU; }
static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) { return 0x0U; }
static inline u32 gr_gpccs_falcon_addr_ext_s(void) { return 12U; }
static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_gpccs_falcon_addr_ext_m(void) { return 0xfffU << 0U; }
static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) { return (r >> 0U) & 0xfffU; }
static inline u32 gr_gpccs_cpuctl_r(void) { return 0x0041a100U; }
static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_gpccs_dmactl_r(void) { return 0x0041a10cU; }
static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) { return 0x1U << 1U; }
static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) { return 0x1U << 2U; }
/* IMEM/DMEM access ports are banked: i selects the port instance. */
static inline u32 gr_gpccs_imemc_r(u32 i) { return 0x0041a180U + i*16U; }
static inline u32 gr_gpccs_imemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_gpccs_imemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_gpccs_imemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_gpccs_imemd_r(u32 i) { return 0x0041a184U + i*16U; }
static inline u32 gr_gpccs_imemt_r(u32 i) { return 0x0041a188U + i*16U; }
static inline u32 gr_gpccs_imemt__size_1_v(void) { return 0x00000004U; }
static inline u32 gr_gpccs_imemt_tag_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpccs_dmemc_r(u32 i) { return 0x0041a1c0U + i*8U; }
static inline u32 gr_gpccs_dmemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_gpccs_dmemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_gpccs_dmemd_r(u32 i) { return 0x0041a1c4U + i*8U; }
static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) { return 0x0041a800U + i*4U; }
static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
/* Broadcast GPC setup: bundle CB base/size, attribute CB base. */
static inline u32 gr_gpcs_setup_bundle_cb_base_r(void) { return 0x00418808U; }
static inline u32 gr_gpcs_setup_bundle_cb_base_addr_39_8_s(void) { return 32U; }
static inline u32 gr_gpcs_setup_bundle_cb_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_setup_bundle_cb_base_addr_39_8_m(void) { return 0xffffffffU << 0U; }
static inline u32 gr_gpcs_setup_bundle_cb_base_addr_39_8_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
static inline u32 gr_gpcs_setup_bundle_cb_base_addr_39_8_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_setup_bundle_cb_base_addr_39_8_init_f(void) { return 0x0U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_r(void) { return 0x0041880cU; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b_s(void) { return 11U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b_f(u32 v) { return (v & 0x7ffU) << 0U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b_m(void) { return 0x7ffU << 0U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b_v(u32 r) { return (r >> 0U) & 0x7ffU; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b_init_f(void) { return 0x0U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b__prod_v(void) { return 0x00000018U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_div_256b__prod_f(void) { return 0x18U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_s(void) { return 1U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_false_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_false_f(void) { return 0x0U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_true_v(void) { return 0x00000001U; }
static inline u32 gr_gpcs_setup_bundle_cb_size_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) { return 0x00418810U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) { return 0x0000000cU; }
static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) { return 0x80000000U; }
/* Coarse rasterizer GPC tile maps: six 3-bit tiles per word, 5-bit stride. */
static inline u32 gr_crstr_gpc_map0_r(void) { return 0x00418b08U; }
static inline u32 gr_crstr_gpc_map0_tile0_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map0_tile1_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map0_tile2_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map0_tile3_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map0_tile4_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map0_tile5_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_gpc_map1_r(void) { return 0x00418b0cU; }
static inline u32 gr_crstr_gpc_map1_tile6_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map1_tile7_f(u32 v) { return (v & 0x7U) << 5U;
2630} 2631static inline u32 gr_crstr_gpc_map1_tile8_f(u32 v) 2632{ 2633 return (v & 0x7U) << 10U; 2634} 2635static inline u32 gr_crstr_gpc_map1_tile9_f(u32 v) 2636{ 2637 return (v & 0x7U) << 15U; 2638} 2639static inline u32 gr_crstr_gpc_map1_tile10_f(u32 v) 2640{ 2641 return (v & 0x7U) << 20U; 2642} 2643static inline u32 gr_crstr_gpc_map1_tile11_f(u32 v) 2644{ 2645 return (v & 0x7U) << 25U; 2646} 2647static inline u32 gr_crstr_gpc_map2_r(void) 2648{ 2649 return 0x00418b10U; 2650} 2651static inline u32 gr_crstr_gpc_map2_tile12_f(u32 v) 2652{ 2653 return (v & 0x7U) << 0U; 2654} 2655static inline u32 gr_crstr_gpc_map2_tile13_f(u32 v) 2656{ 2657 return (v & 0x7U) << 5U; 2658} 2659static inline u32 gr_crstr_gpc_map2_tile14_f(u32 v) 2660{ 2661 return (v & 0x7U) << 10U; 2662} 2663static inline u32 gr_crstr_gpc_map2_tile15_f(u32 v) 2664{ 2665 return (v & 0x7U) << 15U; 2666} 2667static inline u32 gr_crstr_gpc_map2_tile16_f(u32 v) 2668{ 2669 return (v & 0x7U) << 20U; 2670} 2671static inline u32 gr_crstr_gpc_map2_tile17_f(u32 v) 2672{ 2673 return (v & 0x7U) << 25U; 2674} 2675static inline u32 gr_crstr_gpc_map3_r(void) 2676{ 2677 return 0x00418b14U; 2678} 2679static inline u32 gr_crstr_gpc_map3_tile18_f(u32 v) 2680{ 2681 return (v & 0x7U) << 0U; 2682} 2683static inline u32 gr_crstr_gpc_map3_tile19_f(u32 v) 2684{ 2685 return (v & 0x7U) << 5U; 2686} 2687static inline u32 gr_crstr_gpc_map3_tile20_f(u32 v) 2688{ 2689 return (v & 0x7U) << 10U; 2690} 2691static inline u32 gr_crstr_gpc_map3_tile21_f(u32 v) 2692{ 2693 return (v & 0x7U) << 15U; 2694} 2695static inline u32 gr_crstr_gpc_map3_tile22_f(u32 v) 2696{ 2697 return (v & 0x7U) << 20U; 2698} 2699static inline u32 gr_crstr_gpc_map3_tile23_f(u32 v) 2700{ 2701 return (v & 0x7U) << 25U; 2702} 2703static inline u32 gr_crstr_gpc_map4_r(void) 2704{ 2705 return 0x00418b18U; 2706} 2707static inline u32 gr_crstr_gpc_map4_tile24_f(u32 v) 2708{ 2709 return (v & 0x7U) << 0U; 2710} 2711static inline u32 gr_crstr_gpc_map4_tile25_f(u32 v) 2712{ 
2713 return (v & 0x7U) << 5U; 2714} 2715static inline u32 gr_crstr_gpc_map4_tile26_f(u32 v) 2716{ 2717 return (v & 0x7U) << 10U; 2718} 2719static inline u32 gr_crstr_gpc_map4_tile27_f(u32 v) 2720{ 2721 return (v & 0x7U) << 15U; 2722} 2723static inline u32 gr_crstr_gpc_map4_tile28_f(u32 v) 2724{ 2725 return (v & 0x7U) << 20U; 2726} 2727static inline u32 gr_crstr_gpc_map4_tile29_f(u32 v) 2728{ 2729 return (v & 0x7U) << 25U; 2730} 2731static inline u32 gr_crstr_gpc_map5_r(void) 2732{ 2733 return 0x00418b1cU; 2734} 2735static inline u32 gr_crstr_gpc_map5_tile30_f(u32 v) 2736{ 2737 return (v & 0x7U) << 0U; 2738} 2739static inline u32 gr_crstr_gpc_map5_tile31_f(u32 v) 2740{ 2741 return (v & 0x7U) << 5U; 2742} 2743static inline u32 gr_crstr_gpc_map5_tile32_f(u32 v) 2744{ 2745 return (v & 0x7U) << 10U; 2746} 2747static inline u32 gr_crstr_gpc_map5_tile33_f(u32 v) 2748{ 2749 return (v & 0x7U) << 15U; 2750} 2751static inline u32 gr_crstr_gpc_map5_tile34_f(u32 v) 2752{ 2753 return (v & 0x7U) << 20U; 2754} 2755static inline u32 gr_crstr_gpc_map5_tile35_f(u32 v) 2756{ 2757 return (v & 0x7U) << 25U; 2758} 2759static inline u32 gr_crstr_map_table_cfg_r(void) 2760{ 2761 return 0x00418bb8U; 2762} 2763static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) 2764{ 2765 return (v & 0xffU) << 0U; 2766} 2767static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) 2768{ 2769 return (v & 0xffU) << 8U; 2770} 2771static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_r(void) 2772{ 2773 return 0x00418980U; 2774} 2775static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f(u32 v) 2776{ 2777 return (v & 0x7U) << 0U; 2778} 2779static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_1_f(u32 v) 2780{ 2781 return (v & 0x7U) << 4U; 2782} 2783static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_2_f(u32 v) 2784{ 2785 return (v & 0x7U) << 8U; 2786} 2787static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_3_f(u32 v) 2788{ 2789 return (v & 0x7U) << 12U; 2790} 2791static 
inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_4_f(u32 v) 2792{ 2793 return (v & 0x7U) << 16U; 2794} 2795static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_5_f(u32 v) 2796{ 2797 return (v & 0x7U) << 20U; 2798} 2799static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_6_f(u32 v) 2800{ 2801 return (v & 0x7U) << 24U; 2802} 2803static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_7_f(u32 v) 2804{ 2805 return (v & 0x7U) << 28U; 2806} 2807static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_r(void) 2808{ 2809 return 0x00418984U; 2810} 2811static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f(u32 v) 2812{ 2813 return (v & 0x7U) << 0U; 2814} 2815static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_9_f(u32 v) 2816{ 2817 return (v & 0x7U) << 4U; 2818} 2819static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_10_f(u32 v) 2820{ 2821 return (v & 0x7U) << 8U; 2822} 2823static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_11_f(u32 v) 2824{ 2825 return (v & 0x7U) << 12U; 2826} 2827static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_12_f(u32 v) 2828{ 2829 return (v & 0x7U) << 16U; 2830} 2831static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_13_f(u32 v) 2832{ 2833 return (v & 0x7U) << 20U; 2834} 2835static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_14_f(u32 v) 2836{ 2837 return (v & 0x7U) << 24U; 2838} 2839static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_15_f(u32 v) 2840{ 2841 return (v & 0x7U) << 28U; 2842} 2843static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_r(void) 2844{ 2845 return 0x00418988U; 2846} 2847static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f(u32 v) 2848{ 2849 return (v & 0x7U) << 0U; 2850} 2851static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_17_f(u32 v) 2852{ 2853 return (v & 0x7U) << 4U; 2854} 2855static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_18_f(u32 v) 2856{ 2857 return (v & 0x7U) << 8U; 2858} 2859static inline u32 
gr_gpcs_zcull_sm_in_gpc_number_map2_tile_19_f(u32 v) 2860{ 2861 return (v & 0x7U) << 12U; 2862} 2863static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_20_f(u32 v) 2864{ 2865 return (v & 0x7U) << 16U; 2866} 2867static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_21_f(u32 v) 2868{ 2869 return (v & 0x7U) << 20U; 2870} 2871static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_22_f(u32 v) 2872{ 2873 return (v & 0x7U) << 24U; 2874} 2875static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_s(void) 2876{ 2877 return 3U; 2878} 2879static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_f(u32 v) 2880{ 2881 return (v & 0x7U) << 28U; 2882} 2883static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_m(void) 2884{ 2885 return 0x7U << 28U; 2886} 2887static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_v(u32 r) 2888{ 2889 return (r >> 28U) & 0x7U; 2890} 2891static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_r(void) 2892{ 2893 return 0x0041898cU; 2894} 2895static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f(u32 v) 2896{ 2897 return (v & 0x7U) << 0U; 2898} 2899static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_25_f(u32 v) 2900{ 2901 return (v & 0x7U) << 4U; 2902} 2903static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_26_f(u32 v) 2904{ 2905 return (v & 0x7U) << 8U; 2906} 2907static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_27_f(u32 v) 2908{ 2909 return (v & 0x7U) << 12U; 2910} 2911static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_28_f(u32 v) 2912{ 2913 return (v & 0x7U) << 16U; 2914} 2915static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_29_f(u32 v) 2916{ 2917 return (v & 0x7U) << 20U; 2918} 2919static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_30_f(u32 v) 2920{ 2921 return (v & 0x7U) << 24U; 2922} 2923static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_31_f(u32 v) 2924{ 2925 return (v & 0x7U) << 28U; 2926} 2927static inline u32 gr_gpcs_gpm_pd_cfg_r(void) 2928{ 
2929 return 0x00418c6cU; 2930} 2931static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_disable_f(void) 2932{ 2933 return 0x0U; 2934} 2935static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_enable_f(void) 2936{ 2937 return 0x1U; 2938} 2939static inline u32 gr_gpcs_gcc_pagepool_base_r(void) 2940{ 2941 return 0x00419004U; 2942} 2943static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) 2944{ 2945 return (v & 0xffffffffU) << 0U; 2946} 2947static inline u32 gr_gpcs_gcc_pagepool_r(void) 2948{ 2949 return 0x00419008U; 2950} 2951static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) 2952{ 2953 return (v & 0xffU) << 0U; 2954} 2955static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) 2956{ 2957 return 0x0041980cU; 2958} 2959static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) 2960{ 2961 return 0x10U; 2962} 2963static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) 2964{ 2965 return 0x00419848U; 2966} 2967static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) 2968{ 2969 return (v & 0xfffffffU) << 0U; 2970} 2971static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) 2972{ 2973 return (v & 0x1U) << 28U; 2974} 2975static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) 2976{ 2977 return 0x10000000U; 2978} 2979static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) 2980{ 2981 return 0x00419c00U; 2982} 2983static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) 2984{ 2985 return 0x0U; 2986} 2987static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) 2988{ 2989 return 0x8U; 2990} 2991static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(void) 2992{ 2993 return 0x00419e44U; 2994} 2995static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_error_report_f(void) 2996{ 2997 return 0x2U; 2998} 2999static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_api_stack_error_report_f(void) 3000{ 3001 return 0x4U; 3002} 3003static inline u32 
gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_ret_empty_stack_error_report_f(void) 3004{ 3005 return 0x8U; 3006} 3007static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_wrap_report_f(void) 3008{ 3009 return 0x10U; 3010} 3011static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_pc_report_f(void) 3012{ 3013 return 0x20U; 3014} 3015static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_overflow_report_f(void) 3016{ 3017 return 0x40U; 3018} 3019static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_immc_addr_report_f(void) 3020{ 3021 return 0x80U; 3022} 3023static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_reg_report_f(void) 3024{ 3025 return 0x100U; 3026} 3027static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) 3028{ 3029 return 0x200U; 3030} 3031static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_sph_instr_combo_report_f(void) 3032{ 3033 return 0x400U; 3034} 3035static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) 3036{ 3037 return 0x800U; 3038} 3039static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_report_f(void) 3040{ 3041 return 0x1000U; 3042} 3043static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_reg_report_f(void) 3044{ 3045 return 0x2000U; 3046} 3047static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_addr_report_f(void) 3048{ 3049 return 0x4000U; 3050} 3051static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_addr_report_f(void) 3052{ 3053 return 0x8000U; 3054} 3055static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) 3056{ 3057 return 0x10000U; 3058} 3059static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param2_report_f(void) 3060{ 3061 return 0x20000U; 3062} 3063static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) 3064{ 3065 return 
0x40000U; 3066} 3067static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_geometry_sm_error_report_f(void) 3068{ 3069 return 0x80000U; 3070} 3071static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_divergent_report_f(void) 3072{ 3073 return 0x100000U; 3074} 3075static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r(void) 3076{ 3077 return 0x00419e4cU; 3078} 3079static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_sm_to_sm_fault_report_f(void) 3080{ 3081 return 0x1U; 3082} 3083static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_l1_error_report_f(void) 3084{ 3085 return 0x2U; 3086} 3087static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) 3088{ 3089 return 0x4U; 3090} 3091static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_physical_stack_overflow_error_report_f(void) 3092{ 3093 return 0x8U; 3094} 3095static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_int_report_f(void) 3096{ 3097 return 0x10U; 3098} 3099static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_pause_report_f(void) 3100{ 3101 return 0x20U; 3102} 3103static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_single_step_complete_report_f(void) 3104{ 3105 return 0x40U; 3106} 3107static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) 3108{ 3109 return 0x00419d0cU; 3110} 3111static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) 3112{ 3113 return 0x2U; 3114} 3115static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) 3116{ 3117 return 0x1U; 3118} 3119static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 3120{ 3121 return 0x0050450cU; 3122} 3123static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) 3124{ 3125 return (r >> 1U) & 0x1U; 3126} 3127static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) 3128{ 3129 return 0x2U; 3130} 3131static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) 3132{ 3133 return 0x0041ac94U; 
3134} 3135static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) 3136{ 3137 return (v & 0xffU) << 16U; 3138} 3139static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) 3140{ 3141 return 0x00502c90U; 3142} 3143static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) 3144{ 3145 return (r >> 2U) & 0x1U; 3146} 3147static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) 3148{ 3149 return (r >> 16U) & 0xffU; 3150} 3151static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) 3152{ 3153 return 0x00000001U; 3154} 3155static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) 3156{ 3157 return 0x00504508U; 3158} 3159static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) 3160{ 3161 return (r >> 0U) & 0x1U; 3162} 3163static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) 3164{ 3165 return 0x00000001U; 3166} 3167static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) 3168{ 3169 return (r >> 1U) & 0x1U; 3170} 3171static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) 3172{ 3173 return 0x00000001U; 3174} 3175static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_r(void) 3176{ 3177 return 0x00504610U; 3178} 3179static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_m(void) 3180{ 3181 return 0x1U << 0U; 3182} 3183static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_v(u32 r) 3184{ 3185 return (r >> 0U) & 0x1U; 3186} 3187static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_v(void) 3188{ 3189 return 0x00000001U; 3190} 3191static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_f(void) 3192{ 3193 return 0x1U; 3194} 3195static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_v(void) 3196{ 3197 return 0x00000000U; 3198} 3199static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_f(void) 3200{ 3201 return 0x0U; 3202} 3203static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f(void) 3204{ 3205 return 0x80000000U; 3206} 3207static inline u32 
gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_disable_f(void) 3208{ 3209 return 0x0U; 3210} 3211static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_single_step_mode_enable_f(void) 3212{ 3213 return 0x8U; 3214} 3215static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_single_step_mode_disable_f(void) 3216{ 3217 return 0x0U; 3218} 3219static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_run_trigger_task_f(void) 3220{ 3221 return 0x40000000U; 3222} 3223static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_m(void) 3224{ 3225 return 0x1U << 1U; 3226} 3227static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_v(u32 r) 3228{ 3229 return (r >> 1U) & 0x1U; 3230} 3231static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_disable_f(void) 3232{ 3233 return 0x0U; 3234} 3235static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_m(void) 3236{ 3237 return 0x1U << 2U; 3238} 3239static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_v(u32 r) 3240{ 3241 return (r >> 2U) & 0x1U; 3242} 3243static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_disable_f(void) 3244{ 3245 return 0x0U; 3246} 3247static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_stop_on_any_warp_disable_v(void) 3248{ 3249 return 0x00000000U; 3250} 3251static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_stop_on_any_sm_disable_v(void) 3252{ 3253 return 0x00000000U; 3254} 3255static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_r(void) 3256{ 3257 return 0x00504614U; 3258} 3259static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_1_r(void) 3260{ 3261 return 0x00504618U; 3262} 3263static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r(void) 3264{ 3265 return 0x00504624U; 3266} 3267static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_1_r(void) 3268{ 3269 return 0x00504628U; 3270} 3271static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r(void) 3272{ 3273 return 0x00504634U; 3274} 3275static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r(void) 3276{ 3277 return 
0x00504638U; 3278} 3279static inline u32 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_r(void) 3280{ 3281 return 0x00419e24U; 3282} 3283static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_r(void) 3284{ 3285 return 0x0050460cU; 3286} 3287static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_sm_in_trap_mode_v(u32 r) 3288{ 3289 return (r >> 0U) & 0x1U; 3290} 3291static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(u32 r) 3292{ 3293 return (r >> 4U) & 0x1U; 3294} 3295static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v(void) 3296{ 3297 return 0x00000001U; 3298} 3299static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_r(void) 3300{ 3301 return 0x00419e50U; 3302} 3303static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_int_pending_f(void) 3304{ 3305 return 0x10U; 3306} 3307static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_pause_pending_f(void) 3308{ 3309 return 0x20U; 3310} 3311static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_single_step_complete_pending_f(void) 3312{ 3313 return 0x40U; 3314} 3315static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_sm_to_sm_fault_pending_f(void) 3316{ 3317 return 0x1U; 3318} 3319static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_l1_error_pending_f(void) 3320{ 3321 return 0x2U; 3322} 3323static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_multiple_warp_errors_pending_f(void) 3324{ 3325 return 0x4U; 3326} 3327static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_physical_stack_overflow_error_pending_f(void) 3328{ 3329 return 0x8U; 3330} 3331static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_timeout_error_pending_f(void) 3332{ 3333 return 0x80000000U; 3334} 3335static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_r(void) 3336{ 3337 return 0x00504650U; 3338} 3339static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f(void) 3340{ 3341 return 0x10U; 3342} 3343static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f(void) 3344{ 3345 return 0x20U; 3346} 3347static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(void) 
3348{ 3349 return 0x40U; 3350} 3351static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_sm_to_sm_fault_pending_f(void) 3352{ 3353 return 0x1U; 3354} 3355static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_l1_error_pending_f(void) 3356{ 3357 return 0x2U; 3358} 3359static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_multiple_warp_errors_pending_f(void) 3360{ 3361 return 0x4U; 3362} 3363static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_physical_stack_overflow_error_pending_f(void) 3364{ 3365 return 0x8U; 3366} 3367static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_timeout_error_pending_f(void) 3368{ 3369 return 0x80000000U; 3370} 3371static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_r(void) 3372{ 3373 return 0x00504224U; 3374} 3375static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_intr_pending_f(void) 3376{ 3377 return 0x1U; 3378} 3379static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_r(void) 3380{ 3381 return 0x00504648U; 3382} 3383static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_v(u32 r) 3384{ 3385 return (r >> 0U) & 0xffffU; 3386} 3387static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v(void) 3388{ 3389 return 0x00000000U; 3390} 3391static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f(void) 3392{ 3393 return 0x0U; 3394} 3395static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) 3396{ 3397 return 0x00504770U; 3398} 3399static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) 3400{ 3401 return 0x00419f70U; 3402} 3403static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) 3404{ 3405 return 0x1U << 4U; 3406} 3407static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) 3408{ 3409 return (v & 0x1U) << 4U; 3410} 3411static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) 3412{ 3413 return 0x0050477cU; 3414} 3415static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) 3416{ 3417 return 0x00419f7cU; 3418} 3419static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) 3420{ 3421 return 0x1U << 0U; 3422} 3423static inline u32 
gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) 3424{ 3425 return (v & 0x1U) << 0U; 3426} 3427static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) 3428{ 3429 return 0x0041be08U; 3430} 3431static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) 3432{ 3433 return 0x4U; 3434} 3435static inline u32 gr_ppcs_wwdx_map_gpc_map0_r(void) 3436{ 3437 return 0x0041bf00U; 3438} 3439static inline u32 gr_ppcs_wwdx_map_gpc_map1_r(void) 3440{ 3441 return 0x0041bf04U; 3442} 3443static inline u32 gr_ppcs_wwdx_map_gpc_map2_r(void) 3444{ 3445 return 0x0041bf08U; 3446} 3447static inline u32 gr_ppcs_wwdx_map_gpc_map3_r(void) 3448{ 3449 return 0x0041bf0cU; 3450} 3451static inline u32 gr_ppcs_wwdx_map_gpc_map4_r(void) 3452{ 3453 return 0x0041bf10U; 3454} 3455static inline u32 gr_ppcs_wwdx_map_gpc_map5_r(void) 3456{ 3457 return 0x0041bf14U; 3458} 3459static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) 3460{ 3461 return 0x0041bfd0U; 3462} 3463static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) 3464{ 3465 return (v & 0xffU) << 0U; 3466} 3467static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) 3468{ 3469 return (v & 0xffU) << 8U; 3470} 3471static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) 3472{ 3473 return (v & 0x1fU) << 16U; 3474} 3475static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) 3476{ 3477 return (v & 0x7U) << 21U; 3478} 3479static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff5_mod_value_f(u32 v) 3480{ 3481 return (v & 0x1fU) << 24U; 3482} 3483static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) 3484{ 3485 return 0x0041bfd4U; 3486} 3487static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) 3488{ 3489 return (v & 0xffffffU) << 0U; 3490} 3491static inline u32 gr_ppcs_wwdx_map_table_cfg2_r(void) 3492{ 3493 return 0x0041bfe4U; 3494} 3495static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff6_mod_value_f(u32 v) 3496{ 3497 return (v & 0x1fU) << 0U; 3498} 3499static inline u32 
gr_ppcs_wwdx_map_table_cfg2_coeff7_mod_value_f(u32 v) { return (v & 0x1fU) << 5U; }
/*
 * wwdx cfg2 coefficients, ROP settings, zcull sizing constants, DSM perf
 * counters, FE power mode, L1C debug, and SM scheduler texlock accessors.
 * NOTE(review): scraped blob line numbers stripped to restore valid C.
 */
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff8_mod_value_f(u32 v) { return (v & 0x1fU) << 10U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff9_mod_value_f(u32 v) { return (v & 0x1fU) << 15U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff10_mod_value_f(u32 v) { return (v & 0x1fU) << 20U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff11_mod_value_f(u32 v) { return (v & 0x1fU) << 25U; }
static inline u32 gr_gpcs_ppcs_cbm_cfg_r(void) { return 0x0041bec0U; }
static inline u32 gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v(void) { return 0x00000001U; }
static inline u32 gr_bes_zrop_settings_r(void) { return 0x00408850U; }
static inline u32 gr_bes_zrop_settings_num_active_fbps_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_bes_crop_settings_r(void) { return 0x00408958U; }
static inline u32 gr_bes_crop_settings_num_active_fbps_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) { return 0x00000020U; }
static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) { return 0x00000020U; }
static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) { return 0x000000c0U; }
static inline u32 gr_zcull_subregion_qty_v(void) { return 0x00000010U; }
/* SM DSM performance counter control/status/value registers (GPC0/TPC0). */
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel0_r(void) { return 0x00504604U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel1_r(void) { return 0x00504608U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r(void) { return 0x0050465cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control1_r(void) { return 0x00504660U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control2_r(void) { return 0x00504664U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control3_r(void) { return 0x00504668U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control4_r(void) { return 0x0050466cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control5_r(void) { return 0x00504658U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status_r(void) { return 0x00504670U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status1_r(void) { return 0x00504694U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_control_r(void) { return 0x00504730U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_control_r(void) { return 0x00504734U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_control_r(void) { return 0x00504738U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_control_r(void) { return 0x0050473cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_control_r(void) { return 0x00504740U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_control_r(void) { return 0x00504744U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_control_r(void) { return 0x00504748U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_control_r(void) { return 0x0050474cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_r(void) { return 0x00504674U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_r(void) { return 0x00504678U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_r(void) { return 0x0050467cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_r(void) { return 0x00504680U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_r(void) { return 0x00504684U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_r(void) { return 0x00504688U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_r(void) { return 0x0050468cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_r(void) { return 0x00504690U; }
static inline u32 gr_fe_pwr_mode_r(void) { return 0x00404170U; }
static inline u32 gr_fe_pwr_mode_mode_auto_f(void) { return 0x0U; }
static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) { return 0x2U; }
static inline u32 gr_fe_pwr_mode_req_v(u32 r) { return (r >> 4U) & 0x1U; }
static inline u32 gr_fe_pwr_mode_req_send_f(void) { return 0x10U; }
static inline u32 gr_fe_pwr_mode_req_done_v(void) { return 0x00000000U; }
static inline u32 gr_gpc0_tpc0_l1c_dbg_r(void) { return 0x005044b0U; }
static inline u32 gr_gpc0_tpc0_l1c_dbg_cya15_en_f(void) { return 0x8000000U; }
/* SM scheduler texlock: per-feature mask + disable value pairs. */
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_r(void) { return 0x00419ec8U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_m(void) { return 0x1U << 0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_tile_m(void) { return 0x1U << 1U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_tile_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_phase_m(void) { return 0x1U << 2U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_phase_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_tex_m(void) { return 0x1U << 3U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_tex_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_timeout_m(void) { return 0xffU << 4U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_tex_hash_timeout_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_dot_t_unlock_m(void) { return 0x1U << 16U; }
static inline u32 gr_gpcs_tpcs_sm_sch_texlock_dot_t_unlock_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_sch_macro_sched_r(void) { return 0x00419eacU; }
static inline u32 gr_gpcs_tpcs_sm_sch_macro_sched_lockboost_size_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 gr_gpcs_tpcs_sm_sch_macro_sched_lockboost_size_m(void) { return 0x1U << 2U; }
/* Broadcast SM debugger control register. */
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_r(void) { return 0x00419e10U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_on_v(void) { return 0x00000001U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(void) { return 0x80000000U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_m(void) { return 0x1U << 3U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f(void) { return 0x8U; }
static inline u32
gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_disable_f(void) 3796{ 3797 return 0x0U; 3798} 3799static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_m(void) 3800{ 3801 return 0x1U << 30U; 3802} 3803static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_v(u32 r) 3804{ 3805 return (r >> 30U) & 0x1U; 3806} 3807static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_task_f(void) 3808{ 3809 return 0x40000000U; 3810} 3811 3812static inline u32 gr_gpc0_gpccs_falcon_irqstat_r(void) 3813{ 3814 return 0x00502008U; 3815} 3816static inline u32 gr_gpc0_gpccs_falcon_irqmode_r(void) 3817{ 3818 return 0x0050200cU; 3819} 3820static inline u32 gr_gpc0_gpccs_falcon_irqmask_r(void) 3821{ 3822 return 0x00502018U; 3823} 3824static inline u32 gr_gpc0_gpccs_falcon_irqdest_r(void) 3825{ 3826 return 0x0050201cU; 3827} 3828static inline u32 gr_gpc0_gpccs_falcon_debug1_r(void) 3829{ 3830 return 0x00502090U; 3831} 3832static inline u32 gr_gpc0_gpccs_falcon_debuginfo_r(void) 3833{ 3834 return 0x00502094U; 3835} 3836static inline u32 gr_gpc0_gpccs_falcon_engctl_r(void) 3837{ 3838 return 0x005020a4U; 3839} 3840static inline u32 gr_gpc0_gpccs_falcon_curctx_r(void) 3841{ 3842 return 0x00502050U; 3843} 3844static inline u32 gr_gpc0_gpccs_falcon_nxtctx_r(void) 3845{ 3846 return 0x00502054U; 3847} 3848static inline u32 gr_gpc0_gpccs_ctxsw_mailbox_r(u32 i) 3849{ 3850 return 0x00502800U + i*4U; 3851} 3852static inline u32 gr_gpc0_gpccs_falcon_icd_cmd_r(void) 3853{ 3854 return 0x00502200U; 3855} 3856static inline u32 gr_gpc0_gpccs_falcon_icd_cmd_opc_rreg_f(void) 3857{ 3858 return 0x8U; 3859} 3860static inline u32 gr_gpc0_gpccs_falcon_icd_cmd_idx_f(u32 v) 3861{ 3862 return (v & 0x1fU) << 8U; 3863} 3864static inline u32 gr_gpc_gpccs_falcon_icd_rdata_r(void) 3865{ 3866 return 0x0050220cU; 3867} 3868#endif
diff --git a/include/nvgpu/hw/gk20a/hw_ltc_gk20a.h b/include/nvgpu/hw/gk20a/hw_ltc_gk20a.h
deleted file mode 100644
index efe7f98..0000000
--- a/include/nvgpu/hw/gk20a/hw_ltc_gk20a.h
+++ /dev/null
@@ -1,455 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ltc_gk20a_h_ 57#define _hw_ltc_gk20a_h_ 58 59static inline u32 ltc_pltcg_base_v(void) 60{ 61 return 0x00140000U; 62} 63static inline u32 ltc_pltcg_extent_v(void) 64{ 65 return 0x0017ffffU; 66} 67static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) 68{ 69 return 0x001410c8U; 70} 71static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) 72{ 73 return 0x00141200U; 74} 75static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) 76{ 77 return 0x0017ea00U; 78} 79static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) 80{ 81 return 0x00141104U; 82} 83static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) 84{ 85 return (r >> 0U) & 0xffffU; 86} 87static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) 88{ 89 return (r >> 16U) & 0x3U; 90} 91static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) 100{ 101 return 0x00000002U; 102} 103static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) 104{ 105 return 0x0017e8c8U; 106} 107static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) 108{ 109 return 0x1U; 110} 111static inline u32 
ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) 112{ 113 return 0x2U; 114} 115static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) 116{ 117 return (r >> 2U) & 0x1U; 118} 119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) 124{ 125 return 0x4U; 126} 127static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) 128{ 129 return 0x001410c8U; 130} 131static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) 132{ 133 return 0x0017e8ccU; 134} 135static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) 136{ 137 return (v & 0x1ffffU) << 0U; 138} 139static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) 140{ 141 return 0x0017e8d0U; 142} 143static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) 144{ 145 return (v & 0x1ffffU) << 0U; 146} 147static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) 148{ 149 return 0x0001ffffU; 150} 151static inline u32 ltc_ltcs_ltss_cbc_base_r(void) 152{ 153 return 0x0017e8d4U; 154} 155static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) 156{ 157 return 0x0000000bU; 158} 159static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) 160{ 161 return (r >> 0U) & 0x3ffffffU; 162} 163static inline u32 ltc_ltcs_ltss_cbc_param_r(void) 164{ 165 return 0x0017e8dcU; 166} 167static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) 168{ 169 return (r >> 0U) & 0xffffU; 170} 171static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) 172{ 173 return (r >> 24U) & 0xfU; 174} 175static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(u32 r) 176{ 177 return (r >> 28U) & 0xfU; 178} 179static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) 180{ 181 return 0x0017e91cU; 182} 183static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) 184{ 185 return (v & 0x1fU) << 16U; 186} 187static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) 188{ 189 return 0x0017ea44U; 190} 
191static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) 192{ 193 return (v & 0xfU) << 0U; 194} 195static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) 196{ 197 return 0x0017ea48U + i*4U; 198} 199static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) 200{ 201 return 0x00000004U; 202} 203static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) 204{ 205 return 0x0017ea58U; 206} 207static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) 208{ 209 return 32U; 210} 211static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) 212{ 213 return (v & 0xffffffffU) << 0U; 214} 215static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) 216{ 217 return 0xffffffffU << 0U; 218} 219static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) 220{ 221 return (r >> 0U) & 0xffffffffU; 222} 223static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) 224{ 225 return 0x0017e924U; 226} 227static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) 228{ 229 return 0x10000000U; 230} 231static inline u32 ltc_ltcs_ltss_g_elpg_r(void) 232{ 233 return 0x0017e828U; 234} 235static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) 236{ 237 return (r >> 0U) & 0x1U; 238} 239static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) 240{ 241 return 0x00000001U; 242} 243static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) 244{ 245 return 0x1U; 246} 247static inline u32 ltc_ltc0_ltss_g_elpg_r(void) 248{ 249 return 0x00140828U; 250} 251static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) 252{ 253 return (r >> 0U) & 0x1U; 254} 255static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) 256{ 257 return 0x00000001U; 258} 259static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) 260{ 261 return 0x1U; 262} 263static inline u32 ltc_ltc0_ltss_intr_r(void) 264{ 265 return 0x00140820U; 266} 267static inline u32 ltc_ltcs_ltss_intr_r(void) 268{ 269 return 
0x0017e820U; 270} 271static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) 272{ 273 return 0x1U << 20U; 274} 275static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_m(void) 276{ 277 return 0x1U << 21U; 278} 279static inline u32 ltc_ltc0_lts0_intr_r(void) 280{ 281 return 0x00141020U; 282} 283static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) 284{ 285 return 0x0017e910U; 286} 287static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) 288{ 289 return (r >> 0U) & 0x1U; 290} 291static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) 292{ 293 return 0x00000001U; 294} 295static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) 296{ 297 return 0x1U; 298} 299static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) 300{ 301 return (r >> 8U) & 0xfU; 302} 303static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) 304{ 305 return 0x00000003U; 306} 307static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) 308{ 309 return 0x300U; 310} 311static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 312{ 313 return (r >> 28U) & 0x1U; 314} 315static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) 316{ 317 return 0x00000001U; 318} 319static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) 320{ 321 return 0x10000000U; 322} 323static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) 324{ 325 return (r >> 29U) & 0x1U; 326} 327static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) 328{ 329 return 0x00000001U; 330} 331static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) 332{ 333 return 0x20000000U; 334} 335static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) 336{ 337 return (r >> 30U) & 0x1U; 338} 339static inline u32 
ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) 340{ 341 return 0x00000001U; 342} 343static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) 344{ 345 return 0x40000000U; 346} 347static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) 348{ 349 return 0x0017e914U; 350} 351static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) 352{ 353 return (r >> 0U) & 0x1U; 354} 355static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) 356{ 357 return 0x00000001U; 358} 359static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) 360{ 361 return 0x1U; 362} 363static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) 364{ 365 return (r >> 8U) & 0xfU; 366} 367static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) 368{ 369 return 0x00000003U; 370} 371static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) 372{ 373 return 0x300U; 374} 375static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) 376{ 377 return (r >> 16U) & 0x1U; 378} 379static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) 384{ 385 return 0x10000U; 386} 387static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) 388{ 389 return (r >> 28U) & 0x1U; 390} 391static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) 392{ 393 return 0x00000001U; 394} 395static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) 396{ 397 return 0x10000000U; 398} 399static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) 400{ 401 return (r >> 29U) & 0x1U; 402} 403static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) 404{ 405 return 0x00000001U; 406} 407static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) 408{ 409 return 0x20000000U; 410} 411static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) 412{ 413 return (r >> 30U) & 0x1U; 414} 415static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) 416{ 417 return 0x00000001U; 418} 419static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) 420{ 421 return 0x40000000U; 422} 423static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) 424{ 425 return 0x00140910U; 426} 427static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) 428{ 429 return (r >> 0U) & 0x1U; 430} 431static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) 432{ 433 return 0x00000001U; 434} 435static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) 436{ 437 return 0x1U; 438} 439static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) 440{ 441 return 0x00140914U; 442} 443static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) 444{ 445 return (r >> 0U) & 0x1U; 446} 447static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) 448{ 449 return 0x00000001U; 450} 451static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) 452{ 453 return 0x1U; 454} 455#endif
diff --git a/include/nvgpu/hw/gk20a/hw_mc_gk20a.h b/include/nvgpu/hw/gk20a/hw_mc_gk20a.h
deleted file mode 100644
index 3ca2a29..0000000
--- a/include/nvgpu/hw/gk20a/hw_mc_gk20a.h
+++ /dev/null
@@ -1,291 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_mc_gk20a_h_ 57#define _hw_mc_gk20a_h_ 58 59static inline u32 mc_boot_0_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 mc_boot_0_architecture_v(u32 r) 64{ 65 return (r >> 24U) & 0x1fU; 66} 67static inline u32 mc_boot_0_implementation_v(u32 r) 68{ 69 return (r >> 20U) & 0xfU; 70} 71static inline u32 mc_boot_0_major_revision_v(u32 r) 72{ 73 return (r >> 4U) & 0xfU; 74} 75static inline u32 mc_boot_0_minor_revision_v(u32 r) 76{ 77 return (r >> 0U) & 0xfU; 78} 79static inline u32 mc_intr_0_r(void) 80{ 81 return 0x00000100U; 82} 83static inline u32 mc_intr_0_pfifo_pending_f(void) 84{ 85 return 0x100U; 86} 87static inline u32 mc_intr_0_pgraph_pending_f(void) 88{ 89 return 0x1000U; 90} 91static inline u32 mc_intr_0_pmu_pending_f(void) 92{ 93 return 0x1000000U; 94} 95static inline u32 mc_intr_0_ltc_pending_f(void) 96{ 97 return 0x2000000U; 98} 99static inline u32 mc_intr_0_priv_ring_pending_f(void) 100{ 101 return 0x40000000U; 102} 103static inline u32 mc_intr_0_pbus_pending_f(void) 104{ 105 return 0x10000000U; 106} 107static inline u32 mc_intr_1_r(void) 108{ 109 return 0x00000104U; 110} 111static inline u32 mc_intr_mask_0_r(void) 112{ 113 return 0x00000640U; 114} 115static inline u32 mc_intr_mask_0_pmu_enabled_f(void) 116{ 117 
return 0x1000000U; 118} 119static inline u32 mc_intr_en_0_r(void) 120{ 121 return 0x00000140U; 122} 123static inline u32 mc_intr_en_0_inta_disabled_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 mc_intr_en_0_inta_hardware_f(void) 128{ 129 return 0x1U; 130} 131static inline u32 mc_intr_mask_1_r(void) 132{ 133 return 0x00000644U; 134} 135static inline u32 mc_intr_mask_1_pmu_s(void) 136{ 137 return 1U; 138} 139static inline u32 mc_intr_mask_1_pmu_f(u32 v) 140{ 141 return (v & 0x1U) << 24U; 142} 143static inline u32 mc_intr_mask_1_pmu_m(void) 144{ 145 return 0x1U << 24U; 146} 147static inline u32 mc_intr_mask_1_pmu_v(u32 r) 148{ 149 return (r >> 24U) & 0x1U; 150} 151static inline u32 mc_intr_mask_1_pmu_enabled_f(void) 152{ 153 return 0x1000000U; 154} 155static inline u32 mc_intr_en_1_r(void) 156{ 157 return 0x00000144U; 158} 159static inline u32 mc_intr_en_1_inta_disabled_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 mc_intr_en_1_inta_hardware_f(void) 164{ 165 return 0x1U; 166} 167static inline u32 mc_enable_r(void) 168{ 169 return 0x00000200U; 170} 171static inline u32 mc_enable_xbar_enabled_f(void) 172{ 173 return 0x4U; 174} 175static inline u32 mc_enable_l2_enabled_f(void) 176{ 177 return 0x8U; 178} 179static inline u32 mc_enable_pmedia_s(void) 180{ 181 return 1U; 182} 183static inline u32 mc_enable_pmedia_f(u32 v) 184{ 185 return (v & 0x1U) << 4U; 186} 187static inline u32 mc_enable_pmedia_m(void) 188{ 189 return 0x1U << 4U; 190} 191static inline u32 mc_enable_pmedia_v(u32 r) 192{ 193 return (r >> 4U) & 0x1U; 194} 195static inline u32 mc_enable_priv_ring_enabled_f(void) 196{ 197 return 0x20U; 198} 199static inline u32 mc_enable_ce0_m(void) 200{ 201 return 0x1U << 6U; 202} 203static inline u32 mc_enable_pfifo_enabled_f(void) 204{ 205 return 0x100U; 206} 207static inline u32 mc_enable_pgraph_enabled_f(void) 208{ 209 return 0x1000U; 210} 211static inline u32 mc_enable_pwr_v(u32 r) 212{ 213 return (r >> 13U) & 0x1U; 214} 215static inline u32 
mc_enable_pwr_disabled_v(void) 216{ 217 return 0x00000000U; 218} 219static inline u32 mc_enable_pwr_enabled_f(void) 220{ 221 return 0x2000U; 222} 223static inline u32 mc_enable_pfb_enabled_f(void) 224{ 225 return 0x100000U; 226} 227static inline u32 mc_enable_ce2_m(void) 228{ 229 return 0x1U << 21U; 230} 231static inline u32 mc_enable_ce2_enabled_f(void) 232{ 233 return 0x200000U; 234} 235static inline u32 mc_enable_blg_enabled_f(void) 236{ 237 return 0x8000000U; 238} 239static inline u32 mc_enable_perfmon_enabled_f(void) 240{ 241 return 0x10000000U; 242} 243static inline u32 mc_enable_hub_enabled_f(void) 244{ 245 return 0x20000000U; 246} 247static inline u32 mc_enable_pb_r(void) 248{ 249 return 0x00000204U; 250} 251static inline u32 mc_enable_pb_0_s(void) 252{ 253 return 1U; 254} 255static inline u32 mc_enable_pb_0_f(u32 v) 256{ 257 return (v & 0x1U) << 0U; 258} 259static inline u32 mc_enable_pb_0_m(void) 260{ 261 return 0x1U << 0U; 262} 263static inline u32 mc_enable_pb_0_v(u32 r) 264{ 265 return (r >> 0U) & 0x1U; 266} 267static inline u32 mc_enable_pb_0_enabled_v(void) 268{ 269 return 0x00000001U; 270} 271static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) 272{ 273 return (v & 0x1U) << (0U + i*1U); 274} 275static inline u32 mc_elpg_enable_r(void) 276{ 277 return 0x0000020cU; 278} 279static inline u32 mc_elpg_enable_xbar_enabled_f(void) 280{ 281 return 0x4U; 282} 283static inline u32 mc_elpg_enable_pfb_enabled_f(void) 284{ 285 return 0x100000U; 286} 287static inline u32 mc_elpg_enable_hub_enabled_f(void) 288{ 289 return 0x20000000U; 290} 291#endif
diff --git a/include/nvgpu/hw/gk20a/hw_pbdma_gk20a.h b/include/nvgpu/hw/gk20a/hw_pbdma_gk20a.h
deleted file mode 100644
index 2c8f48d..0000000
--- a/include/nvgpu/hw/gk20a/hw_pbdma_gk20a.h
+++ /dev/null
@@ -1,575 +0,0 @@ 1/* 2 * Copyright (c) 2012-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pbdma_gk20a_h_ 57#define _hw_pbdma_gk20a_h_ 58 59static inline u32 pbdma_gp_entry1_r(void) 60{ 61 return 0x10000004U; 62} 63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) 64{ 65 return (r >> 0U) & 0xffU; 66} 67static inline u32 pbdma_gp_entry1_length_f(u32 v) 68{ 69 return (v & 0x1fffffU) << 10U; 70} 71static inline u32 pbdma_gp_entry1_length_v(u32 r) 72{ 73 return (r >> 10U) & 0x1fffffU; 74} 75static inline u32 pbdma_gp_base_r(u32 i) 76{ 77 return 0x00040048U + i*8192U; 78} 79static inline u32 pbdma_gp_base__size_1_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 pbdma_gp_base_offset_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 3U; 86} 87static inline u32 pbdma_gp_base_rsvd_s(void) 88{ 89 return 3U; 90} 91static inline u32 pbdma_gp_base_hi_r(u32 i) 92{ 93 return 0x0004004cU + i*8192U; 94} 95static inline u32 pbdma_gp_base_hi_offset_f(u32 v) 96{ 97 return (v & 0xffU) << 0U; 98} 99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) 100{ 101 return (v & 0x1fU) << 16U; 102} 103static inline u32 pbdma_gp_fetch_r(u32 i) 104{ 105 return 0x00040050U + i*8192U; 106} 107static inline u32 pbdma_gp_get_r(u32 i) 108{ 109 return 0x00040014U + i*8192U; 110} 111static inline u32 pbdma_gp_put_r(u32 i) 112{ 113 return 0x00040000U + 
i*8192U; 114} 115static inline u32 pbdma_timeout_r(u32 i) 116{ 117 return 0x0004012cU + i*8192U; 118} 119static inline u32 pbdma_timeout__size_1_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 pbdma_timeout_period_m(void) 124{ 125 return 0xffffffffU << 0U; 126} 127static inline u32 pbdma_timeout_period_max_f(void) 128{ 129 return 0xffffffffU; 130} 131static inline u32 pbdma_pb_fetch_r(u32 i) 132{ 133 return 0x00040054U + i*8192U; 134} 135static inline u32 pbdma_pb_fetch_hi_r(u32 i) 136{ 137 return 0x00040058U + i*8192U; 138} 139static inline u32 pbdma_get_r(u32 i) 140{ 141 return 0x00040018U + i*8192U; 142} 143static inline u32 pbdma_get_hi_r(u32 i) 144{ 145 return 0x0004001cU + i*8192U; 146} 147static inline u32 pbdma_put_r(u32 i) 148{ 149 return 0x0004005cU + i*8192U; 150} 151static inline u32 pbdma_put_hi_r(u32 i) 152{ 153 return 0x00040060U + i*8192U; 154} 155static inline u32 pbdma_formats_r(u32 i) 156{ 157 return 0x0004009cU + i*8192U; 158} 159static inline u32 pbdma_formats_gp_fermi0_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 pbdma_formats_pb_fermi1_f(void) 164{ 165 return 0x100U; 166} 167static inline u32 pbdma_formats_mp_fermi0_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 pbdma_pb_header_r(u32 i) 172{ 173 return 0x00040084U + i*8192U; 174} 175static inline u32 pbdma_pb_header_priv_user_f(void) 176{ 177 return 0x0U; 178} 179static inline u32 pbdma_pb_header_method_zero_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 pbdma_pb_header_subchannel_zero_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 pbdma_pb_header_level_main_f(void) 188{ 189 return 0x0U; 190} 191static inline u32 pbdma_pb_header_first_true_f(void) 192{ 193 return 0x400000U; 194} 195static inline u32 pbdma_pb_header_type_inc_f(void) 196{ 197 return 0x20000000U; 198} 199static inline u32 pbdma_pb_header_type_non_inc_f(void) 200{ 201 return 0x60000000U; 202} 203static inline u32 pbdma_hdr_shadow_r(u32 i) 204{ 205 return 0x00040118U + i*8192U; 
206} 207static inline u32 pbdma_gp_shadow_0_r(u32 i) 208{ 209 return 0x00040110U + i*8192U; 210} 211static inline u32 pbdma_gp_shadow_1_r(u32 i) 212{ 213 return 0x00040114U + i*8192U; 214} 215static inline u32 pbdma_subdevice_r(u32 i) 216{ 217 return 0x00040094U + i*8192U; 218} 219static inline u32 pbdma_subdevice_id_f(u32 v) 220{ 221 return (v & 0xfffU) << 0U; 222} 223static inline u32 pbdma_subdevice_status_active_f(void) 224{ 225 return 0x10000000U; 226} 227static inline u32 pbdma_subdevice_channel_dma_enable_f(void) 228{ 229 return 0x20000000U; 230} 231static inline u32 pbdma_method0_r(u32 i) 232{ 233 return 0x000400c0U + i*8192U; 234} 235static inline u32 pbdma_method0_addr_f(u32 v) 236{ 237 return (v & 0xfffU) << 2U; 238} 239static inline u32 pbdma_method0_addr_v(u32 r) 240{ 241 return (r >> 2U) & 0xfffU; 242} 243static inline u32 pbdma_method0_subch_v(u32 r) 244{ 245 return (r >> 16U) & 0x7U; 246} 247static inline u32 pbdma_method0_first_true_f(void) 248{ 249 return 0x400000U; 250} 251static inline u32 pbdma_method0_valid_true_f(void) 252{ 253 return 0x80000000U; 254} 255static inline u32 pbdma_method1_r(u32 i) 256{ 257 return 0x000400c8U + i*8192U; 258} 259static inline u32 pbdma_method2_r(u32 i) 260{ 261 return 0x000400d0U + i*8192U; 262} 263static inline u32 pbdma_method3_r(u32 i) 264{ 265 return 0x000400d8U + i*8192U; 266} 267static inline u32 pbdma_data0_r(u32 i) 268{ 269 return 0x000400c4U + i*8192U; 270} 271static inline u32 pbdma_target_r(u32 i) 272{ 273 return 0x000400acU + i*8192U; 274} 275static inline u32 pbdma_target_engine_sw_f(void) 276{ 277 return 0x1fU; 278} 279static inline u32 pbdma_acquire_r(u32 i) 280{ 281 return 0x00040030U + i*8192U; 282} 283static inline u32 pbdma_acquire_retry_man_2_f(void) 284{ 285 return 0x2U; 286} 287static inline u32 pbdma_acquire_retry_exp_2_f(void) 288{ 289 return 0x100U; 290} 291static inline u32 pbdma_acquire_timeout_exp_f(u32 v) 292{ 293 return (v & 0xfU) << 11U; 294} 295static inline u32 
pbdma_acquire_timeout_exp_max_v(void) 296{ 297 return 0x0000000fU; 298} 299static inline u32 pbdma_acquire_timeout_exp_max_f(void) 300{ 301 return 0x7800U; 302} 303static inline u32 pbdma_acquire_timeout_man_f(u32 v) 304{ 305 return (v & 0xffffU) << 15U; 306} 307static inline u32 pbdma_acquire_timeout_man_max_v(void) 308{ 309 return 0x0000ffffU; 310} 311static inline u32 pbdma_acquire_timeout_man_max_f(void) 312{ 313 return 0x7fff8000U; 314} 315static inline u32 pbdma_acquire_timeout_en_enable_f(void) 316{ 317 return 0x80000000U; 318} 319static inline u32 pbdma_acquire_timeout_en_disable_f(void) 320{ 321 return 0x0U; 322} 323static inline u32 pbdma_status_r(u32 i) 324{ 325 return 0x00040100U + i*8192U; 326} 327static inline u32 pbdma_channel_r(u32 i) 328{ 329 return 0x00040120U + i*8192U; 330} 331static inline u32 pbdma_signature_r(u32 i) 332{ 333 return 0x00040010U + i*8192U; 334} 335static inline u32 pbdma_signature_hw_valid_f(void) 336{ 337 return 0xfaceU; 338} 339static inline u32 pbdma_signature_sw_zero_f(void) 340{ 341 return 0x0U; 342} 343static inline u32 pbdma_userd_r(u32 i) 344{ 345 return 0x00040008U + i*8192U; 346} 347static inline u32 pbdma_userd_target_vid_mem_f(void) 348{ 349 return 0x0U; 350} 351static inline u32 pbdma_userd_target_sys_mem_coh_f(void) 352{ 353 return 0x2U; 354} 355static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) 356{ 357 return 0x3U; 358} 359static inline u32 pbdma_userd_addr_f(u32 v) 360{ 361 return (v & 0x7fffffU) << 9U; 362} 363static inline u32 pbdma_userd_hi_r(u32 i) 364{ 365 return 0x0004000cU + i*8192U; 366} 367static inline u32 pbdma_userd_hi_addr_f(u32 v) 368{ 369 return (v & 0xffU) << 0U; 370} 371static inline u32 pbdma_hce_ctrl_r(u32 i) 372{ 373 return 0x000400e4U + i*8192U; 374} 375static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) 376{ 377 return 0x20U; 378} 379static inline u32 pbdma_intr_0_r(u32 i) 380{ 381 return 0x00040108U + i*8192U; 382} 383static inline u32 pbdma_intr_0_memreq_v(u32 r) 384{ 385 
return (r >> 0U) & 0x1U; 386} 387static inline u32 pbdma_intr_0_memreq_pending_f(void) 388{ 389 return 0x1U; 390} 391static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) 392{ 393 return 0x2U; 394} 395static inline u32 pbdma_intr_0_memack_extra_pending_f(void) 396{ 397 return 0x4U; 398} 399static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) 400{ 401 return 0x8U; 402} 403static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) 404{ 405 return 0x10U; 406} 407static inline u32 pbdma_intr_0_memflush_pending_f(void) 408{ 409 return 0x20U; 410} 411static inline u32 pbdma_intr_0_memop_pending_f(void) 412{ 413 return 0x40U; 414} 415static inline u32 pbdma_intr_0_lbconnect_pending_f(void) 416{ 417 return 0x80U; 418} 419static inline u32 pbdma_intr_0_lbreq_pending_f(void) 420{ 421 return 0x100U; 422} 423static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) 424{ 425 return 0x200U; 426} 427static inline u32 pbdma_intr_0_lback_extra_pending_f(void) 428{ 429 return 0x400U; 430} 431static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) 432{ 433 return 0x800U; 434} 435static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) 436{ 437 return 0x1000U; 438} 439static inline u32 pbdma_intr_0_gpfifo_pending_f(void) 440{ 441 return 0x2000U; 442} 443static inline u32 pbdma_intr_0_gpptr_pending_f(void) 444{ 445 return 0x4000U; 446} 447static inline u32 pbdma_intr_0_gpentry_pending_f(void) 448{ 449 return 0x8000U; 450} 451static inline u32 pbdma_intr_0_gpcrc_pending_f(void) 452{ 453 return 0x10000U; 454} 455static inline u32 pbdma_intr_0_pbptr_pending_f(void) 456{ 457 return 0x20000U; 458} 459static inline u32 pbdma_intr_0_pbentry_pending_f(void) 460{ 461 return 0x40000U; 462} 463static inline u32 pbdma_intr_0_pbcrc_pending_f(void) 464{ 465 return 0x80000U; 466} 467static inline u32 pbdma_intr_0_xbarconnect_pending_f(void) 468{ 469 return 0x100000U; 470} 471static inline u32 pbdma_intr_0_method_pending_f(void) 472{ 473 return 0x200000U; 474} 475static 
inline u32 pbdma_intr_0_methodcrc_pending_f(void) 476{ 477 return 0x400000U; 478} 479static inline u32 pbdma_intr_0_device_pending_f(void) 480{ 481 return 0x800000U; 482} 483static inline u32 pbdma_intr_0_semaphore_pending_f(void) 484{ 485 return 0x2000000U; 486} 487static inline u32 pbdma_intr_0_acquire_pending_f(void) 488{ 489 return 0x4000000U; 490} 491static inline u32 pbdma_intr_0_pri_pending_f(void) 492{ 493 return 0x8000000U; 494} 495static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) 496{ 497 return 0x20000000U; 498} 499static inline u32 pbdma_intr_0_pbseg_pending_f(void) 500{ 501 return 0x40000000U; 502} 503static inline u32 pbdma_intr_0_signature_pending_f(void) 504{ 505 return 0x80000000U; 506} 507static inline u32 pbdma_intr_1_r(u32 i) 508{ 509 return 0x00040148U + i*8192U; 510} 511static inline u32 pbdma_intr_en_0_r(u32 i) 512{ 513 return 0x0004010cU + i*8192U; 514} 515static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) 516{ 517 return 0x100U; 518} 519static inline u32 pbdma_intr_en_1_r(u32 i) 520{ 521 return 0x0004014cU + i*8192U; 522} 523static inline u32 pbdma_intr_stall_r(u32 i) 524{ 525 return 0x0004013cU + i*8192U; 526} 527static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) 528{ 529 return 0x100U; 530} 531static inline u32 pbdma_intr_stall_1_r(u32 i) 532{ 533 return 0x00040140U + i*8192U; 534} 535static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void) 536{ 537 return 0x1U; 538} 539static inline u32 pbdma_udma_nop_r(void) 540{ 541 return 0x00000008U; 542} 543static inline u32 pbdma_syncpointa_r(u32 i) 544{ 545 return 0x000400a4U + i*8192U; 546} 547static inline u32 pbdma_syncpointa_payload_v(u32 r) 548{ 549 return (r >> 0U) & 0xffffffffU; 550} 551static inline u32 pbdma_syncpointb_r(u32 i) 552{ 553 return 0x000400a8U + i*8192U; 554} 555static inline u32 pbdma_syncpointb_op_v(u32 r) 556{ 557 return (r >> 0U) & 0x3U; 558} 559static inline u32 pbdma_syncpointb_op_wait_v(void) 560{ 561 return 0x00000000U; 562} 563static 
inline u32 pbdma_syncpointb_wait_switch_v(u32 r) 564{ 565 return (r >> 4U) & 0x1U; 566} 567static inline u32 pbdma_syncpointb_wait_switch_en_v(void) 568{ 569 return 0x00000001U; 570} 571static inline u32 pbdma_syncpointb_syncpt_index_v(u32 r) 572{ 573 return (r >> 8U) & 0xffU; 574} 575#endif
diff --git a/include/nvgpu/hw/gk20a/hw_perf_gk20a.h b/include/nvgpu/hw/gk20a/hw_perf_gk20a.h
deleted file mode 100644
index a93560f..0000000
--- a/include/nvgpu/hw/gk20a/hw_perf_gk20a.h
+++ /dev/null
@@ -1,211 +0,0 @@ 1/* 2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_perf_gk20a_h_ 57#define _hw_perf_gk20a_h_ 58 59static inline u32 perf_pmasys_control_r(void) 60{ 61 return 0x001b4000U; 62} 63static inline u32 perf_pmasys_control_membuf_status_v(u32 r) 64{ 65 return (r >> 4U) & 0x1U; 66} 67static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) 72{ 73 return 0x10U; 74} 75static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) 76{ 77 return (v & 0x1U) << 5U; 78} 79static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) 80{ 81 return (r >> 5U) & 0x1U; 82} 83static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void) 84{ 85 return 0x00000001U; 86} 87static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) 88{ 89 return 0x20U; 90} 91static inline u32 perf_pmasys_mem_block_r(void) 92{ 93 return 0x001b4070U; 94} 95static inline u32 perf_pmasys_mem_block_base_f(u32 v) 96{ 97 return (v & 0xfffffffU) << 0U; 98} 99static inline u32 perf_pmasys_mem_block_target_f(u32 v) 100{ 101 return (v & 0x3U) << 28U; 102} 103static inline u32 perf_pmasys_mem_block_target_v(u32 r) 104{ 105 return (r >> 28U) & 0x3U; 106} 107static inline u32 
perf_pmasys_mem_block_target_lfb_v(void) 108{ 109 return 0x00000000U; 110} 111static inline u32 perf_pmasys_mem_block_target_lfb_f(void) 112{ 113 return 0x0U; 114} 115static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) 124{ 125 return 0x00000003U; 126} 127static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) 128{ 129 return 0x30000000U; 130} 131static inline u32 perf_pmasys_mem_block_valid_f(u32 v) 132{ 133 return (v & 0x1U) << 31U; 134} 135static inline u32 perf_pmasys_mem_block_valid_v(u32 r) 136{ 137 return (r >> 31U) & 0x1U; 138} 139static inline u32 perf_pmasys_mem_block_valid_true_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 perf_pmasys_mem_block_valid_true_f(void) 144{ 145 return 0x80000000U; 146} 147static inline u32 perf_pmasys_mem_block_valid_false_v(void) 148{ 149 return 0x00000000U; 150} 151static inline u32 perf_pmasys_mem_block_valid_false_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 perf_pmasys_outbase_r(void) 156{ 157 return 0x001b4074U; 158} 159static inline u32 perf_pmasys_outbase_ptr_f(u32 v) 160{ 161 return (v & 0x7ffffffU) << 5U; 162} 163static inline u32 perf_pmasys_outbaseupper_r(void) 164{ 165 return 0x001b4078U; 166} 167static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) 168{ 169 return (v & 0xffU) << 0U; 170} 171static inline u32 perf_pmasys_outsize_r(void) 172{ 173 return 0x001b407cU; 174} 175static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) 176{ 177 return (v & 0x7ffffffU) << 5U; 178} 179static inline u32 perf_pmasys_mem_bytes_r(void) 180{ 181 return 0x001b4084U; 182} 183static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) 184{ 185 return (v & 0xfffffffU) << 4U; 186} 187static inline u32 perf_pmasys_mem_bump_r(void) 188{ 189 return 0x001b4088U; 190} 191static inline u32 
perf_pmasys_mem_bump_numbytes_f(u32 v) 192{ 193 return (v & 0xfffffffU) << 4U; 194} 195static inline u32 perf_pmasys_enginestatus_r(void) 196{ 197 return 0x001b40a4U; 198} 199static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) 200{ 201 return (v & 0x1U) << 4U; 202} 203static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) 204{ 205 return 0x00000001U; 206} 207static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) 208{ 209 return 0x10U; 210} 211#endif
diff --git a/include/nvgpu/hw/gk20a/hw_pram_gk20a.h b/include/nvgpu/hw/gk20a/hw_pram_gk20a.h
deleted file mode 100644
index 10923e2..0000000
--- a/include/nvgpu/hw/gk20a/hw_pram_gk20a.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pram_gk20a_h_ 57#define _hw_pram_gk20a_h_ 58 59static inline u32 pram_data032_r(u32 i) 60{ 61 return 0x00700000U + i*4U; 62} 63#endif
diff --git a/include/nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h b/include/nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h
deleted file mode 100644
index ca2775e..0000000
--- a/include/nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h
+++ /dev/null
@@ -1,159 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringmaster_gk20a_h_ 57#define _hw_pri_ringmaster_gk20a_h_ 58 59static inline u32 pri_ringmaster_command_r(void) 60{ 61 return 0x0012004cU; 62} 63static inline u32 pri_ringmaster_command_cmd_m(void) 64{ 65 return 0x3fU << 0U; 66} 67static inline u32 pri_ringmaster_command_cmd_v(u32 r) 68{ 69 return (r >> 0U) & 0x3fU; 70} 71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 pri_ringmaster_command_data_r(void) 92{ 93 return 0x00120048U; 94} 95static inline u32 pri_ringmaster_start_results_r(void) 96{ 97 return 0x00120050U; 98} 99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) 104{ 105 return 0x00000001U; 106} 107static inline u32 
pri_ringmaster_intr_status0_r(void) 108{ 109 return 0x00120058U; 110} 111static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) 112{ 113 return (r >> 0U) & 0x1U; 114} 115static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) 116{ 117 return (r >> 1U) & 0x1U; 118} 119static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) 120{ 121 return (r >> 2U) & 0x1U; 122} 123static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) 124{ 125 return (r >> 8U) & 0x1U; 126} 127static inline u32 pri_ringmaster_intr_status1_r(void) 128{ 129 return 0x0012005cU; 130} 131static inline u32 pri_ringmaster_global_ctl_r(void) 132{ 133 return 0x00120060U; 134} 135static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) 136{ 137 return 0x1U; 138} 139static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 pri_ringmaster_enum_fbp_r(void) 144{ 145 return 0x00120074U; 146} 147static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) 148{ 149 return (r >> 0U) & 0x1fU; 150} 151static inline u32 pri_ringmaster_enum_gpc_r(void) 152{ 153 return 0x00120078U; 154} 155static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) 156{ 157 return (r >> 0U) & 0x1fU; 158} 159#endif
diff --git a/include/nvgpu/hw/gk20a/hw_pri_ringstation_fbp_gk20a.h b/include/nvgpu/hw/gk20a/hw_pri_ringstation_fbp_gk20a.h
deleted file mode 100644
index 06e08bd..0000000
--- a/include/nvgpu/hw/gk20a/hw_pri_ringstation_fbp_gk20a.h
+++ /dev/null
@@ -1,231 +0,0 @@ 1/* 2 * drivers/video/tegra/host/gk20a/hw_pri_ringstation_fbp_gk20a.h 3 * 4 * Copyright (c) 2012-2013, NVIDIA Corporation. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 */ 24 25 /* 26 * Function naming determines intended use: 27 * 28 * <x>_r(void) : Returns the offset for register <x>. 29 * 30 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 31 * 32 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 33 * 34 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 35 * and masked to place it at field <y> of register <x>. This value 36 * can be |'d with others to produce a full register value for 37 * register <x>. 38 * 39 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 40 * value can be ~'d and then &'d to clear the value of field <y> for 41 * register <x>. 
42 * 43 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 44 * to place it at field <y> of register <x>. This value can be |'d 45 * with others to produce a full register value for <x>. 46 * 47 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 48 * <x> value 'r' after being shifted to place its LSB at bit 0. 49 * This value is suitable for direct comparison with other unshifted 50 * values appropriate for use in field <y> of register <x>. 51 * 52 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 53 * field <y> of register <x>. This value is suitable for direct 54 * comparison with unshifted values appropriate for use in field <y> 55 * of register <x>. 56 */ 57 58#ifndef __hw_pri_ringstation_fbp_gk20a_h__ 59#define __hw_pri_ringstation_fbp_gk20a_h__ 60/*This file is autogenerated. Do not edit. */ 61 62static inline u32 pri_ringstation_fbp_master_config_r(u32 i) 63{ 64 return 0x00124300+((i)*4); 65} 66static inline u32 pri_ringstation_fbp_master_config__size_1_v(void) 67{ 68 return 64; 69} 70static inline u32 pri_ringstation_fbp_master_config_timeout_s(void) 71{ 72 return 18; 73} 74static inline u32 pri_ringstation_fbp_master_config_timeout_f(u32 v) 75{ 76 return (v & 0x3ffff) << 0; 77} 78static inline u32 pri_ringstation_fbp_master_config_timeout_m(void) 79{ 80 return 0x3ffff << 0; 81} 82static inline u32 pri_ringstation_fbp_master_config_timeout_v(u32 r) 83{ 84 return (r >> 0) & 0x3ffff; 85} 86static inline u32 pri_ringstation_fbp_master_config_timeout_i_v(void) 87{ 88 return 0x00000064; 89} 90static inline u32 pri_ringstation_fbp_master_config_timeout_i_f(void) 91{ 92 return 0x64; 93} 94static inline u32 pri_ringstation_fbp_master_config_fs_action_s(void) 95{ 96 return 1; 97} 98static inline u32 pri_ringstation_fbp_master_config_fs_action_f(u32 v) 99{ 100 return (v & 0x1) << 30; 101} 102static inline u32 pri_ringstation_fbp_master_config_fs_action_m(void) 103{ 104 return 0x1 << 30; 105} 106static 
inline u32 pri_ringstation_fbp_master_config_fs_action_v(u32 r) 107{ 108 return (r >> 30) & 0x1; 109} 110static inline u32 pri_ringstation_fbp_master_config_fs_action_error_v(void) 111{ 112 return 0x00000000; 113} 114static inline u32 pri_ringstation_fbp_master_config_fs_action_error_f(void) 115{ 116 return 0x0; 117} 118static inline u32 pri_ringstation_fbp_master_config_fs_action_soldier_on_v(void) 119{ 120 return 0x00000001; 121} 122static inline u32 pri_ringstation_fbp_master_config_fs_action_soldier_on_f(void) 123{ 124 return 0x40000000; 125} 126static inline u32 pri_ringstation_fbp_master_config_reset_action_s(void) 127{ 128 return 1; 129} 130static inline u32 pri_ringstation_fbp_master_config_reset_action_f(u32 v) 131{ 132 return (v & 0x1) << 31; 133} 134static inline u32 pri_ringstation_fbp_master_config_reset_action_m(void) 135{ 136 return 0x1 << 31; 137} 138static inline u32 pri_ringstation_fbp_master_config_reset_action_v(u32 r) 139{ 140 return (r >> 31) & 0x1; 141} 142static inline u32 pri_ringstation_fbp_master_config_reset_action_error_v(void) 143{ 144 return 0x00000000; 145} 146static inline u32 pri_ringstation_fbp_master_config_reset_action_error_f(void) 147{ 148 return 0x0; 149} 150static inline u32 pri_ringstation_fbp_master_config_reset_action_soldier_on_v(void) 151{ 152 return 0x00000001; 153} 154static inline u32 pri_ringstation_fbp_master_config_reset_action_soldier_on_f(void) 155{ 156 return 0x80000000; 157} 158static inline u32 pri_ringstation_fbp_master_config_setup_clocks_s(void) 159{ 160 return 3; 161} 162static inline u32 pri_ringstation_fbp_master_config_setup_clocks_f(u32 v) 163{ 164 return (v & 0x7) << 20; 165} 166static inline u32 pri_ringstation_fbp_master_config_setup_clocks_m(void) 167{ 168 return 0x7 << 20; 169} 170static inline u32 pri_ringstation_fbp_master_config_setup_clocks_v(u32 r) 171{ 172 return (r >> 20) & 0x7; 173} 174static inline u32 pri_ringstation_fbp_master_config_setup_clocks_i_v(void) 175{ 176 return 0x00000000; 
177} 178static inline u32 pri_ringstation_fbp_master_config_setup_clocks_i_f(void) 179{ 180 return 0x0; 181} 182static inline u32 pri_ringstation_fbp_master_config_wait_clocks_s(void) 183{ 184 return 3; 185} 186static inline u32 pri_ringstation_fbp_master_config_wait_clocks_f(u32 v) 187{ 188 return (v & 0x7) << 24; 189} 190static inline u32 pri_ringstation_fbp_master_config_wait_clocks_m(void) 191{ 192 return 0x7 << 24; 193} 194static inline u32 pri_ringstation_fbp_master_config_wait_clocks_v(u32 r) 195{ 196 return (r >> 24) & 0x7; 197} 198static inline u32 pri_ringstation_fbp_master_config_wait_clocks_i_v(void) 199{ 200 return 0x00000000; 201} 202static inline u32 pri_ringstation_fbp_master_config_wait_clocks_i_f(void) 203{ 204 return 0x0; 205} 206static inline u32 pri_ringstation_fbp_master_config_hold_clocks_s(void) 207{ 208 return 3; 209} 210static inline u32 pri_ringstation_fbp_master_config_hold_clocks_f(u32 v) 211{ 212 return (v & 0x7) << 27; 213} 214static inline u32 pri_ringstation_fbp_master_config_hold_clocks_m(void) 215{ 216 return 0x7 << 27; 217} 218static inline u32 pri_ringstation_fbp_master_config_hold_clocks_v(u32 r) 219{ 220 return (r >> 27) & 0x7; 221} 222static inline u32 pri_ringstation_fbp_master_config_hold_clocks_i_v(void) 223{ 224 return 0x00000000; 225} 226static inline u32 pri_ringstation_fbp_master_config_hold_clocks_i_f(void) 227{ 228 return 0x0; 229} 230 231#endif /* __hw_pri_ringstation_fbp_gk20a_h__ */
diff --git a/include/nvgpu/hw/gk20a/hw_pri_ringstation_gpc_gk20a.h b/include/nvgpu/hw/gk20a/hw_pri_ringstation_gpc_gk20a.h
deleted file mode 100644
index 6b57429..0000000
--- a/include/nvgpu/hw/gk20a/hw_pri_ringstation_gpc_gk20a.h
+++ /dev/null
@@ -1,79 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_gpc_gk20a_h_ 57#define _hw_pri_ringstation_gpc_gk20a_h_ 58 59static inline u32 pri_ringstation_gpc_master_config_r(u32 i) 60{ 61 return 0x00128300U + i*4U; 62} 63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) 64{ 65 return 0x00128120U; 66} 67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) 68{ 69 return 0x00128124U; 70} 71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) 72{ 73 return 0x00128128U; 74} 75static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) 76{ 77 return 0x0012812cU; 78} 79#endif
diff --git a/include/nvgpu/hw/gk20a/hw_pri_ringstation_sys_gk20a.h b/include/nvgpu/hw/gk20a/hw_pri_ringstation_sys_gk20a.h
deleted file mode 100644
index e4d5c3b..0000000
--- a/include/nvgpu/hw/gk20a/hw_pri_ringstation_sys_gk20a.h
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_sys_gk20a_h_ 57#define _hw_pri_ringstation_sys_gk20a_h_ 58 59static inline u32 pri_ringstation_sys_master_config_r(u32 i) 60{ 61 return 0x00122300U + i*4U; 62} 63static inline u32 pri_ringstation_sys_decode_config_r(void) 64{ 65 return 0x00122204U; 66} 67static inline u32 pri_ringstation_sys_decode_config_ring_m(void) 68{ 69 return 0x7U << 0U; 70} 71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 pri_ringstation_sys_priv_error_adr_r(void) 76{ 77 return 0x00122120U; 78} 79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) 80{ 81 return 0x00122124U; 82} 83static inline u32 pri_ringstation_sys_priv_error_info_r(void) 84{ 85 return 0x00122128U; 86} 87static inline u32 pri_ringstation_sys_priv_error_code_r(void) 88{ 89 return 0x0012212cU; 90} 91#endif
diff --git a/include/nvgpu/hw/gk20a/hw_proj_gk20a.h b/include/nvgpu/hw/gk20a/hw_proj_gk20a.h
deleted file mode 100644
index 10509ca..0000000
--- a/include/nvgpu/hw/gk20a/hw_proj_gk20a.h
+++ /dev/null
@@ -1,167 +0,0 @@ 1/* 2 * Copyright (c) 2012-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_proj_gk20a_h_ 57#define _hw_proj_gk20a_h_ 58 59static inline u32 proj_gpc_base_v(void) 60{ 61 return 0x00500000U; 62} 63static inline u32 proj_gpc_shared_base_v(void) 64{ 65 return 0x00418000U; 66} 67static inline u32 proj_gpc_stride_v(void) 68{ 69 return 0x00008000U; 70} 71static inline u32 proj_gpc_priv_stride_v(void) 72{ 73 return 0x00000800U; 74} 75static inline u32 proj_ltc_stride_v(void) 76{ 77 return 0x00002000U; 78} 79static inline u32 proj_lts_stride_v(void) 80{ 81 return 0x00000400U; 82} 83static inline u32 proj_fbpa_stride_v(void) 84{ 85 return 0x00001000U; 86} 87static inline u32 proj_ppc_in_gpc_base_v(void) 88{ 89 return 0x00003000U; 90} 91static inline u32 proj_ppc_in_gpc_shared_base_v(void) 92{ 93 return 0x00003e00U; 94} 95static inline u32 proj_ppc_in_gpc_stride_v(void) 96{ 97 return 0x00000200U; 98} 99static inline u32 proj_rop_base_v(void) 100{ 101 return 0x00410000U; 102} 103static inline u32 proj_rop_shared_base_v(void) 104{ 105 return 0x00408800U; 106} 107static inline u32 proj_rop_stride_v(void) 108{ 109 return 0x00000400U; 110} 111static inline u32 proj_tpc_in_gpc_base_v(void) 112{ 113 return 0x00004000U; 114} 115static inline u32 proj_tpc_in_gpc_stride_v(void) 116{ 117 return 0x00000800U; 118} 119static 
inline u32 proj_tpc_in_gpc_shared_base_v(void) 120{ 121 return 0x00001800U; 122} 123static inline u32 proj_host_num_engines_v(void) 124{ 125 return 0x00000002U; 126} 127static inline u32 proj_host_num_pbdma_v(void) 128{ 129 return 0x00000001U; 130} 131static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) 132{ 133 return 0x00000001U; 134} 135static inline u32 proj_scal_litter_num_fbps_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 proj_scal_litter_num_fbpas_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 proj_scal_litter_num_gpcs_v(void) 144{ 145 return 0x00000001U; 146} 147static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 proj_scal_litter_num_zcull_banks_v(void) 156{ 157 return 0x00000004U; 158} 159static inline u32 proj_scal_max_gpcs_v(void) 160{ 161 return 0x00000020U; 162} 163static inline u32 proj_scal_max_tpc_per_gpc_v(void) 164{ 165 return 0x00000008U; 166} 167#endif
diff --git a/include/nvgpu/hw/gk20a/hw_pwr_gk20a.h b/include/nvgpu/hw/gk20a/hw_pwr_gk20a.h
deleted file mode 100644
index b879563..0000000
--- a/include/nvgpu/hw/gk20a/hw_pwr_gk20a.h
+++ /dev/null
@@ -1,827 +0,0 @@ 1/* 2 * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pwr_gk20a_h_ 57#define _hw_pwr_gk20a_h_ 58 59static inline u32 pwr_falcon_irqsset_r(void) 60{ 61 return 0x0010a000U; 62} 63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pwr_falcon_irqsclr_r(void) 68{ 69 return 0x0010a004U; 70} 71static inline u32 pwr_falcon_irqstat_r(void) 72{ 73 return 0x0010a008U; 74} 75static inline u32 pwr_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pwr_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pwr_falcon_irqmode_r(void) 88{ 89 return 0x0010a00cU; 90} 91static inline u32 pwr_falcon_irqmset_r(void) 92{ 93 return 0x0010a010U; 94} 95static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 pwr_falcon_irqmset_halt_f(u32 v) 112{ 113 return (v & 0x1U) << 4U; 114} 115static inline u32 
pwr_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 pwr_falcon_irqmclr_r(void) 128{ 129 return 0x0010a014U; 130} 131static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 pwr_falcon_irqmask_r(void) 168{ 169 return 0x0010a018U; 170} 171static inline u32 pwr_falcon_irqdest_r(void) 172{ 173 return 0x0010a01cU; 174} 175static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 
pwr_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 pwr_falcon_curctx_r(void) 248{ 249 return 0x0010a050U; 250} 251static inline u32 pwr_falcon_nxtctx_r(void) 252{ 253 return 0x0010a054U; 254} 255static inline u32 pwr_falcon_mailbox0_r(void) 256{ 257 return 0x0010a040U; 258} 259static inline u32 pwr_falcon_mailbox1_r(void) 260{ 261 return 0x0010a044U; 262} 263static inline u32 pwr_falcon_itfen_r(void) 264{ 265 return 0x0010a048U; 266} 267static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 pwr_falcon_idlestate_r(void) 272{ 273 return 0x0010a04cU; 274} 275static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 
pwr_falcon_os_r(void) 284{ 285 return 0x0010a080U; 286} 287static inline u32 pwr_falcon_engctl_r(void) 288{ 289 return 0x0010a0a4U; 290} 291static inline u32 pwr_falcon_cpuctl_r(void) 292{ 293 return 0x0010a100U; 294} 295static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) 300{ 301 return (v & 0x1U) << 4U; 302} 303static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) 304{ 305 return 0x1U << 4U; 306} 307static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) 308{ 309 return (r >> 4U) & 0x1U; 310} 311static inline u32 pwr_falcon_imemc_r(u32 i) 312{ 313 return 0x0010a180U + i*16U; 314} 315static inline u32 pwr_falcon_imemc_offs_f(u32 v) 316{ 317 return (v & 0x3fU) << 2U; 318} 319static inline u32 pwr_falcon_imemc_blk_f(u32 v) 320{ 321 return (v & 0xffU) << 8U; 322} 323static inline u32 pwr_falcon_imemc_aincw_f(u32 v) 324{ 325 return (v & 0x1U) << 24U; 326} 327static inline u32 pwr_falcon_imemd_r(u32 i) 328{ 329 return 0x0010a184U + i*16U; 330} 331static inline u32 pwr_falcon_imemt_r(u32 i) 332{ 333 return 0x0010a188U + i*16U; 334} 335static inline u32 pwr_falcon_bootvec_r(void) 336{ 337 return 0x0010a104U; 338} 339static inline u32 pwr_falcon_bootvec_vec_f(u32 v) 340{ 341 return (v & 0xffffffffU) << 0U; 342} 343static inline u32 pwr_falcon_dmactl_r(void) 344{ 345 return 0x0010a10cU; 346} 347static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) 348{ 349 return 0x1U << 1U; 350} 351static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) 352{ 353 return 0x1U << 2U; 354} 355static inline u32 pwr_falcon_hwcfg_r(void) 356{ 357 return 0x0010a108U; 358} 359static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) 360{ 361 return (r >> 0U) & 0x1ffU; 362} 363static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) 364{ 365 return (r >> 9U) & 0x1ffU; 366} 367static inline u32 pwr_falcon_dmatrfbase_r(void) 368{ 369 return 0x0010a110U; 370} 371static inline u32 
pwr_falcon_dmatrfmoffs_r(void) 372{ 373 return 0x0010a114U; 374} 375static inline u32 pwr_falcon_dmatrfcmd_r(void) 376{ 377 return 0x0010a118U; 378} 379static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) 380{ 381 return (v & 0x1U) << 4U; 382} 383static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) 384{ 385 return (v & 0x1U) << 5U; 386} 387static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) 388{ 389 return (v & 0x7U) << 8U; 390} 391static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) 392{ 393 return (v & 0x7U) << 12U; 394} 395static inline u32 pwr_falcon_dmatrffboffs_r(void) 396{ 397 return 0x0010a11cU; 398} 399static inline u32 pwr_falcon_exterraddr_r(void) 400{ 401 return 0x0010a168U; 402} 403static inline u32 pwr_falcon_exterrstat_r(void) 404{ 405 return 0x0010a16cU; 406} 407static inline u32 pwr_falcon_exterrstat_valid_m(void) 408{ 409 return 0x1U << 31U; 410} 411static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) 412{ 413 return (r >> 31U) & 0x1U; 414} 415static inline u32 pwr_falcon_exterrstat_valid_true_v(void) 416{ 417 return 0x00000001U; 418} 419static inline u32 pwr_pmu_falcon_icd_cmd_r(void) 420{ 421 return 0x0010a200U; 422} 423static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) 424{ 425 return 4U; 426} 427static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) 428{ 429 return (v & 0xfU) << 0U; 430} 431static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) 432{ 433 return 0xfU << 0U; 434} 435static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) 436{ 437 return (r >> 0U) & 0xfU; 438} 439static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) 440{ 441 return 0x8U; 442} 443static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) 444{ 445 return 0xeU; 446} 447static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) 448{ 449 return (v & 0x1fU) << 8U; 450} 451static inline u32 pwr_pmu_falcon_icd_rdata_r(void) 452{ 453 return 0x0010a20cU; 454} 455static inline u32 pwr_falcon_dmemc_r(u32 i) 456{ 457 return 0x0010a1c0U + i*8U; 458} 459static inline u32 
pwr_falcon_dmemc_offs_f(u32 v) 460{ 461 return (v & 0x3fU) << 2U; 462} 463static inline u32 pwr_falcon_dmemc_offs_m(void) 464{ 465 return 0x3fU << 2U; 466} 467static inline u32 pwr_falcon_dmemc_blk_f(u32 v) 468{ 469 return (v & 0xffU) << 8U; 470} 471static inline u32 pwr_falcon_dmemc_blk_m(void) 472{ 473 return 0xffU << 8U; 474} 475static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) 476{ 477 return (v & 0x1U) << 24U; 478} 479static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) 480{ 481 return (v & 0x1U) << 25U; 482} 483static inline u32 pwr_falcon_dmemd_r(u32 i) 484{ 485 return 0x0010a1c4U + i*8U; 486} 487static inline u32 pwr_pmu_new_instblk_r(void) 488{ 489 return 0x0010a480U; 490} 491static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) 492{ 493 return (v & 0xfffffffU) << 0U; 494} 495static inline u32 pwr_pmu_new_instblk_target_fb_f(void) 496{ 497 return 0x0U; 498} 499static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) 500{ 501 return 0x20000000U; 502} 503static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) 504{ 505 return 0x30000000U; 506} 507static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) 508{ 509 return (v & 0x1U) << 30U; 510} 511static inline u32 pwr_pmu_mutex_id_r(void) 512{ 513 return 0x0010a488U; 514} 515static inline u32 pwr_pmu_mutex_id_value_v(u32 r) 516{ 517 return (r >> 0U) & 0xffU; 518} 519static inline u32 pwr_pmu_mutex_id_value_init_v(void) 520{ 521 return 0x00000000U; 522} 523static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) 524{ 525 return 0x000000ffU; 526} 527static inline u32 pwr_pmu_mutex_id_release_r(void) 528{ 529 return 0x0010a48cU; 530} 531static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) 532{ 533 return (v & 0xffU) << 0U; 534} 535static inline u32 pwr_pmu_mutex_id_release_value_m(void) 536{ 537 return 0xffU << 0U; 538} 539static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) 540{ 541 return 0x00000000U; 542} 543static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) 544{ 545 return 0x0U; 546} 
547static inline u32 pwr_pmu_mutex_r(u32 i) 548{ 549 return 0x0010a580U + i*4U; 550} 551static inline u32 pwr_pmu_mutex__size_1_v(void) 552{ 553 return 0x00000010U; 554} 555static inline u32 pwr_pmu_mutex_value_f(u32 v) 556{ 557 return (v & 0xffU) << 0U; 558} 559static inline u32 pwr_pmu_mutex_value_v(u32 r) 560{ 561 return (r >> 0U) & 0xffU; 562} 563static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) 564{ 565 return 0x0U; 566} 567static inline u32 pwr_pmu_queue_head_r(u32 i) 568{ 569 return 0x0010a4a0U + i*4U; 570} 571static inline u32 pwr_pmu_queue_head__size_1_v(void) 572{ 573 return 0x00000004U; 574} 575static inline u32 pwr_pmu_queue_head_address_f(u32 v) 576{ 577 return (v & 0xffffffffU) << 0U; 578} 579static inline u32 pwr_pmu_queue_head_address_v(u32 r) 580{ 581 return (r >> 0U) & 0xffffffffU; 582} 583static inline u32 pwr_pmu_queue_tail_r(u32 i) 584{ 585 return 0x0010a4b0U + i*4U; 586} 587static inline u32 pwr_pmu_queue_tail__size_1_v(void) 588{ 589 return 0x00000004U; 590} 591static inline u32 pwr_pmu_queue_tail_address_f(u32 v) 592{ 593 return (v & 0xffffffffU) << 0U; 594} 595static inline u32 pwr_pmu_queue_tail_address_v(u32 r) 596{ 597 return (r >> 0U) & 0xffffffffU; 598} 599static inline u32 pwr_pmu_msgq_head_r(void) 600{ 601 return 0x0010a4c8U; 602} 603static inline u32 pwr_pmu_msgq_head_val_f(u32 v) 604{ 605 return (v & 0xffffffffU) << 0U; 606} 607static inline u32 pwr_pmu_msgq_head_val_v(u32 r) 608{ 609 return (r >> 0U) & 0xffffffffU; 610} 611static inline u32 pwr_pmu_msgq_tail_r(void) 612{ 613 return 0x0010a4ccU; 614} 615static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) 616{ 617 return (v & 0xffffffffU) << 0U; 618} 619static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) 620{ 621 return (r >> 0U) & 0xffffffffU; 622} 623static inline u32 pwr_pmu_idle_mask_r(u32 i) 624{ 625 return 0x0010a504U + i*16U; 626} 627static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) 628{ 629 return 0x1U; 630} 631static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) 
632{ 633 return 0x200000U; 634} 635static inline u32 pwr_pmu_idle_mask_1_r(u32 i) 636{ 637 return 0x0010aa34U + i*8U; 638} 639static inline u32 pwr_pmu_idle_count_r(u32 i) 640{ 641 return 0x0010a508U + i*16U; 642} 643static inline u32 pwr_pmu_idle_count_value_f(u32 v) 644{ 645 return (v & 0x7fffffffU) << 0U; 646} 647static inline u32 pwr_pmu_idle_count_value_v(u32 r) 648{ 649 return (r >> 0U) & 0x7fffffffU; 650} 651static inline u32 pwr_pmu_idle_count_reset_f(u32 v) 652{ 653 return (v & 0x1U) << 31U; 654} 655static inline u32 pwr_pmu_idle_ctrl_r(u32 i) 656{ 657 return 0x0010a50cU + i*16U; 658} 659static inline u32 pwr_pmu_idle_ctrl_value_m(void) 660{ 661 return 0x3U << 0U; 662} 663static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) 664{ 665 return 0x2U; 666} 667static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) 668{ 669 return 0x3U; 670} 671static inline u32 pwr_pmu_idle_ctrl_filter_m(void) 672{ 673 return 0x1U << 2U; 674} 675static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) 676{ 677 return 0x0U; 678} 679static inline u32 pwr_pmu_idle_threshold_r(u32 i) 680{ 681 return 0x0010a8a0U + i*4U; 682} 683static inline u32 pwr_pmu_idle_threshold_value_f(u32 v) 684{ 685 return (v & 0x7fffffffU) << 0U; 686} 687static inline u32 pwr_pmu_idle_intr_r(void) 688{ 689 return 0x0010a9e8U; 690} 691static inline u32 pwr_pmu_idle_intr_en_f(u32 v) 692{ 693 return (v & 0x1U) << 0U; 694} 695static inline u32 pwr_pmu_idle_intr_en_disabled_v(void) 696{ 697 return 0x00000000U; 698} 699static inline u32 pwr_pmu_idle_intr_en_enabled_v(void) 700{ 701 return 0x00000001U; 702} 703static inline u32 pwr_pmu_idle_intr_status_r(void) 704{ 705 return 0x0010a9ecU; 706} 707static inline u32 pwr_pmu_idle_intr_status_intr_f(u32 v) 708{ 709 return (v & 0x1U) << 0U; 710} 711static inline u32 pwr_pmu_idle_intr_status_intr_m(void) 712{ 713 return 0x1U << 0U; 714} 715static inline u32 pwr_pmu_idle_intr_status_intr_v(u32 r) 716{ 717 return (r >> 0U) & 0x1U; 718} 719static inline u32 
pwr_pmu_idle_mask_supp_r(u32 i) 720{ 721 return 0x0010a9f0U + i*8U; 722} 723static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) 724{ 725 return 0x0010a9f4U + i*8U; 726} 727static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) 728{ 729 return 0x0010aa30U + i*8U; 730} 731static inline u32 pwr_pmu_debug_r(u32 i) 732{ 733 return 0x0010a5c0U + i*4U; 734} 735static inline u32 pwr_pmu_debug__size_1_v(void) 736{ 737 return 0x00000004U; 738} 739static inline u32 pwr_pmu_mailbox_r(u32 i) 740{ 741 return 0x0010a450U + i*4U; 742} 743static inline u32 pwr_pmu_mailbox__size_1_v(void) 744{ 745 return 0x0000000cU; 746} 747static inline u32 pwr_pmu_bar0_addr_r(void) 748{ 749 return 0x0010a7a0U; 750} 751static inline u32 pwr_pmu_bar0_data_r(void) 752{ 753 return 0x0010a7a4U; 754} 755static inline u32 pwr_pmu_bar0_ctl_r(void) 756{ 757 return 0x0010a7acU; 758} 759static inline u32 pwr_pmu_bar0_timeout_r(void) 760{ 761 return 0x0010a7a8U; 762} 763static inline u32 pwr_pmu_bar0_fecs_error_r(void) 764{ 765 return 0x0010a988U; 766} 767static inline u32 pwr_pmu_bar0_error_status_r(void) 768{ 769 return 0x0010a7b0U; 770} 771static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) 772{ 773 return 0x0010a6c0U + i*4U; 774} 775static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) 776{ 777 return 0x0010a6e8U + i*4U; 778} 779static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) 780{ 781 return 0x0010a710U + i*4U; 782} 783static inline u32 pwr_pmu_pg_intren_r(u32 i) 784{ 785 return 0x0010a760U + i*4U; 786} 787static inline u32 pwr_fbif_transcfg_r(u32 i) 788{ 789 return 0x0010a600U + i*4U; 790} 791static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) 792{ 793 return 0x0U; 794} 795static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) 796{ 797 return 0x1U; 798} 799static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) 800{ 801 return 0x2U; 802} 803static inline u32 pwr_fbif_transcfg_mem_type_s(void) 804{ 805 return 1U; 806} 807static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) 808{ 809 
return (v & 0x1U) << 2U; 810} 811static inline u32 pwr_fbif_transcfg_mem_type_m(void) 812{ 813 return 0x1U << 2U; 814} 815static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) 816{ 817 return (r >> 2U) & 0x1U; 818} 819static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) 820{ 821 return 0x0U; 822} 823static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) 824{ 825 return 0x4U; 826} 827#endif
diff --git a/include/nvgpu/hw/gk20a/hw_ram_gk20a.h b/include/nvgpu/hw/gk20a/hw_ram_gk20a.h
deleted file mode 100644
index ed385d9..0000000
--- a/include/nvgpu/hw/gk20a/hw_ram_gk20a.h
+++ /dev/null
@@ -1,443 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ram_gk20a_h_ 57#define _hw_ram_gk20a_h_ 58 59static inline u32 ram_in_ramfc_s(void) 60{ 61 return 4096U; 62} 63static inline u32 ram_in_ramfc_w(void) 64{ 65 return 0U; 66} 67static inline u32 ram_in_page_dir_base_target_f(u32 v) 68{ 69 return (v & 0x3U) << 0U; 70} 71static inline u32 ram_in_page_dir_base_target_w(void) 72{ 73 return 128U; 74} 75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 ram_in_page_dir_base_vol_w(void) 88{ 89 return 128U; 90} 91static inline u32 ram_in_page_dir_base_vol_true_f(void) 92{ 93 return 0x4U; 94} 95static inline u32 ram_in_page_dir_base_lo_f(u32 v) 96{ 97 return (v & 0xfffffU) << 12U; 98} 99static inline u32 ram_in_page_dir_base_lo_w(void) 100{ 101 return 128U; 102} 103static inline u32 ram_in_page_dir_base_hi_f(u32 v) 104{ 105 return (v & 0xffU) << 0U; 106} 107static inline u32 ram_in_page_dir_base_hi_w(void) 108{ 109 return 129U; 110} 111static inline u32 ram_in_adr_limit_lo_f(u32 v) 112{ 113 return (v & 0xfffffU) << 12U; 114} 115static inline u32 
ram_in_adr_limit_lo_w(void) 116{ 117 return 130U; 118} 119static inline u32 ram_in_adr_limit_hi_f(u32 v) 120{ 121 return (v & 0xffU) << 0U; 122} 123static inline u32 ram_in_adr_limit_hi_w(void) 124{ 125 return 131U; 126} 127static inline u32 ram_in_engine_cs_w(void) 128{ 129 return 132U; 130} 131static inline u32 ram_in_engine_cs_wfi_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 ram_in_engine_cs_wfi_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 ram_in_engine_cs_fg_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 ram_in_engine_cs_fg_f(void) 144{ 145 return 0x8U; 146} 147static inline u32 ram_in_gr_cs_w(void) 148{ 149 return 132U; 150} 151static inline u32 ram_in_gr_cs_wfi_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 ram_in_gr_wfi_target_w(void) 156{ 157 return 132U; 158} 159static inline u32 ram_in_gr_wfi_mode_w(void) 160{ 161 return 132U; 162} 163static inline u32 ram_in_gr_wfi_mode_physical_v(void) 164{ 165 return 0x00000000U; 166} 167static inline u32 ram_in_gr_wfi_mode_physical_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 ram_in_gr_wfi_mode_virtual_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 ram_in_gr_wfi_mode_virtual_f(void) 176{ 177 return 0x4U; 178} 179static inline u32 ram_in_gr_wfi_ptr_lo_f(u32 v) 180{ 181 return (v & 0xfffffU) << 12U; 182} 183static inline u32 ram_in_gr_wfi_ptr_lo_w(void) 184{ 185 return 132U; 186} 187static inline u32 ram_in_gr_wfi_ptr_hi_f(u32 v) 188{ 189 return (v & 0xffU) << 0U; 190} 191static inline u32 ram_in_gr_wfi_ptr_hi_w(void) 192{ 193 return 133U; 194} 195static inline u32 ram_in_base_shift_v(void) 196{ 197 return 0x0000000cU; 198} 199static inline u32 ram_in_alloc_size_v(void) 200{ 201 return 0x00001000U; 202} 203static inline u32 ram_fc_size_val_v(void) 204{ 205 return 0x00000200U; 206} 207static inline u32 ram_fc_gp_put_w(void) 208{ 209 return 0U; 210} 211static inline u32 ram_fc_userd_w(void) 212{ 213 return 2U; 214} 215static inline u32 
ram_fc_userd_hi_w(void) 216{ 217 return 3U; 218} 219static inline u32 ram_fc_signature_w(void) 220{ 221 return 4U; 222} 223static inline u32 ram_fc_gp_get_w(void) 224{ 225 return 5U; 226} 227static inline u32 ram_fc_pb_get_w(void) 228{ 229 return 6U; 230} 231static inline u32 ram_fc_pb_get_hi_w(void) 232{ 233 return 7U; 234} 235static inline u32 ram_fc_pb_top_level_get_w(void) 236{ 237 return 8U; 238} 239static inline u32 ram_fc_pb_top_level_get_hi_w(void) 240{ 241 return 9U; 242} 243static inline u32 ram_fc_acquire_w(void) 244{ 245 return 12U; 246} 247static inline u32 ram_fc_semaphorea_w(void) 248{ 249 return 14U; 250} 251static inline u32 ram_fc_semaphoreb_w(void) 252{ 253 return 15U; 254} 255static inline u32 ram_fc_semaphorec_w(void) 256{ 257 return 16U; 258} 259static inline u32 ram_fc_semaphored_w(void) 260{ 261 return 17U; 262} 263static inline u32 ram_fc_gp_base_w(void) 264{ 265 return 18U; 266} 267static inline u32 ram_fc_gp_base_hi_w(void) 268{ 269 return 19U; 270} 271static inline u32 ram_fc_gp_fetch_w(void) 272{ 273 return 20U; 274} 275static inline u32 ram_fc_pb_fetch_w(void) 276{ 277 return 21U; 278} 279static inline u32 ram_fc_pb_fetch_hi_w(void) 280{ 281 return 22U; 282} 283static inline u32 ram_fc_pb_put_w(void) 284{ 285 return 23U; 286} 287static inline u32 ram_fc_pb_put_hi_w(void) 288{ 289 return 24U; 290} 291static inline u32 ram_fc_pb_header_w(void) 292{ 293 return 33U; 294} 295static inline u32 ram_fc_pb_count_w(void) 296{ 297 return 34U; 298} 299static inline u32 ram_fc_subdevice_w(void) 300{ 301 return 37U; 302} 303static inline u32 ram_fc_formats_w(void) 304{ 305 return 39U; 306} 307static inline u32 ram_fc_syncpointa_w(void) 308{ 309 return 41U; 310} 311static inline u32 ram_fc_syncpointb_w(void) 312{ 313 return 42U; 314} 315static inline u32 ram_fc_target_w(void) 316{ 317 return 43U; 318} 319static inline u32 ram_fc_hce_ctrl_w(void) 320{ 321 return 57U; 322} 323static inline u32 ram_fc_chid_w(void) 324{ 325 return 58U; 326} 327static 
inline u32 ram_fc_chid_id_f(u32 v) 328{ 329 return (v & 0xfffU) << 0U; 330} 331static inline u32 ram_fc_chid_id_w(void) 332{ 333 return 0U; 334} 335static inline u32 ram_fc_runlist_timeslice_w(void) 336{ 337 return 62U; 338} 339static inline u32 ram_fc_pb_timeslice_w(void) 340{ 341 return 63U; 342} 343static inline u32 ram_userd_base_shift_v(void) 344{ 345 return 0x00000009U; 346} 347static inline u32 ram_userd_chan_size_v(void) 348{ 349 return 0x00000200U; 350} 351static inline u32 ram_userd_put_w(void) 352{ 353 return 16U; 354} 355static inline u32 ram_userd_get_w(void) 356{ 357 return 17U; 358} 359static inline u32 ram_userd_ref_w(void) 360{ 361 return 18U; 362} 363static inline u32 ram_userd_put_hi_w(void) 364{ 365 return 19U; 366} 367static inline u32 ram_userd_ref_threshold_w(void) 368{ 369 return 20U; 370} 371static inline u32 ram_userd_top_level_get_w(void) 372{ 373 return 22U; 374} 375static inline u32 ram_userd_top_level_get_hi_w(void) 376{ 377 return 23U; 378} 379static inline u32 ram_userd_get_hi_w(void) 380{ 381 return 24U; 382} 383static inline u32 ram_userd_gp_get_w(void) 384{ 385 return 34U; 386} 387static inline u32 ram_userd_gp_put_w(void) 388{ 389 return 35U; 390} 391static inline u32 ram_userd_gp_top_level_get_w(void) 392{ 393 return 22U; 394} 395static inline u32 ram_userd_gp_top_level_get_hi_w(void) 396{ 397 return 23U; 398} 399static inline u32 ram_rl_entry_size_v(void) 400{ 401 return 0x00000008U; 402} 403static inline u32 ram_rl_entry_chid_f(u32 v) 404{ 405 return (v & 0xfffU) << 0U; 406} 407static inline u32 ram_rl_entry_id_f(u32 v) 408{ 409 return (v & 0xfffU) << 0U; 410} 411static inline u32 ram_rl_entry_type_f(u32 v) 412{ 413 return (v & 0x1U) << 13U; 414} 415static inline u32 ram_rl_entry_type_chid_f(void) 416{ 417 return 0x0U; 418} 419static inline u32 ram_rl_entry_type_tsg_f(void) 420{ 421 return 0x2000U; 422} 423static inline u32 ram_rl_entry_timeslice_scale_f(u32 v) 424{ 425 return (v & 0xfU) << 14U; 426} 427static inline u32 
ram_rl_entry_timeslice_scale_3_f(void) 428{ 429 return 0xc000U; 430} 431static inline u32 ram_rl_entry_timeslice_timeout_f(u32 v) 432{ 433 return (v & 0xffU) << 18U; 434} 435static inline u32 ram_rl_entry_timeslice_timeout_128_f(void) 436{ 437 return 0x2000000U; 438} 439static inline u32 ram_rl_entry_tsg_length_f(u32 v) 440{ 441 return (v & 0x3fU) << 26U; 442} 443#endif
diff --git a/include/nvgpu/hw/gk20a/hw_therm_gk20a.h b/include/nvgpu/hw/gk20a/hw_therm_gk20a.h
deleted file mode 100644
index 075c9bc..0000000
--- a/include/nvgpu/hw/gk20a/hw_therm_gk20a.h
+++ /dev/null
@@ -1,367 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_therm_gk20a_h_ 57#define _hw_therm_gk20a_h_ 58 59static inline u32 therm_use_a_r(void) 60{ 61 return 0x00020798U; 62} 63static inline u32 therm_use_a_ext_therm_0_enable_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 therm_use_a_ext_therm_1_enable_f(void) 68{ 69 return 0x2U; 70} 71static inline u32 therm_use_a_ext_therm_2_enable_f(void) 72{ 73 return 0x4U; 74} 75static inline u32 therm_evt_ext_therm_0_r(void) 76{ 77 return 0x00020700U; 78} 79static inline u32 therm_evt_ext_therm_0_slow_factor_f(u32 v) 80{ 81 return (v & 0x3fU) << 8U; 82} 83static inline u32 therm_evt_ext_therm_0_slow_factor_init_v(void) 84{ 85 return 0x00000000U; 86} 87static inline u32 therm_evt_ext_therm_0_priority_f(u32 v) 88{ 89 return (v & 0x1fU) << 24U; 90} 91static inline u32 therm_evt_ext_therm_1_r(void) 92{ 93 return 0x00020704U; 94} 95static inline u32 therm_evt_ext_therm_1_slow_factor_f(u32 v) 96{ 97 return (v & 0x3fU) << 8U; 98} 99static inline u32 therm_evt_ext_therm_1_slow_factor_init_v(void) 100{ 101 return 0x00000000U; 102} 103static inline u32 therm_evt_ext_therm_1_priority_f(u32 v) 104{ 105 return (v & 0x1fU) << 24U; 106} 107static inline u32 therm_evt_ext_therm_2_r(void) 108{ 109 return 0x00020708U; 110} 111static inline u32 
therm_evt_ext_therm_2_slow_factor_f(u32 v) 112{ 113 return (v & 0x3fU) << 8U; 114} 115static inline u32 therm_evt_ext_therm_2_slow_factor_init_v(void) 116{ 117 return 0x00000000U; 118} 119static inline u32 therm_evt_ext_therm_2_priority_f(u32 v) 120{ 121 return (v & 0x1fU) << 24U; 122} 123static inline u32 therm_weight_1_r(void) 124{ 125 return 0x00020024U; 126} 127static inline u32 therm_config1_r(void) 128{ 129 return 0x00020050U; 130} 131static inline u32 therm_config2_r(void) 132{ 133 return 0x00020130U; 134} 135static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) 136{ 137 return (v & 0x1U) << 24U; 138} 139static inline u32 therm_config2_grad_enable_f(u32 v) 140{ 141 return (v & 0x1U) << 31U; 142} 143static inline u32 therm_gate_ctrl_r(u32 i) 144{ 145 return 0x00020200U + i*4U; 146} 147static inline u32 therm_gate_ctrl_eng_clk_m(void) 148{ 149 return 0x3U << 0U; 150} 151static inline u32 therm_gate_ctrl_eng_clk_run_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) 156{ 157 return 0x1U; 158} 159static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 therm_gate_ctrl_blk_clk_m(void) 164{ 165 return 0x3U << 2U; 166} 167static inline u32 therm_gate_ctrl_blk_clk_run_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) 172{ 173 return 0x4U; 174} 175static inline u32 therm_gate_ctrl_eng_pwr_m(void) 176{ 177 return 0x3U << 4U; 178} 179static inline u32 therm_gate_ctrl_eng_pwr_auto_f(void) 180{ 181 return 0x10U; 182} 183static inline u32 therm_gate_ctrl_eng_pwr_off_v(void) 184{ 185 return 0x00000002U; 186} 187static inline u32 therm_gate_ctrl_eng_pwr_off_f(void) 188{ 189 return 0x20U; 190} 191static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) 192{ 193 return (v & 0x1fU) << 8U; 194} 195static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) 196{ 197 return 0x1fU << 8U; 198} 199static inline u32 
therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) 200{ 201 return (v & 0x7U) << 13U; 202} 203static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) 204{ 205 return 0x7U << 13U; 206} 207static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) 208{ 209 return (v & 0xfU) << 16U; 210} 211static inline u32 therm_gate_ctrl_eng_delay_before_m(void) 212{ 213 return 0xfU << 16U; 214} 215static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) 216{ 217 return (v & 0xfU) << 20U; 218} 219static inline u32 therm_gate_ctrl_eng_delay_after_m(void) 220{ 221 return 0xfU << 20U; 222} 223static inline u32 therm_fecs_idle_filter_r(void) 224{ 225 return 0x00020288U; 226} 227static inline u32 therm_fecs_idle_filter_value_m(void) 228{ 229 return 0xffffffffU << 0U; 230} 231static inline u32 therm_hubmmu_idle_filter_r(void) 232{ 233 return 0x0002028cU; 234} 235static inline u32 therm_hubmmu_idle_filter_value_m(void) 236{ 237 return 0xffffffffU << 0U; 238} 239static inline u32 therm_clk_slowdown_r(u32 i) 240{ 241 return 0x00020160U + i*4U; 242} 243static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) 244{ 245 return (v & 0x3fU) << 16U; 246} 247static inline u32 therm_clk_slowdown_idle_factor_m(void) 248{ 249 return 0x3fU << 16U; 250} 251static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) 252{ 253 return (r >> 16U) & 0x3fU; 254} 255static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) 256{ 257 return 0x0U; 258} 259static inline u32 therm_grad_stepping_table_r(u32 i) 260{ 261 return 0x000202c8U + i*4U; 262} 263static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) 264{ 265 return (v & 0x3fU) << 0U; 266} 267static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) 268{ 269 return 0x3fU << 0U; 270} 271static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) 272{ 273 return 0x1U; 274} 275static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) 276{ 277 return 0x2U; 278} 279static inline u32 
therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) 280{ 281 return 0x6U; 282} 283static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) 284{ 285 return 0xeU; 286} 287static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) 288{ 289 return (v & 0x3fU) << 6U; 290} 291static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) 292{ 293 return 0x3fU << 6U; 294} 295static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) 296{ 297 return (v & 0x3fU) << 12U; 298} 299static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) 300{ 301 return 0x3fU << 12U; 302} 303static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v) 304{ 305 return (v & 0x3fU) << 18U; 306} 307static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) 308{ 309 return 0x3fU << 18U; 310} 311static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v) 312{ 313 return (v & 0x3fU) << 24U; 314} 315static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) 316{ 317 return 0x3fU << 24U; 318} 319static inline u32 therm_grad_stepping0_r(void) 320{ 321 return 0x000202c0U; 322} 323static inline u32 therm_grad_stepping0_feature_s(void) 324{ 325 return 1U; 326} 327static inline u32 therm_grad_stepping0_feature_f(u32 v) 328{ 329 return (v & 0x1U) << 0U; 330} 331static inline u32 therm_grad_stepping0_feature_m(void) 332{ 333 return 0x1U << 0U; 334} 335static inline u32 therm_grad_stepping0_feature_v(u32 r) 336{ 337 return (r >> 0U) & 0x1U; 338} 339static inline u32 therm_grad_stepping0_feature_enable_f(void) 340{ 341 return 0x1U; 342} 343static inline u32 therm_grad_stepping1_r(void) 344{ 345 return 0x000202c4U; 346} 347static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) 348{ 349 return (v & 0x1ffffU) << 0U; 350} 351static inline u32 therm_clk_timing_r(u32 i) 352{ 353 return 0x000203c0U + i*4U; 354} 355static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) 356{ 357 return (v & 0x1U) << 
16U; 358} 359static inline u32 therm_clk_timing_grad_slowdown_m(void) 360{ 361 return 0x1U << 16U; 362} 363static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) 364{ 365 return 0x10000U; 366} 367#endif
diff --git a/include/nvgpu/hw/gk20a/hw_timer_gk20a.h b/include/nvgpu/hw/gk20a/hw_timer_gk20a.h
deleted file mode 100644
index 972d68a..0000000
--- a/include/nvgpu/hw/gk20a/hw_timer_gk20a.h
+++ /dev/null
@@ -1,127 +0,0 @@ 1/* 2 * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_timer_gk20a_h_ 57#define _hw_timer_gk20a_h_ 58 59static inline u32 timer_pri_timeout_r(void) 60{ 61 return 0x00009080U; 62} 63static inline u32 timer_pri_timeout_period_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 timer_pri_timeout_period_m(void) 68{ 69 return 0xffffffU << 0U; 70} 71static inline u32 timer_pri_timeout_period_v(u32 r) 72{ 73 return (r >> 0U) & 0xffffffU; 74} 75static inline u32 timer_pri_timeout_en_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 timer_pri_timeout_en_m(void) 80{ 81 return 0x1U << 31U; 82} 83static inline u32 timer_pri_timeout_en_v(u32 r) 84{ 85 return (r >> 31U) & 0x1U; 86} 87static inline u32 timer_pri_timeout_en_en_enabled_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 timer_pri_timeout_en_en_disabled_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 timer_pri_timeout_save_0_r(void) 96{ 97 return 0x00009084U; 98} 99static inline u32 timer_pri_timeout_save_0_fecs_tgt_v(u32 r) 100{ 101 return (r >> 31U) & 0x1U; 102} 103static inline u32 timer_pri_timeout_save_0_addr_v(u32 r) 104{ 105 return (r >> 2U) & 0x3fffffU; 106} 107static inline u32 timer_pri_timeout_save_0_write_v(u32 r) 108{ 109 return (r >> 1U) & 0x1U; 110} 111static inline u32 
timer_pri_timeout_save_1_r(void) 112{ 113 return 0x00009088U; 114} 115static inline u32 timer_pri_timeout_fecs_errcode_r(void) 116{ 117 return 0x0000908cU; 118} 119static inline u32 timer_time_0_r(void) 120{ 121 return 0x00009400U; 122} 123static inline u32 timer_time_1_r(void) 124{ 125 return 0x00009410U; 126} 127#endif
diff --git a/include/nvgpu/hw/gk20a/hw_top_gk20a.h b/include/nvgpu/hw/gk20a/hw_top_gk20a.h
deleted file mode 100644
index be7fa4a..0000000
--- a/include/nvgpu/hw/gk20a/hw_top_gk20a.h
+++ /dev/null
@@ -1,211 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_top_gk20a_h_ 57#define _hw_top_gk20a_h_ 58 59static inline u32 top_num_gpcs_r(void) 60{ 61 return 0x00022430U; 62} 63static inline u32 top_num_gpcs_value_v(u32 r) 64{ 65 return (r >> 0U) & 0x1fU; 66} 67static inline u32 top_tpc_per_gpc_r(void) 68{ 69 return 0x00022434U; 70} 71static inline u32 top_tpc_per_gpc_value_v(u32 r) 72{ 73 return (r >> 0U) & 0x1fU; 74} 75static inline u32 top_num_fbps_r(void) 76{ 77 return 0x00022438U; 78} 79static inline u32 top_num_fbps_value_v(u32 r) 80{ 81 return (r >> 0U) & 0x1fU; 82} 83static inline u32 top_device_info_r(u32 i) 84{ 85 return 0x00022700U + i*4U; 86} 87static inline u32 top_device_info__size_1_v(void) 88{ 89 return 0x00000040U; 90} 91static inline u32 top_device_info_chain_v(u32 r) 92{ 93 return (r >> 31U) & 0x1U; 94} 95static inline u32 top_device_info_chain_enable_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 top_device_info_engine_enum_v(u32 r) 100{ 101 return (r >> 26U) & 0xfU; 102} 103static inline u32 top_device_info_runlist_enum_v(u32 r) 104{ 105 return (r >> 21U) & 0xfU; 106} 107static inline u32 top_device_info_intr_enum_v(u32 r) 108{ 109 return (r >> 15U) & 0x1fU; 110} 111static inline u32 top_device_info_reset_enum_v(u32 r) 112{ 113 return (r >> 9U) & 0x1fU; 
114} 115static inline u32 top_device_info_type_enum_v(u32 r) 116{ 117 return (r >> 2U) & 0x1fffffffU; 118} 119static inline u32 top_device_info_type_enum_graphics_v(void) 120{ 121 return 0x00000000U; 122} 123static inline u32 top_device_info_type_enum_graphics_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 top_device_info_type_enum_copy0_v(void) 128{ 129 return 0x00000001U; 130} 131static inline u32 top_device_info_type_enum_copy0_f(void) 132{ 133 return 0x4U; 134} 135static inline u32 top_device_info_type_enum_copy1_v(void) 136{ 137 return 0x00000002U; 138} 139static inline u32 top_device_info_type_enum_copy1_f(void) 140{ 141 return 0x8U; 142} 143static inline u32 top_device_info_type_enum_copy2_v(void) 144{ 145 return 0x00000003U; 146} 147static inline u32 top_device_info_type_enum_copy2_f(void) 148{ 149 return 0xcU; 150} 151static inline u32 top_device_info_engine_v(u32 r) 152{ 153 return (r >> 5U) & 0x1U; 154} 155static inline u32 top_device_info_runlist_v(u32 r) 156{ 157 return (r >> 4U) & 0x1U; 158} 159static inline u32 top_device_info_intr_v(u32 r) 160{ 161 return (r >> 3U) & 0x1U; 162} 163static inline u32 top_device_info_reset_v(u32 r) 164{ 165 return (r >> 2U) & 0x1U; 166} 167static inline u32 top_device_info_entry_v(u32 r) 168{ 169 return (r >> 0U) & 0x3U; 170} 171static inline u32 top_device_info_entry_not_valid_v(void) 172{ 173 return 0x00000000U; 174} 175static inline u32 top_device_info_entry_enum_v(void) 176{ 177 return 0x00000002U; 178} 179static inline u32 top_device_info_entry_engine_type_v(void) 180{ 181 return 0x00000003U; 182} 183static inline u32 top_device_info_entry_data_v(void) 184{ 185 return 0x00000001U; 186} 187static inline u32 top_fs_status_fbp_r(void) 188{ 189 return 0x00022548U; 190} 191static inline u32 top_fs_status_fbp_cluster_v(u32 r) 192{ 193 return (r >> 0U) & 0xffffU; 194} 195static inline u32 top_fs_status_fbp_cluster_enable_v(void) 196{ 197 return 0x00000000U; 198} 199static inline u32 
top_fs_status_fbp_cluster_enable_f(void) 200{ 201 return 0x0U; 202} 203static inline u32 top_fs_status_fbp_cluster_disable_v(void) 204{ 205 return 0x00000001U; 206} 207static inline u32 top_fs_status_fbp_cluster_disable_f(void) 208{ 209 return 0x1U; 210} 211#endif
diff --git a/include/nvgpu/hw/gk20a/hw_trim_gk20a.h b/include/nvgpu/hw/gk20a/hw_trim_gk20a.h
deleted file mode 100644
index f28c21f..0000000
--- a/include/nvgpu/hw/gk20a/hw_trim_gk20a.h
+++ /dev/null
@@ -1,315 +0,0 @@ 1/* 2 * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_trim_gk20a_h_ 57#define _hw_trim_gk20a_h_ 58 59static inline u32 trim_sys_gpcpll_cfg_r(void) 60{ 61 return 0x00137000U; 62} 63static inline u32 trim_sys_gpcpll_cfg_enable_m(void) 64{ 65 return 0x1U << 0U; 66} 67static inline u32 trim_sys_gpcpll_cfg_enable_v(u32 r) 68{ 69 return (r >> 0U) & 0x1U; 70} 71static inline u32 trim_sys_gpcpll_cfg_enable_no_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 trim_sys_gpcpll_cfg_enable_yes_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 trim_sys_gpcpll_cfg_iddq_m(void) 80{ 81 return 0x1U << 1U; 82} 83static inline u32 trim_sys_gpcpll_cfg_iddq_v(u32 r) 84{ 85 return (r >> 1U) & 0x1U; 86} 87static inline u32 trim_sys_gpcpll_cfg_iddq_power_on_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 trim_sys_gpcpll_cfg_enb_lckdet_m(void) 92{ 93 return 0x1U << 4U; 94} 95static inline u32 trim_sys_gpcpll_cfg_enb_lckdet_power_on_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 trim_sys_gpcpll_cfg_enb_lckdet_power_off_f(void) 100{ 101 return 0x10U; 102} 103static inline u32 trim_sys_gpcpll_cfg_pll_lock_v(u32 r) 104{ 105 return (r >> 17U) & 0x1U; 106} 107static inline u32 trim_sys_gpcpll_cfg_pll_lock_true_f(void) 108{ 109 return 0x20000U; 110} 111static inline u32 
trim_sys_gpcpll_coeff_r(void) 112{ 113 return 0x00137004U; 114} 115static inline u32 trim_sys_gpcpll_coeff_mdiv_f(u32 v) 116{ 117 return (v & 0xffU) << 0U; 118} 119static inline u32 trim_sys_gpcpll_coeff_mdiv_m(void) 120{ 121 return 0xffU << 0U; 122} 123static inline u32 trim_sys_gpcpll_coeff_mdiv_v(u32 r) 124{ 125 return (r >> 0U) & 0xffU; 126} 127static inline u32 trim_sys_gpcpll_coeff_ndiv_f(u32 v) 128{ 129 return (v & 0xffU) << 8U; 130} 131static inline u32 trim_sys_gpcpll_coeff_ndiv_m(void) 132{ 133 return 0xffU << 8U; 134} 135static inline u32 trim_sys_gpcpll_coeff_ndiv_v(u32 r) 136{ 137 return (r >> 8U) & 0xffU; 138} 139static inline u32 trim_sys_gpcpll_coeff_pldiv_f(u32 v) 140{ 141 return (v & 0x3fU) << 16U; 142} 143static inline u32 trim_sys_gpcpll_coeff_pldiv_m(void) 144{ 145 return 0x3fU << 16U; 146} 147static inline u32 trim_sys_gpcpll_coeff_pldiv_v(u32 r) 148{ 149 return (r >> 16U) & 0x3fU; 150} 151static inline u32 trim_sys_sel_vco_r(void) 152{ 153 return 0x00137100U; 154} 155static inline u32 trim_sys_sel_vco_gpc2clk_out_m(void) 156{ 157 return 0x1U << 0U; 158} 159static inline u32 trim_sys_sel_vco_gpc2clk_out_init_v(void) 160{ 161 return 0x00000000U; 162} 163static inline u32 trim_sys_sel_vco_gpc2clk_out_init_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 trim_sys_sel_vco_gpc2clk_out_bypass_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 trim_sys_sel_vco_gpc2clk_out_vco_f(void) 172{ 173 return 0x1U; 174} 175static inline u32 trim_sys_gpc2clk_out_r(void) 176{ 177 return 0x00137250U; 178} 179static inline u32 trim_sys_gpc2clk_out_bypdiv_s(void) 180{ 181 return 6U; 182} 183static inline u32 trim_sys_gpc2clk_out_bypdiv_f(u32 v) 184{ 185 return (v & 0x3fU) << 0U; 186} 187static inline u32 trim_sys_gpc2clk_out_bypdiv_m(void) 188{ 189 return 0x3fU << 0U; 190} 191static inline u32 trim_sys_gpc2clk_out_bypdiv_v(u32 r) 192{ 193 return (r >> 0U) & 0x3fU; 194} 195static inline u32 trim_sys_gpc2clk_out_bypdiv_by31_f(void) 196{ 197 return 0x3cU; 
198} 199static inline u32 trim_sys_gpc2clk_out_vcodiv_s(void) 200{ 201 return 6U; 202} 203static inline u32 trim_sys_gpc2clk_out_vcodiv_f(u32 v) 204{ 205 return (v & 0x3fU) << 8U; 206} 207static inline u32 trim_sys_gpc2clk_out_vcodiv_m(void) 208{ 209 return 0x3fU << 8U; 210} 211static inline u32 trim_sys_gpc2clk_out_vcodiv_v(u32 r) 212{ 213 return (r >> 8U) & 0x3fU; 214} 215static inline u32 trim_sys_gpc2clk_out_vcodiv_by1_f(void) 216{ 217 return 0x0U; 218} 219static inline u32 trim_sys_gpc2clk_out_sdiv14_m(void) 220{ 221 return 0x1U << 31U; 222} 223static inline u32 trim_sys_gpc2clk_out_sdiv14_indiv4_mode_f(void) 224{ 225 return 0x80000000U; 226} 227static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_r(u32 i) 228{ 229 return 0x00134124U + i*512U; 230} 231static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_noofipclks_f(u32 v) 232{ 233 return (v & 0x3fffU) << 0U; 234} 235static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_write_en_asserted_f(void) 236{ 237 return 0x10000U; 238} 239static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_enable_asserted_f(void) 240{ 241 return 0x100000U; 242} 243static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f(void) 244{ 245 return 0x1000000U; 246} 247static inline u32 trim_gpc_clk_cntr_ncgpcclk_cnt_r(u32 i) 248{ 249 return 0x00134128U + i*512U; 250} 251static inline u32 trim_gpc_clk_cntr_ncgpcclk_cnt_value_v(u32 r) 252{ 253 return (r >> 0U) & 0xfffffU; 254} 255static inline u32 trim_sys_gpcpll_cfg2_r(void) 256{ 257 return 0x0013700cU; 258} 259static inline u32 trim_sys_gpcpll_cfg2_pll_stepa_f(u32 v) 260{ 261 return (v & 0xffU) << 24U; 262} 263static inline u32 trim_sys_gpcpll_cfg2_pll_stepa_m(void) 264{ 265 return 0xffU << 24U; 266} 267static inline u32 trim_sys_gpcpll_cfg3_r(void) 268{ 269 return 0x00137018U; 270} 271static inline u32 trim_sys_gpcpll_cfg3_pll_stepb_f(u32 v) 272{ 273 return (v & 0xffU) << 16U; 274} 275static inline u32 trim_sys_gpcpll_cfg3_pll_stepb_m(void) 276{ 277 return 0xffU << 16U; 278} 279static inline u32 
trim_sys_gpcpll_ndiv_slowdown_r(void) 280{ 281 return 0x0013701cU; 282} 283static inline u32 trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_m(void) 284{ 285 return 0x1U << 22U; 286} 287static inline u32 trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_yes_f(void) 288{ 289 return 0x400000U; 290} 291static inline u32 trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_no_f(void) 292{ 293 return 0x0U; 294} 295static inline u32 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(void) 296{ 297 return 0x1U << 31U; 298} 299static inline u32 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_yes_f(void) 300{ 301 return 0x80000000U; 302} 303static inline u32 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f(void) 304{ 305 return 0x0U; 306} 307static inline u32 trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r(void) 308{ 309 return 0x001328a0U; 310} 311static inline u32 trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(u32 r) 312{ 313 return (r >> 24U) & 0x1U; 314} 315#endif
diff --git a/include/nvgpu/hw/gm20b/hw_bus_gm20b.h b/include/nvgpu/hw/gm20b/hw_bus_gm20b.h
deleted file mode 100644
index 15cddae..0000000
--- a/include/nvgpu/hw/gm20b/hw_bus_gm20b.h
+++ /dev/null
@@ -1,223 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_bus_gm20b_h_ 57#define _hw_bus_gm20b_h_ 58 59static inline u32 bus_bar0_window_r(void) 60{ 61 return 0x00001700U; 62} 63static inline u32 bus_bar0_window_base_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 bus_bar0_window_target_vid_mem_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) 72{ 73 return 0x2000000U; 74} 75static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) 76{ 77 return 0x3000000U; 78} 79static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) 80{ 81 return 0x00000010U; 82} 83static inline u32 bus_bar1_block_r(void) 84{ 85 return 0x00001704U; 86} 87static inline u32 bus_bar1_block_ptr_f(u32 v) 88{ 89 return (v & 0xfffffffU) << 0U; 90} 91static inline u32 bus_bar1_block_target_vid_mem_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) 96{ 97 return 0x20000000U; 98} 99static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) 100{ 101 return 0x30000000U; 102} 103static inline u32 bus_bar1_block_mode_virtual_f(void) 104{ 105 return 0x80000000U; 106} 107static inline u32 bus_bar2_block_r(void) 108{ 109 return 0x00001714U; 110} 111static inline u32 bus_bar2_block_ptr_f(u32 v) 
112{ 113 return (v & 0xfffffffU) << 0U; 114} 115static inline u32 bus_bar2_block_target_vid_mem_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void) 124{ 125 return 0x30000000U; 126} 127static inline u32 bus_bar2_block_mode_virtual_f(void) 128{ 129 return 0x80000000U; 130} 131static inline u32 bus_bar1_block_ptr_shift_v(void) 132{ 133 return 0x0000000cU; 134} 135static inline u32 bus_bar2_block_ptr_shift_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 bus_bind_status_r(void) 140{ 141 return 0x00001710U; 142} 143static inline u32 bus_bind_status_bar1_pending_v(u32 r) 144{ 145 return (r >> 0U) & 0x1U; 146} 147static inline u32 bus_bind_status_bar1_pending_empty_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 bus_bind_status_bar1_pending_busy_f(void) 152{ 153 return 0x1U; 154} 155static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) 156{ 157 return (r >> 1U) & 0x1U; 158} 159static inline u32 bus_bind_status_bar1_outstanding_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 bus_bind_status_bar1_outstanding_true_f(void) 164{ 165 return 0x2U; 166} 167static inline u32 bus_bind_status_bar2_pending_v(u32 r) 168{ 169 return (r >> 2U) & 0x1U; 170} 171static inline u32 bus_bind_status_bar2_pending_empty_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 bus_bind_status_bar2_pending_busy_f(void) 176{ 177 return 0x4U; 178} 179static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) 180{ 181 return (r >> 3U) & 0x1U; 182} 183static inline u32 bus_bind_status_bar2_outstanding_false_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 bus_bind_status_bar2_outstanding_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 bus_intr_0_r(void) 192{ 193 return 0x00001100U; 194} 195static inline u32 bus_intr_0_pri_squash_m(void) 196{ 197 return 0x1U << 1U; 198} 199static inline u32 
bus_intr_0_pri_fecserr_m(void) 200{ 201 return 0x1U << 2U; 202} 203static inline u32 bus_intr_0_pri_timeout_m(void) 204{ 205 return 0x1U << 3U; 206} 207static inline u32 bus_intr_en_0_r(void) 208{ 209 return 0x00001140U; 210} 211static inline u32 bus_intr_en_0_pri_squash_m(void) 212{ 213 return 0x1U << 1U; 214} 215static inline u32 bus_intr_en_0_pri_fecserr_m(void) 216{ 217 return 0x1U << 2U; 218} 219static inline u32 bus_intr_en_0_pri_timeout_m(void) 220{ 221 return 0x1U << 3U; 222} 223#endif
diff --git a/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h b/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h
deleted file mode 100644
index adfce72..0000000
--- a/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h
+++ /dev/null
@@ -1,163 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ccsr_gm20b_h_ 57#define _hw_ccsr_gm20b_h_ 58 59static inline u32 ccsr_channel_inst_r(u32 i) 60{ 61 return 0x00800000U + i*8U; 62} 63static inline u32 ccsr_channel_inst__size_1_v(void) 64{ 65 return 0x00000200U; 66} 67static inline u32 ccsr_channel_inst_ptr_f(u32 v) 68{ 69 return (v & 0xfffffffU) << 0U; 70} 71static inline u32 ccsr_channel_inst_target_vid_mem_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) 76{ 77 return 0x20000000U; 78} 79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) 80{ 81 return 0x30000000U; 82} 83static inline u32 ccsr_channel_inst_bind_false_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 ccsr_channel_inst_bind_true_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 ccsr_channel_r(u32 i) 92{ 93 return 0x00800004U + i*8U; 94} 95static inline u32 ccsr_channel__size_1_v(void) 96{ 97 return 0x00000200U; 98} 99static inline u32 ccsr_channel_enable_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 ccsr_channel_enable_set_f(u32 v) 104{ 105 return (v & 0x1U) << 10U; 106} 107static inline u32 ccsr_channel_enable_set_true_f(void) 108{ 109 return 0x400U; 110} 111static inline u32 ccsr_channel_enable_clr_true_f(void) 112{ 113 
return 0x800U; 114} 115static inline u32 ccsr_channel_status_v(u32 r) 116{ 117 return (r >> 24U) & 0xfU; 118} 119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) 120{ 121 return 0x00000002U; 122} 123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) 124{ 125 return 0x00000004U; 126} 127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) 128{ 129 return 0x0000000aU; 130} 131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) 132{ 133 return 0x0000000bU; 134} 135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) 140{ 141 return 0x0000000dU; 142} 143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) 144{ 145 return 0x0000000eU; 146} 147static inline u32 ccsr_channel_next_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 ccsr_channel_next_true_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 ccsr_channel_force_ctx_reload_true_f(void) 156{ 157 return 0x100U; 158} 159static inline u32 ccsr_channel_busy_v(u32 r) 160{ 161 return (r >> 28U) & 0x1U; 162} 163#endif
diff --git a/include/nvgpu/hw/gm20b/hw_ce2_gm20b.h b/include/nvgpu/hw/gm20b/hw_ce2_gm20b.h
deleted file mode 100644
index fb741a7..0000000
--- a/include/nvgpu/hw/gm20b/hw_ce2_gm20b.h
+++ /dev/null
@@ -1,87 +0,0 @@ 1/* 2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ce2_gm20b_h_ 57#define _hw_ce2_gm20b_h_ 58 59static inline u32 ce2_intr_status_r(void) 60{ 61 return 0x00106908U; 62} 63static inline u32 ce2_intr_status_blockpipe_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 ce2_intr_status_blockpipe_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 ce2_intr_status_nonblockpipe_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 ce2_intr_status_nonblockpipe_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 ce2_intr_status_launcherr_pending_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 ce2_intr_status_launcherr_reset_f(void) 84{ 85 return 0x4U; 86} 87#endif
diff --git a/include/nvgpu/hw/gm20b/hw_ctxsw_prog_gm20b.h b/include/nvgpu/hw/gm20b/hw_ctxsw_prog_gm20b.h
deleted file mode 100644
index 6b5632a..0000000
--- a/include/nvgpu/hw/gm20b/hw_ctxsw_prog_gm20b.h
+++ /dev/null
@@ -1,475 +0,0 @@ 1/* 2 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ctxsw_prog_gm20b_h_ 57#define _hw_ctxsw_prog_gm20b_h_ 58 59static inline u32 ctxsw_prog_fecs_header_v(void) 60{ 61 return 0x00000100U; 62} 63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ctxsw_prog_main_image_ctl_o(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 ctxsw_prog_main_image_ctl_cde_enabled_f(void) 72{ 73 return 0x400U; 74} 75static inline u32 ctxsw_prog_main_image_ctl_cde_disabled_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ctxsw_prog_main_image_patch_count_o(void) 80{ 81 return 0x00000010U; 82} 83static inline u32 ctxsw_prog_main_image_context_id_o(void) 84{ 85 return 0x000000f0U; 86} 87static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) 88{ 89 return 0x00000014U; 90} 91static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) 92{ 93 return 0x00000018U; 94} 95static inline u32 ctxsw_prog_main_image_zcull_o(void) 96{ 97 return 0x0000001cU; 98} 99static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) 104{ 105 return 0x00000002U; 106} 107static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) 108{ 109 return 
0x00000020U; 110} 111static inline u32 ctxsw_prog_main_image_pm_o(void) 112{ 113 return 0x00000028U; 114} 115static inline u32 ctxsw_prog_main_image_pm_mode_m(void) 116{ 117 return 0x7U << 0U; 118} 119static inline u32 ctxsw_prog_main_image_pm_mode_ctxsw_f(void) 120{ 121 return 0x1U; 122} 123static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) 128{ 129 return 0x7U << 3U; 130} 131static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) 132{ 133 return 0x8U; 134} 135static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 ctxsw_prog_main_image_pm_pc_sampling_f(u32 v) 140{ 141 return (v & 0x1U) << 6U; 142} 143static inline u32 ctxsw_prog_main_image_pm_pc_sampling_m(void) 144{ 145 return 0x1U << 6U; 146} 147static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) 148{ 149 return 0x0000002cU; 150} 151static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) 152{ 153 return 0x000000f4U; 154} 155static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) 156{ 157 return 0x000000f8U; 158} 159static inline u32 ctxsw_prog_main_image_magic_value_o(void) 160{ 161 return 0x000000fcU; 162} 163static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) 164{ 165 return 0x600dc0deU; 166} 167static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) 168{ 169 return 0x0000000cU; 170} 171static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) 172{ 173 return (r >> 0U) & 0xffffU; 174} 175static inline u32 ctxsw_prog_local_image_ppc_info_o(void) 176{ 177 return 0x000000f4U; 178} 179static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) 180{ 181 return (r >> 0U) & 0xffffU; 182} 183static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) 184{ 185 return (r >> 16U) & 0xffffU; 186} 187static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) 188{ 189 return 
0x000000f8U; 190} 191static inline u32 ctxsw_prog_local_magic_value_o(void) 192{ 193 return 0x000000fcU; 194} 195static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) 196{ 197 return 0xad0becabU; 198} 199static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) 200{ 201 return 0x000000ecU; 202} 203static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) 204{ 205 return (r >> 0U) & 0xffffU; 206} 207static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) 208{ 209 return (r >> 16U) & 0xffU; 210} 211static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) 212{ 213 return 0x00000100U; 214} 215static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) 216{ 217 return 0x00000004U; 218} 219static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) 220{ 221 return 0x00000000U; 222} 223static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) 224{ 225 return 0x00000002U; 226} 227static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) 228{ 229 return 0x000000a0U; 230} 231static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) 232{ 233 return 2U; 234} 235static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) 236{ 237 return (v & 0x3U) << 0U; 238} 239static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) 240{ 241 return 0x3U << 0U; 242} 243static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) 244{ 245 return (r >> 0U) & 0x3U; 246} 247static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) 248{ 249 return 0x0U; 250} 251static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) 252{ 253 return 0x2U; 254} 255static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) 256{ 257 return 0x000000a4U; 258} 259static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) 260{ 261 return 
0x000000a8U; 262} 263static inline u32 ctxsw_prog_main_image_misc_options_o(void) 264{ 265 return 0x0000003cU; 266} 267static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) 268{ 269 return 0x1U << 3U; 270} 271static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) 272{ 273 return 0x0U; 274} 275static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_control_o(void) 276{ 277 return 0x000000acU; 278} 279static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_control_num_records_f(u32 v) 280{ 281 return (v & 0xffffU) << 0U; 282} 283static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(void) 284{ 285 return 0x000000b0U; 286} 287static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_v_m(void) 288{ 289 return 0xfffffffU << 0U; 290} 291static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_m(void) 292{ 293 return 0x3U << 28U; 294} 295static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f(void) 296{ 297 return 0x0U; 298} 299static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_coherent_f(void) 300{ 301 return 0x20000000U; 302} 303static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(void) 304{ 305 return 0x30000000U; 306} 307static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(void) 308{ 309 return 0x000000b4U; 310} 311static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_v_f(u32 v) 312{ 313 return (v & 0xffffffffU) << 0U; 314} 315static inline u32 ctxsw_prog_record_timestamp_record_size_in_bytes_v(void) 316{ 317 return 0x00000080U; 318} 319static inline u32 ctxsw_prog_record_timestamp_record_size_in_words_v(void) 320{ 321 return 0x00000020U; 322} 323static inline u32 ctxsw_prog_record_timestamp_magic_value_lo_o(void) 324{ 325 return 0x00000000U; 326} 327static inline u32 
ctxsw_prog_record_timestamp_magic_value_lo_v_value_v(void) 328{ 329 return 0x00000000U; 330} 331static inline u32 ctxsw_prog_record_timestamp_magic_value_hi_o(void) 332{ 333 return 0x00000004U; 334} 335static inline u32 ctxsw_prog_record_timestamp_magic_value_hi_v_value_v(void) 336{ 337 return 0x600dbeefU; 338} 339static inline u32 ctxsw_prog_record_timestamp_context_id_o(void) 340{ 341 return 0x00000008U; 342} 343static inline u32 ctxsw_prog_record_timestamp_context_ptr_o(void) 344{ 345 return 0x0000000cU; 346} 347static inline u32 ctxsw_prog_record_timestamp_new_context_id_o(void) 348{ 349 return 0x00000010U; 350} 351static inline u32 ctxsw_prog_record_timestamp_new_context_ptr_o(void) 352{ 353 return 0x00000014U; 354} 355static inline u32 ctxsw_prog_record_timestamp_timestamp_lo_o(void) 356{ 357 return 0x00000018U; 358} 359static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_o(void) 360{ 361 return 0x0000001cU; 362} 363static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_v_f(u32 v) 364{ 365 return (v & 0xffffffU) << 0U; 366} 367static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_v_v(u32 r) 368{ 369 return (r >> 0U) & 0xffffffU; 370} 371static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_f(u32 v) 372{ 373 return (v & 0xffU) << 24U; 374} 375static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_m(void) 376{ 377 return 0xffU << 24U; 378} 379static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_v(u32 r) 380{ 381 return (r >> 24U) & 0xffU; 382} 383static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_ctxsw_req_by_host_v(void) 384{ 385 return 0x00000001U; 386} 387static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_ctxsw_req_by_host_f(void) 388{ 389 return 0x1000000U; 390} 391static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_v(void) 392{ 393 return 0x00000002U; 394} 395static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_f(void) 396{ 397 return 0x2000000U; 398} 
399static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_wfi_v(void) 400{ 401 return 0x0000000aU; 402} 403static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_wfi_f(void) 404{ 405 return 0xa000000U; 406} 407static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_gfxp_v(void) 408{ 409 return 0x0000000bU; 410} 411static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_gfxp_f(void) 412{ 413 return 0xb000000U; 414} 415static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_ctap_v(void) 416{ 417 return 0x0000000cU; 418} 419static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_ctap_f(void) 420{ 421 return 0xc000000U; 422} 423static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_cilp_v(void) 424{ 425 return 0x0000000dU; 426} 427static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_cilp_f(void) 428{ 429 return 0xd000000U; 430} 431static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_save_end_v(void) 432{ 433 return 0x00000003U; 434} 435static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_save_end_f(void) 436{ 437 return 0x3000000U; 438} 439static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_restore_start_v(void) 440{ 441 return 0x00000004U; 442} 443static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_restore_start_f(void) 444{ 445 return 0x4000000U; 446} 447static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_context_start_v(void) 448{ 449 return 0x00000005U; 450} 451static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_context_start_f(void) 452{ 453 return 0x5000000U; 454} 455static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_v(void) 456{ 457 return 0x000000ffU; 458} 459static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_f(void) 460{ 461 return 0xff000000U; 462} 463static inline u32 ctxsw_prog_main_image_preemption_options_o(void) 464{ 
465 return 0x00000060U; 466} 467static inline u32 ctxsw_prog_main_image_preemption_options_control_f(u32 v) 468{ 469 return (v & 0x3U) << 0U; 470} 471static inline u32 ctxsw_prog_main_image_preemption_options_control_cta_enabled_f(void) 472{ 473 return 0x1U; 474} 475#endif
diff --git a/include/nvgpu/hw/gm20b/hw_falcon_gm20b.h b/include/nvgpu/hw/gm20b/hw_falcon_gm20b.h
deleted file mode 100644
index c598568..0000000
--- a/include/nvgpu/hw/gm20b/hw_falcon_gm20b.h
+++ /dev/null
@@ -1,599 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_falcon_gm20b_h_ 57#define _hw_falcon_gm20b_h_ 58 59static inline u32 falcon_falcon_irqsset_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 falcon_falcon_irqsclr_r(void) 68{ 69 return 0x00000004U; 70} 71static inline u32 falcon_falcon_irqstat_r(void) 72{ 73 return 0x00000008U; 74} 75static inline u32 falcon_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 falcon_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 falcon_falcon_irqmode_r(void) 88{ 89 return 0x0000000cU; 90} 91static inline u32 falcon_falcon_irqmset_r(void) 92{ 93 return 0x00000010U; 94} 95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 falcon_falcon_irqmset_halt_f(u32 v) 112{ 113 
return (v & 0x1U) << 4U; 114} 115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 falcon_falcon_irqmclr_r(void) 128{ 129 return 0x00000014U; 130} 131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 falcon_falcon_irqmask_r(void) 168{ 169 return 0x00000018U; 170} 171static inline u32 falcon_falcon_irqdest_r(void) 172{ 173 return 0x0000001cU; 174} 175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 
falcon_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 falcon_falcon_curctx_r(void) 248{ 249 return 0x00000050U; 250} 251static inline u32 falcon_falcon_nxtctx_r(void) 252{ 253 return 0x00000054U; 254} 255static inline u32 falcon_falcon_mailbox0_r(void) 256{ 257 return 0x00000040U; 258} 259static inline u32 falcon_falcon_mailbox1_r(void) 260{ 261 return 0x00000044U; 262} 263static inline u32 falcon_falcon_itfen_r(void) 264{ 265 return 0x00000048U; 266} 267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 falcon_falcon_idlestate_r(void) 272{ 273 return 0x0000004cU; 274} 275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) 
276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 falcon_falcon_os_r(void) 284{ 285 return 0x00000080U; 286} 287static inline u32 falcon_falcon_engctl_r(void) 288{ 289 return 0x000000a4U; 290} 291static inline u32 falcon_falcon_cpuctl_r(void) 292{ 293 return 0x00000100U; 294} 295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) 300{ 301 return (v & 0x1U) << 2U; 302} 303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) 304{ 305 return (v & 0x1U) << 3U; 306} 307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) 308{ 309 return (v & 0x1U) << 4U; 310} 311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) 312{ 313 return 0x1U << 4U; 314} 315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) 316{ 317 return (r >> 4U) & 0x1U; 318} 319static inline u32 falcon_falcon_cpuctl_stopped_m(void) 320{ 321 return 0x1U << 5U; 322} 323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 324{ 325 return (v & 0x1U) << 6U; 326} 327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void) 328{ 329 return 0x1U << 6U; 330} 331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 332{ 333 return (r >> 6U) & 0x1U; 334} 335static inline u32 falcon_falcon_cpuctl_alias_r(void) 336{ 337 return 0x00000130U; 338} 339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) 340{ 341 return (v & 0x1U) << 1U; 342} 343static inline u32 falcon_falcon_imemc_r(u32 i) 344{ 345 return 0x00000180U + i*16U; 346} 347static inline u32 falcon_falcon_imemc_offs_f(u32 v) 348{ 349 return (v & 0x3fU) << 2U; 350} 351static inline u32 falcon_falcon_imemc_blk_f(u32 v) 352{ 353 return (v & 0xffU) << 8U; 354} 355static inline u32 falcon_falcon_imemc_aincw_f(u32 v) 356{ 357 return (v & 0x1U) << 24U; 358} 359static inline u32 
falcon_falcon_imemc_secure_f(u32 v) 360{ 361 return (v & 0x1U) << 28U; 362} 363static inline u32 falcon_falcon_imemd_r(u32 i) 364{ 365 return 0x00000184U + i*16U; 366} 367static inline u32 falcon_falcon_imemt_r(u32 i) 368{ 369 return 0x00000188U + i*16U; 370} 371static inline u32 falcon_falcon_sctl_r(void) 372{ 373 return 0x00000240U; 374} 375static inline u32 falcon_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 falcon_falcon_bootvec_r(void) 380{ 381 return 0x00000104U; 382} 383static inline u32 falcon_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 falcon_falcon_dmactl_r(void) 388{ 389 return 0x0000010cU; 390} 391static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) 400{ 401 return (v & 0x1U) << 0U; 402} 403static inline u32 falcon_falcon_hwcfg_r(void) 404{ 405 return 0x00000108U; 406} 407static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) 408{ 409 return (r >> 0U) & 0x1ffU; 410} 411static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) 412{ 413 return (r >> 9U) & 0x1ffU; 414} 415static inline u32 falcon_falcon_dmatrfbase_r(void) 416{ 417 return 0x00000110U; 418} 419static inline u32 falcon_falcon_dmatrfmoffs_r(void) 420{ 421 return 0x00000114U; 422} 423static inline u32 falcon_falcon_dmatrfcmd_r(void) 424{ 425 return 0x00000118U; 426} 427static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) 428{ 429 return (v & 0x1U) << 4U; 430} 431static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) 432{ 433 return (v & 0x1U) << 5U; 434} 435static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) 436{ 437 return (v & 0x7U) << 8U; 438} 439static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) 440{ 441 return (v & 0x7U) << 12U; 442} 443static inline u32 
falcon_falcon_dmatrffboffs_r(void) 444{ 445 return 0x0000011cU; 446} 447static inline u32 falcon_falcon_imctl_debug_r(void) 448{ 449 return 0x0000015cU; 450} 451static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) 452{ 453 return (v & 0xffffffU) << 0U; 454} 455static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) 456{ 457 return (v & 0x7U) << 24U; 458} 459static inline u32 falcon_falcon_imstat_r(void) 460{ 461 return 0x00000144U; 462} 463static inline u32 falcon_falcon_traceidx_r(void) 464{ 465 return 0x00000148U; 466} 467static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) 468{ 469 return (r >> 16U) & 0xffU; 470} 471static inline u32 falcon_falcon_traceidx_idx_f(u32 v) 472{ 473 return (v & 0xffU) << 0U; 474} 475static inline u32 falcon_falcon_tracepc_r(void) 476{ 477 return 0x0000014cU; 478} 479static inline u32 falcon_falcon_tracepc_pc_v(u32 r) 480{ 481 return (r >> 0U) & 0xffffffU; 482} 483static inline u32 falcon_falcon_exterraddr_r(void) 484{ 485 return 0x00000168U; 486} 487static inline u32 falcon_falcon_exterrstat_r(void) 488{ 489 return 0x0000016cU; 490} 491static inline u32 falcon_falcon_exterrstat_valid_m(void) 492{ 493 return 0x1U << 31U; 494} 495static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) 496{ 497 return (r >> 31U) & 0x1U; 498} 499static inline u32 falcon_falcon_exterrstat_valid_true_v(void) 500{ 501 return 0x00000001U; 502} 503static inline u32 falcon_falcon_icd_cmd_r(void) 504{ 505 return 0x00000200U; 506} 507static inline u32 falcon_falcon_icd_cmd_opc_s(void) 508{ 509 return 4U; 510} 511static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 512{ 513 return (v & 0xfU) << 0U; 514} 515static inline u32 falcon_falcon_icd_cmd_opc_m(void) 516{ 517 return 0xfU << 0U; 518} 519static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) 520{ 521 return (r >> 0U) & 0xfU; 522} 523static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) 524{ 525 return 0x8U; 526} 527static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void) 528{ 529 return 
0xeU; 530} 531static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) 532{ 533 return (v & 0x1fU) << 8U; 534} 535static inline u32 falcon_falcon_icd_rdata_r(void) 536{ 537 return 0x0000020cU; 538} 539static inline u32 falcon_falcon_dmemc_r(u32 i) 540{ 541 return 0x000001c0U + i*8U; 542} 543static inline u32 falcon_falcon_dmemc_offs_f(u32 v) 544{ 545 return (v & 0x3fU) << 2U; 546} 547static inline u32 falcon_falcon_dmemc_offs_m(void) 548{ 549 return 0x3fU << 2U; 550} 551static inline u32 falcon_falcon_dmemc_blk_f(u32 v) 552{ 553 return (v & 0xffU) << 8U; 554} 555static inline u32 falcon_falcon_dmemc_blk_m(void) 556{ 557 return 0xffU << 8U; 558} 559static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) 560{ 561 return (v & 0x1U) << 24U; 562} 563static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) 564{ 565 return (v & 0x1U) << 25U; 566} 567static inline u32 falcon_falcon_dmemd_r(u32 i) 568{ 569 return 0x000001c4U + i*8U; 570} 571static inline u32 falcon_falcon_debug1_r(void) 572{ 573 return 0x00000090U; 574} 575static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) 576{ 577 return 1U; 578} 579static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) 580{ 581 return (v & 0x1U) << 16U; 582} 583static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) 584{ 585 return 0x1U << 16U; 586} 587static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) 588{ 589 return (r >> 16U) & 0x1U; 590} 591static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) 592{ 593 return 0x0U; 594} 595static inline u32 falcon_falcon_debuginfo_r(void) 596{ 597 return 0x00000094U; 598} 599#endif
diff --git a/include/nvgpu/hw/gm20b/hw_fb_gm20b.h b/include/nvgpu/hw/gm20b/hw_fb_gm20b.h
deleted file mode 100644
index e6464c1..0000000
--- a/include/nvgpu/hw/gm20b/hw_fb_gm20b.h
+++ /dev/null
@@ -1,339 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fb_gm20b_h_ 57#define _hw_fb_gm20b_h_ 58 59static inline u32 fb_fbhub_num_active_ltcs_r(void) 60{ 61 return 0x00100800U; 62} 63static inline u32 fb_mmu_ctrl_r(void) 64{ 65 return 0x00100c80U; 66} 67static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) 68{ 69 return (r >> 15U) & 0x1U; 70} 71static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) 76{ 77 return (r >> 16U) & 0xffU; 78} 79static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_v(u32 r) 80{ 81 return (r >> 11U) & 0x1U; 82} 83static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_true_f(void) 84{ 85 return 0x800U; 86} 87static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_false_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fb_mmu_ctrl_use_full_comp_tag_line_v(u32 r) 92{ 93 return (r >> 12U) & 0x1U; 94} 95static inline u32 fb_mmu_ctrl_use_full_comp_tag_line_true_f(void) 96{ 97 return 0x1000U; 98} 99static inline u32 fb_priv_mmu_phy_secure_r(void) 100{ 101 return 0x00100ce4U; 102} 103static inline u32 fb_mmu_invalidate_pdb_r(void) 104{ 105 return 0x00100cb8U; 106} 107static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 
fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) 112{ 113 return 0x2U; 114} 115static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) 116{ 117 return (v & 0xfffffffU) << 4U; 118} 119static inline u32 fb_mmu_invalidate_r(void) 120{ 121 return 0x00100cbcU; 122} 123static inline u32 fb_mmu_invalidate_all_va_true_f(void) 124{ 125 return 0x1U; 126} 127static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) 128{ 129 return 0x2U; 130} 131static inline u32 fb_mmu_invalidate_trigger_s(void) 132{ 133 return 1U; 134} 135static inline u32 fb_mmu_invalidate_trigger_f(u32 v) 136{ 137 return (v & 0x1U) << 31U; 138} 139static inline u32 fb_mmu_invalidate_trigger_m(void) 140{ 141 return 0x1U << 31U; 142} 143static inline u32 fb_mmu_invalidate_trigger_v(u32 r) 144{ 145 return (r >> 31U) & 0x1U; 146} 147static inline u32 fb_mmu_invalidate_trigger_true_f(void) 148{ 149 return 0x80000000U; 150} 151static inline u32 fb_mmu_debug_wr_r(void) 152{ 153 return 0x00100cc8U; 154} 155static inline u32 fb_mmu_debug_wr_aperture_s(void) 156{ 157 return 2U; 158} 159static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) 160{ 161 return (v & 0x3U) << 0U; 162} 163static inline u32 fb_mmu_debug_wr_aperture_m(void) 164{ 165 return 0x3U << 0U; 166} 167static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) 168{ 169 return (r >> 0U) & 0x3U; 170} 171static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) 176{ 177 return 0x2U; 178} 179static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) 180{ 181 return 0x3U; 182} 183static inline u32 fb_mmu_debug_wr_vol_false_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 fb_mmu_debug_wr_vol_true_v(void) 188{ 189 return 0x00000001U; 190} 191static inline u32 fb_mmu_debug_wr_vol_true_f(void) 192{ 193 return 0x4U; 194} 195static inline u32 fb_mmu_debug_wr_addr_f(u32 v) 196{ 197 return (v & 0xfffffffU) << 4U; 198} 199static inline u32 
fb_mmu_debug_wr_addr_alignment_v(void) 200{ 201 return 0x0000000cU; 202} 203static inline u32 fb_mmu_debug_rd_r(void) 204{ 205 return 0x00100cccU; 206} 207static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) 212{ 213 return 0x2U; 214} 215static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) 216{ 217 return 0x3U; 218} 219static inline u32 fb_mmu_debug_rd_vol_false_f(void) 220{ 221 return 0x0U; 222} 223static inline u32 fb_mmu_debug_rd_addr_f(u32 v) 224{ 225 return (v & 0xfffffffU) << 4U; 226} 227static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) 228{ 229 return 0x0000000cU; 230} 231static inline u32 fb_mmu_debug_ctrl_r(void) 232{ 233 return 0x00100cc4U; 234} 235static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) 236{ 237 return (r >> 16U) & 0x1U; 238} 239static inline u32 fb_mmu_debug_ctrl_debug_m(void) 240{ 241 return 0x1U << 16U; 242} 243static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) 244{ 245 return 0x00000001U; 246} 247static inline u32 fb_mmu_debug_ctrl_debug_enabled_f(void) 248{ 249 return 0x10000U; 250} 251static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) 252{ 253 return 0x00000000U; 254} 255static inline u32 fb_mmu_debug_ctrl_debug_disabled_f(void) 256{ 257 return 0x0U; 258} 259static inline u32 fb_mmu_vpr_info_r(void) 260{ 261 return 0x00100cd0U; 262} 263static inline u32 fb_mmu_vpr_info_index_f(u32 v) 264{ 265 return (v & 0x3U) << 0U; 266} 267static inline u32 fb_mmu_vpr_info_index_v(u32 r) 268{ 269 return (r >> 0U) & 0x3U; 270} 271static inline u32 fb_mmu_vpr_info_index_addr_lo_v(void) 272{ 273 return 0x00000000U; 274} 275static inline u32 fb_mmu_vpr_info_index_addr_hi_v(void) 276{ 277 return 0x00000001U; 278} 279static inline u32 fb_mmu_vpr_info_index_cya_lo_v(void) 280{ 281 return 0x00000002U; 282} 283static inline u32 fb_mmu_vpr_info_index_cya_hi_v(void) 284{ 285 return 0x00000003U; 286} 287static inline u32 
fb_mmu_vpr_info_fetch_f(u32 v) 288{ 289 return (v & 0x1U) << 2U; 290} 291static inline u32 fb_mmu_vpr_info_fetch_v(u32 r) 292{ 293 return (r >> 2U) & 0x1U; 294} 295static inline u32 fb_mmu_vpr_info_fetch_false_v(void) 296{ 297 return 0x00000000U; 298} 299static inline u32 fb_mmu_vpr_info_fetch_true_v(void) 300{ 301 return 0x00000001U; 302} 303static inline u32 fb_mmu_wpr_info_r(void) 304{ 305 return 0x00100cd4U; 306} 307static inline u32 fb_mmu_wpr_info_index_f(u32 v) 308{ 309 return (v & 0xfU) << 0U; 310} 311static inline u32 fb_mmu_wpr_info_index_allow_read_v(void) 312{ 313 return 0x00000000U; 314} 315static inline u32 fb_mmu_wpr_info_index_allow_write_v(void) 316{ 317 return 0x00000001U; 318} 319static inline u32 fb_mmu_wpr_info_index_wpr1_addr_lo_v(void) 320{ 321 return 0x00000002U; 322} 323static inline u32 fb_mmu_wpr_info_index_wpr1_addr_hi_v(void) 324{ 325 return 0x00000003U; 326} 327static inline u32 fb_mmu_wpr_info_index_wpr2_addr_lo_v(void) 328{ 329 return 0x00000004U; 330} 331static inline u32 fb_mmu_wpr_info_index_wpr2_addr_hi_v(void) 332{ 333 return 0x00000005U; 334} 335static inline u32 fb_niso_flush_sysmem_addr_r(void) 336{ 337 return 0x00100c10U; 338} 339#endif
diff --git a/include/nvgpu/hw/gm20b/hw_fifo_gm20b.h b/include/nvgpu/hw/gm20b/hw_fifo_gm20b.h
deleted file mode 100644
index d32506d..0000000
--- a/include/nvgpu/hw/gm20b/hw_fifo_gm20b.h
+++ /dev/null
@@ -1,571 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fifo_gm20b_h_ 57#define _hw_fifo_gm20b_h_ 58 59static inline u32 fifo_bar1_base_r(void) 60{ 61 return 0x00002254U; 62} 63static inline u32 fifo_bar1_base_ptr_f(u32 v) 64{ 65 return (v & 0xfffffffU) << 0U; 66} 67static inline u32 fifo_bar1_base_ptr_align_shift_v(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 fifo_bar1_base_valid_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fifo_bar1_base_valid_true_f(void) 76{ 77 return 0x10000000U; 78} 79static inline u32 fifo_runlist_base_r(void) 80{ 81 return 0x00002270U; 82} 83static inline u32 fifo_runlist_base_ptr_f(u32 v) 84{ 85 return (v & 0xfffffffU) << 0U; 86} 87static inline u32 fifo_runlist_base_target_vid_mem_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) 92{ 93 return 0x20000000U; 94} 95static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) 96{ 97 return 0x30000000U; 98} 99static inline u32 fifo_runlist_r(void) 100{ 101 return 0x00002274U; 102} 103static inline u32 fifo_runlist_engine_f(u32 v) 104{ 105 return (v & 0xfU) << 20U; 106} 107static inline u32 fifo_eng_runlist_base_r(u32 i) 108{ 109 return 0x00002280U + i*8U; 110} 111static inline u32 fifo_eng_runlist_base__size_1_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 fifo_eng_runlist_r(u32 i) 116{ 117 return 0x00002284U + i*8U; 118} 119static inline u32 fifo_eng_runlist__size_1_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 fifo_eng_runlist_length_f(u32 v) 124{ 125 return (v & 0xffffU) << 0U; 126} 127static inline u32 fifo_eng_runlist_length_max_v(void) 128{ 129 return 0x0000ffffU; 130} 131static inline u32 fifo_eng_runlist_pending_true_f(void) 132{ 133 return 0x100000U; 134} 135static inline u32 fifo_pb_timeslice_r(u32 i) 136{ 137 return 0x00002350U + i*4U; 138} 139static inline u32 fifo_pb_timeslice_timeout_16_f(void) 140{ 141 return 0x10U; 142} 143static inline u32 fifo_pb_timeslice_timescale_0_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 fifo_pb_timeslice_enable_true_f(void) 148{ 149 return 0x10000000U; 150} 151static inline u32 fifo_pbdma_map_r(u32 i) 152{ 153 return 0x00002390U + i*4U; 154} 155static inline u32 fifo_intr_0_r(void) 156{ 157 return 0x00002100U; 158} 159static inline u32 fifo_intr_0_bind_error_pending_f(void) 160{ 161 return 0x1U; 162} 163static inline u32 fifo_intr_0_bind_error_reset_f(void) 164{ 165 return 0x1U; 166} 167static inline u32 fifo_intr_0_sched_error_pending_f(void) 168{ 169 return 0x100U; 170} 171static inline u32 fifo_intr_0_sched_error_reset_f(void) 172{ 173 return 0x100U; 174} 175static inline u32 fifo_intr_0_chsw_error_pending_f(void) 176{ 177 return 0x10000U; 178} 179static inline u32 fifo_intr_0_chsw_error_reset_f(void) 180{ 181 return 0x10000U; 182} 183static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) 184{ 185 return 0x800000U; 186} 187static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) 188{ 189 return 0x800000U; 190} 191static inline u32 fifo_intr_0_lb_error_pending_f(void) 192{ 193 return 0x1000000U; 194} 195static inline u32 fifo_intr_0_lb_error_reset_f(void) 196{ 197 return 0x1000000U; 198} 199static inline u32 fifo_intr_0_dropped_mmu_fault_pending_f(void) 200{ 201 return 0x8000000U; 202} 
203static inline u32 fifo_intr_0_dropped_mmu_fault_reset_f(void) 204{ 205 return 0x8000000U; 206} 207static inline u32 fifo_intr_0_mmu_fault_pending_f(void) 208{ 209 return 0x10000000U; 210} 211static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) 212{ 213 return 0x20000000U; 214} 215static inline u32 fifo_intr_0_runlist_event_pending_f(void) 216{ 217 return 0x40000000U; 218} 219static inline u32 fifo_intr_0_channel_intr_pending_f(void) 220{ 221 return 0x80000000U; 222} 223static inline u32 fifo_intr_en_0_r(void) 224{ 225 return 0x00002140U; 226} 227static inline u32 fifo_intr_en_0_sched_error_f(u32 v) 228{ 229 return (v & 0x1U) << 8U; 230} 231static inline u32 fifo_intr_en_0_sched_error_m(void) 232{ 233 return 0x1U << 8U; 234} 235static inline u32 fifo_intr_en_0_mmu_fault_f(u32 v) 236{ 237 return (v & 0x1U) << 28U; 238} 239static inline u32 fifo_intr_en_0_mmu_fault_m(void) 240{ 241 return 0x1U << 28U; 242} 243static inline u32 fifo_intr_en_1_r(void) 244{ 245 return 0x00002528U; 246} 247static inline u32 fifo_intr_bind_error_r(void) 248{ 249 return 0x0000252cU; 250} 251static inline u32 fifo_intr_sched_error_r(void) 252{ 253 return 0x0000254cU; 254} 255static inline u32 fifo_intr_sched_error_code_f(u32 v) 256{ 257 return (v & 0xffU) << 0U; 258} 259static inline u32 fifo_intr_sched_error_code_ctxsw_timeout_v(void) 260{ 261 return 0x0000000aU; 262} 263static inline u32 fifo_intr_chsw_error_r(void) 264{ 265 return 0x0000256cU; 266} 267static inline u32 fifo_intr_mmu_fault_id_r(void) 268{ 269 return 0x0000259cU; 270} 271static inline u32 fifo_intr_mmu_fault_eng_id_graphics_v(void) 272{ 273 return 0x00000000U; 274} 275static inline u32 fifo_intr_mmu_fault_eng_id_graphics_f(void) 276{ 277 return 0x0U; 278} 279static inline u32 fifo_intr_mmu_fault_inst_r(u32 i) 280{ 281 return 0x00002800U + i*16U; 282} 283static inline u32 fifo_intr_mmu_fault_inst_ptr_v(u32 r) 284{ 285 return (r >> 0U) & 0xfffffffU; 286} 287static inline u32 
fifo_intr_mmu_fault_inst_ptr_align_shift_v(void) 288{ 289 return 0x0000000cU; 290} 291static inline u32 fifo_intr_mmu_fault_lo_r(u32 i) 292{ 293 return 0x00002804U + i*16U; 294} 295static inline u32 fifo_intr_mmu_fault_hi_r(u32 i) 296{ 297 return 0x00002808U + i*16U; 298} 299static inline u32 fifo_intr_mmu_fault_info_r(u32 i) 300{ 301 return 0x0000280cU + i*16U; 302} 303static inline u32 fifo_intr_mmu_fault_info_type_v(u32 r) 304{ 305 return (r >> 0U) & 0xfU; 306} 307static inline u32 fifo_intr_mmu_fault_info_write_v(u32 r) 308{ 309 return (r >> 7U) & 0x1U; 310} 311static inline u32 fifo_intr_mmu_fault_info_engine_subid_v(u32 r) 312{ 313 return (r >> 6U) & 0x1U; 314} 315static inline u32 fifo_intr_mmu_fault_info_engine_subid_gpc_v(void) 316{ 317 return 0x00000000U; 318} 319static inline u32 fifo_intr_mmu_fault_info_engine_subid_hub_v(void) 320{ 321 return 0x00000001U; 322} 323static inline u32 fifo_intr_mmu_fault_info_client_v(u32 r) 324{ 325 return (r >> 8U) & 0x3fU; 326} 327static inline u32 fifo_intr_pbdma_id_r(void) 328{ 329 return 0x000025a0U; 330} 331static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) 332{ 333 return (v & 0x1U) << (0U + i*1U); 334} 335static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) 336{ 337 return (r >> (0U + i*1U)) & 0x1U; 338} 339static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) 340{ 341 return 0x00000001U; 342} 343static inline u32 fifo_intr_runlist_r(void) 344{ 345 return 0x00002a00U; 346} 347static inline u32 fifo_fb_timeout_r(void) 348{ 349 return 0x00002a04U; 350} 351static inline u32 fifo_fb_timeout_period_m(void) 352{ 353 return 0x3fffffffU << 0U; 354} 355static inline u32 fifo_fb_timeout_period_max_f(void) 356{ 357 return 0x3fffffffU; 358} 359static inline u32 fifo_error_sched_disable_r(void) 360{ 361 return 0x0000262cU; 362} 363static inline u32 fifo_sched_disable_r(void) 364{ 365 return 0x00002630U; 366} 367static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) 368{ 369 return (v & 0x1U) << (0U 
+ i*1U); 370} 371static inline u32 fifo_sched_disable_runlist_m(u32 i) 372{ 373 return 0x1U << (0U + i*1U); 374} 375static inline u32 fifo_sched_disable_true_v(void) 376{ 377 return 0x00000001U; 378} 379static inline u32 fifo_preempt_r(void) 380{ 381 return 0x00002634U; 382} 383static inline u32 fifo_preempt_pending_true_f(void) 384{ 385 return 0x100000U; 386} 387static inline u32 fifo_preempt_type_channel_f(void) 388{ 389 return 0x0U; 390} 391static inline u32 fifo_preempt_type_tsg_f(void) 392{ 393 return 0x1000000U; 394} 395static inline u32 fifo_preempt_chid_f(u32 v) 396{ 397 return (v & 0xfffU) << 0U; 398} 399static inline u32 fifo_preempt_id_f(u32 v) 400{ 401 return (v & 0xfffU) << 0U; 402} 403static inline u32 fifo_trigger_mmu_fault_r(u32 i) 404{ 405 return 0x00002a30U + i*4U; 406} 407static inline u32 fifo_trigger_mmu_fault_id_f(u32 v) 408{ 409 return (v & 0x1fU) << 0U; 410} 411static inline u32 fifo_trigger_mmu_fault_enable_f(u32 v) 412{ 413 return (v & 0x1U) << 8U; 414} 415static inline u32 fifo_engine_status_r(u32 i) 416{ 417 return 0x00002640U + i*8U; 418} 419static inline u32 fifo_engine_status__size_1_v(void) 420{ 421 return 0x00000002U; 422} 423static inline u32 fifo_engine_status_id_v(u32 r) 424{ 425 return (r >> 0U) & 0xfffU; 426} 427static inline u32 fifo_engine_status_id_type_v(u32 r) 428{ 429 return (r >> 12U) & 0x1U; 430} 431static inline u32 fifo_engine_status_id_type_chid_v(void) 432{ 433 return 0x00000000U; 434} 435static inline u32 fifo_engine_status_id_type_tsgid_v(void) 436{ 437 return 0x00000001U; 438} 439static inline u32 fifo_engine_status_ctx_status_v(u32 r) 440{ 441 return (r >> 13U) & 0x7U; 442} 443static inline u32 fifo_engine_status_ctx_status_invalid_v(void) 444{ 445 return 0x00000000U; 446} 447static inline u32 fifo_engine_status_ctx_status_valid_v(void) 448{ 449 return 0x00000001U; 450} 451static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) 452{ 453 return 0x00000005U; 454} 455static inline u32 
fifo_engine_status_ctx_status_ctxsw_save_v(void) 456{ 457 return 0x00000006U; 458} 459static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) 460{ 461 return 0x00000007U; 462} 463static inline u32 fifo_engine_status_next_id_v(u32 r) 464{ 465 return (r >> 16U) & 0xfffU; 466} 467static inline u32 fifo_engine_status_next_id_type_v(u32 r) 468{ 469 return (r >> 28U) & 0x1U; 470} 471static inline u32 fifo_engine_status_next_id_type_chid_v(void) 472{ 473 return 0x00000000U; 474} 475static inline u32 fifo_engine_status_faulted_v(u32 r) 476{ 477 return (r >> 30U) & 0x1U; 478} 479static inline u32 fifo_engine_status_faulted_true_v(void) 480{ 481 return 0x00000001U; 482} 483static inline u32 fifo_engine_status_engine_v(u32 r) 484{ 485 return (r >> 31U) & 0x1U; 486} 487static inline u32 fifo_engine_status_engine_idle_v(void) 488{ 489 return 0x00000000U; 490} 491static inline u32 fifo_engine_status_engine_busy_v(void) 492{ 493 return 0x00000001U; 494} 495static inline u32 fifo_engine_status_ctxsw_v(u32 r) 496{ 497 return (r >> 15U) & 0x1U; 498} 499static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) 500{ 501 return 0x00000001U; 502} 503static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) 504{ 505 return 0x8000U; 506} 507static inline u32 fifo_pbdma_status_r(u32 i) 508{ 509 return 0x00003080U + i*4U; 510} 511static inline u32 fifo_pbdma_status__size_1_v(void) 512{ 513 return 0x00000001U; 514} 515static inline u32 fifo_pbdma_status_id_v(u32 r) 516{ 517 return (r >> 0U) & 0xfffU; 518} 519static inline u32 fifo_pbdma_status_id_type_v(u32 r) 520{ 521 return (r >> 12U) & 0x1U; 522} 523static inline u32 fifo_pbdma_status_id_type_chid_v(void) 524{ 525 return 0x00000000U; 526} 527static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) 528{ 529 return 0x00000001U; 530} 531static inline u32 fifo_pbdma_status_chan_status_v(u32 r) 532{ 533 return (r >> 13U) & 0x7U; 534} 535static inline u32 fifo_pbdma_status_chan_status_valid_v(void) 536{ 537 return 
0x00000001U; 538} 539static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) 540{ 541 return 0x00000005U; 542} 543static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) 544{ 545 return 0x00000006U; 546} 547static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) 548{ 549 return 0x00000007U; 550} 551static inline u32 fifo_pbdma_status_next_id_v(u32 r) 552{ 553 return (r >> 16U) & 0xfffU; 554} 555static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) 556{ 557 return (r >> 28U) & 0x1U; 558} 559static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) 560{ 561 return 0x00000000U; 562} 563static inline u32 fifo_pbdma_status_chsw_v(u32 r) 564{ 565 return (r >> 15U) & 0x1U; 566} 567static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) 568{ 569 return 0x00000001U; 570} 571#endif
diff --git a/include/nvgpu/hw/gm20b/hw_flush_gm20b.h b/include/nvgpu/hw/gm20b/hw_flush_gm20b.h
deleted file mode 100644
index 3b5801b..0000000
--- a/include/nvgpu/hw/gm20b/hw_flush_gm20b.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_flush_gm20b_h_ 57#define _hw_flush_gm20b_h_ 58 59static inline u32 flush_l2_system_invalidate_r(void) 60{ 61 return 0x00070004U; 62} 63static inline u32 flush_l2_system_invalidate_pending_v(u32 r) 64{ 65 return (r >> 0U) & 0x1U; 66} 67static inline u32 flush_l2_system_invalidate_pending_busy_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 flush_l2_system_invalidate_pending_busy_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) 76{ 77 return (r >> 1U) & 0x1U; 78} 79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 flush_l2_flush_dirty_r(void) 84{ 85 return 0x00070010U; 86} 87static inline u32 flush_l2_flush_dirty_pending_v(u32 r) 88{ 89 return (r >> 0U) & 0x1U; 90} 91static inline u32 flush_l2_flush_dirty_pending_empty_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 flush_l2_flush_dirty_pending_empty_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 flush_l2_flush_dirty_pending_busy_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 flush_l2_flush_dirty_pending_busy_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) 108{ 109 return 
(r >> 1U) & 0x1U; 110} 111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 flush_l2_clean_comptags_r(void) 124{ 125 return 0x0007000cU; 126} 127static inline u32 flush_l2_clean_comptags_pending_v(u32 r) 128{ 129 return (r >> 0U) & 0x1U; 130} 131static inline u32 flush_l2_clean_comptags_pending_empty_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 flush_l2_clean_comptags_pending_empty_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 flush_l2_clean_comptags_pending_busy_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 flush_l2_clean_comptags_pending_busy_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 flush_fb_flush_r(void) 164{ 165 return 0x00070000U; 166} 167static inline u32 flush_fb_flush_pending_v(u32 r) 168{ 169 return (r >> 0U) & 0x1U; 170} 171static inline u32 flush_fb_flush_pending_busy_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 flush_fb_flush_pending_busy_f(void) 176{ 177 return 0x1U; 178} 179static inline u32 flush_fb_flush_outstanding_v(u32 r) 180{ 181 return (r >> 1U) & 0x1U; 182} 183static inline u32 flush_fb_flush_outstanding_true_v(void) 184{ 185 return 0x00000001U; 186} 187#endif
diff --git a/include/nvgpu/hw/gm20b/hw_fuse_gm20b.h b/include/nvgpu/hw/gm20b/hw_fuse_gm20b.h
deleted file mode 100644
index d97eb7d..0000000
--- a/include/nvgpu/hw/gm20b/hw_fuse_gm20b.h
+++ /dev/null
@@ -1,147 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fuse_gm20b_h_ 57#define _hw_fuse_gm20b_h_ 58 59static inline u32 fuse_status_opt_gpc_r(void) 60{ 61 return 0x00021c1cU; 62} 63static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) 64{ 65 return 0x00021c38U + i*4U; 66} 67static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) 68{ 69 return 0x00021838U + i*4U; 70} 71static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) 72{ 73 return 0x00021944U; 74} 75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) 76{ 77 return (v & 0x3U) << 0U; 78} 79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) 80{ 81 return 0x3U << 0U; 82} 83static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) 84{ 85 return (r >> 0U) & 0x3U; 86} 87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) 88{ 89 return 0x00021948U; 90} 91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) 92{ 93 return (v & 0x1U) << 0U; 94} 95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) 96{ 97 return 0x1U << 0U; 98} 99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 
fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 fuse_status_opt_fbio_r(void) 112{ 113 return 0x00021c14U; 114} 115static inline u32 fuse_status_opt_fbio_data_f(u32 v) 116{ 117 return (v & 0xffffU) << 0U; 118} 119static inline u32 fuse_status_opt_fbio_data_m(void) 120{ 121 return 0xffffU << 0U; 122} 123static inline u32 fuse_status_opt_fbio_data_v(u32 r) 124{ 125 return (r >> 0U) & 0xffffU; 126} 127static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i) 128{ 129 return 0x00021d70U + i*4U; 130} 131static inline u32 fuse_status_opt_fbp_r(void) 132{ 133 return 0x00021d38U; 134} 135static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) 136{ 137 return (r >> (0U + i*1U)) & 0x1U; 138} 139static inline u32 fuse_opt_sec_debug_en_r(void) 140{ 141 return 0x00021218U; 142} 143static inline u32 fuse_opt_priv_sec_en_r(void) 144{ 145 return 0x00021434U; 146} 147#endif
diff --git a/include/nvgpu/hw/gm20b/hw_gmmu_gm20b.h b/include/nvgpu/hw/gm20b/hw_gmmu_gm20b.h
deleted file mode 100644
index 11cc3d7..0000000
--- a/include/nvgpu/hw/gm20b/hw_gmmu_gm20b.h
+++ /dev/null
@@ -1,283 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gmmu_gm20b_h_ 57#define _hw_gmmu_gm20b_h_ 58 59static inline u32 gmmu_pde_aperture_big_w(void) 60{ 61 return 0U; 62} 63static inline u32 gmmu_pde_aperture_big_invalid_f(void) 64{ 65 return 0x0U; 66} 67static inline u32 gmmu_pde_aperture_big_video_memory_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gmmu_pde_aperture_big_sys_mem_coh_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gmmu_pde_aperture_big_sys_mem_ncoh_f(void) 76{ 77 return 0x3U; 78} 79static inline u32 gmmu_pde_size_w(void) 80{ 81 return 0U; 82} 83static inline u32 gmmu_pde_size_full_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 gmmu_pde_address_big_sys_f(u32 v) 88{ 89 return (v & 0xfffffffU) << 4U; 90} 91static inline u32 gmmu_pde_address_big_sys_w(void) 92{ 93 return 0U; 94} 95static inline u32 gmmu_pde_aperture_small_w(void) 96{ 97 return 1U; 98} 99static inline u32 gmmu_pde_aperture_small_invalid_f(void) 100{ 101 return 0x0U; 102} 103static inline u32 gmmu_pde_aperture_small_video_memory_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 gmmu_pde_aperture_small_sys_mem_coh_f(void) 108{ 109 return 0x2U; 110} 111static inline u32 gmmu_pde_aperture_small_sys_mem_ncoh_f(void) 112{ 113 return 0x3U; 114} 115static inline u32 
gmmu_pde_vol_small_w(void) 116{ 117 return 1U; 118} 119static inline u32 gmmu_pde_vol_small_true_f(void) 120{ 121 return 0x4U; 122} 123static inline u32 gmmu_pde_vol_small_false_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 gmmu_pde_vol_big_w(void) 128{ 129 return 1U; 130} 131static inline u32 gmmu_pde_vol_big_true_f(void) 132{ 133 return 0x8U; 134} 135static inline u32 gmmu_pde_vol_big_false_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 gmmu_pde_address_small_sys_f(u32 v) 140{ 141 return (v & 0xfffffffU) << 4U; 142} 143static inline u32 gmmu_pde_address_small_sys_w(void) 144{ 145 return 1U; 146} 147static inline u32 gmmu_pde_address_shift_v(void) 148{ 149 return 0x0000000cU; 150} 151static inline u32 gmmu_pde__size_v(void) 152{ 153 return 0x00000008U; 154} 155static inline u32 gmmu_pte__size_v(void) 156{ 157 return 0x00000008U; 158} 159static inline u32 gmmu_pte_valid_w(void) 160{ 161 return 0U; 162} 163static inline u32 gmmu_pte_valid_true_f(void) 164{ 165 return 0x1U; 166} 167static inline u32 gmmu_pte_valid_false_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 gmmu_pte_privilege_w(void) 172{ 173 return 0U; 174} 175static inline u32 gmmu_pte_privilege_true_f(void) 176{ 177 return 0x2U; 178} 179static inline u32 gmmu_pte_privilege_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 gmmu_pte_address_sys_f(u32 v) 184{ 185 return (v & 0xfffffffU) << 4U; 186} 187static inline u32 gmmu_pte_address_sys_w(void) 188{ 189 return 0U; 190} 191static inline u32 gmmu_pte_address_vid_f(u32 v) 192{ 193 return (v & 0x1ffffffU) << 4U; 194} 195static inline u32 gmmu_pte_address_vid_w(void) 196{ 197 return 0U; 198} 199static inline u32 gmmu_pte_vol_w(void) 200{ 201 return 1U; 202} 203static inline u32 gmmu_pte_vol_true_f(void) 204{ 205 return 0x1U; 206} 207static inline u32 gmmu_pte_vol_false_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 gmmu_pte_aperture_w(void) 212{ 213 return 1U; 214} 215static inline u32 
gmmu_pte_aperture_video_memory_f(void) 216{ 217 return 0x0U; 218} 219static inline u32 gmmu_pte_aperture_sys_mem_coh_f(void) 220{ 221 return 0x4U; 222} 223static inline u32 gmmu_pte_aperture_sys_mem_ncoh_f(void) 224{ 225 return 0x6U; 226} 227static inline u32 gmmu_pte_read_only_w(void) 228{ 229 return 0U; 230} 231static inline u32 gmmu_pte_read_only_true_f(void) 232{ 233 return 0x4U; 234} 235static inline u32 gmmu_pte_write_disable_w(void) 236{ 237 return 1U; 238} 239static inline u32 gmmu_pte_write_disable_true_f(void) 240{ 241 return 0x80000000U; 242} 243static inline u32 gmmu_pte_read_disable_w(void) 244{ 245 return 1U; 246} 247static inline u32 gmmu_pte_read_disable_true_f(void) 248{ 249 return 0x40000000U; 250} 251static inline u32 gmmu_pte_comptagline_s(void) 252{ 253 return 17U; 254} 255static inline u32 gmmu_pte_comptagline_f(u32 v) 256{ 257 return (v & 0x1ffffU) << 12U; 258} 259static inline u32 gmmu_pte_comptagline_w(void) 260{ 261 return 1U; 262} 263static inline u32 gmmu_pte_address_shift_v(void) 264{ 265 return 0x0000000cU; 266} 267static inline u32 gmmu_pte_kind_f(u32 v) 268{ 269 return (v & 0xffU) << 4U; 270} 271static inline u32 gmmu_pte_kind_w(void) 272{ 273 return 1U; 274} 275static inline u32 gmmu_pte_kind_invalid_v(void) 276{ 277 return 0x000000ffU; 278} 279static inline u32 gmmu_pte_kind_pitch_v(void) 280{ 281 return 0x00000000U; 282} 283#endif
diff --git a/include/nvgpu/hw/gm20b/hw_gr_gm20b.h b/include/nvgpu/hw/gm20b/hw_gr_gm20b.h
deleted file mode 100644
index 79ad326..0000000
--- a/include/nvgpu/hw/gm20b/hw_gr_gm20b.h
+++ /dev/null
@@ -1,3939 +0,0 @@ 1/* 2 * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gr_gm20b_h_ 57#define _hw_gr_gm20b_h_ 58 59static inline u32 gr_intr_r(void) 60{ 61 return 0x00400100U; 62} 63static inline u32 gr_intr_notify_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 gr_intr_notify_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gr_intr_semaphore_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gr_intr_semaphore_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gr_intr_illegal_method_pending_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 gr_intr_illegal_method_reset_f(void) 84{ 85 return 0x10U; 86} 87static inline u32 gr_intr_illegal_notify_pending_f(void) 88{ 89 return 0x40U; 90} 91static inline u32 gr_intr_illegal_notify_reset_f(void) 92{ 93 return 0x40U; 94} 95static inline u32 gr_intr_firmware_method_f(u32 v) 96{ 97 return (v & 0x1U) << 8U; 98} 99static inline u32 gr_intr_firmware_method_pending_f(void) 100{ 101 return 0x100U; 102} 103static inline u32 gr_intr_firmware_method_reset_f(void) 104{ 105 return 0x100U; 106} 107static inline u32 gr_intr_illegal_class_pending_f(void) 108{ 109 return 0x20U; 110} 111static inline u32 gr_intr_illegal_class_reset_f(void) 112{ 113 return 0x20U; 114} 115static inline u32 gr_intr_fecs_error_pending_f(void) 116{ 117 
return 0x80000U; 118} 119static inline u32 gr_intr_fecs_error_reset_f(void) 120{ 121 return 0x80000U; 122} 123static inline u32 gr_intr_class_error_pending_f(void) 124{ 125 return 0x100000U; 126} 127static inline u32 gr_intr_class_error_reset_f(void) 128{ 129 return 0x100000U; 130} 131static inline u32 gr_intr_exception_pending_f(void) 132{ 133 return 0x200000U; 134} 135static inline u32 gr_intr_exception_reset_f(void) 136{ 137 return 0x200000U; 138} 139static inline u32 gr_fecs_intr_r(void) 140{ 141 return 0x00400144U; 142} 143static inline u32 gr_class_error_r(void) 144{ 145 return 0x00400110U; 146} 147static inline u32 gr_class_error_code_v(u32 r) 148{ 149 return (r >> 0U) & 0xffffU; 150} 151static inline u32 gr_intr_nonstall_r(void) 152{ 153 return 0x00400120U; 154} 155static inline u32 gr_intr_nonstall_trap_pending_f(void) 156{ 157 return 0x2U; 158} 159static inline u32 gr_intr_en_r(void) 160{ 161 return 0x0040013cU; 162} 163static inline u32 gr_exception_r(void) 164{ 165 return 0x00400108U; 166} 167static inline u32 gr_exception_fe_m(void) 168{ 169 return 0x1U << 0U; 170} 171static inline u32 gr_exception_gpc_m(void) 172{ 173 return 0x1U << 24U; 174} 175static inline u32 gr_exception_memfmt_m(void) 176{ 177 return 0x1U << 1U; 178} 179static inline u32 gr_exception_ds_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 gr_exception_sked_m(void) 184{ 185 return 0x1U << 8U; 186} 187static inline u32 gr_exception_pd_m(void) 188{ 189 return 0x1U << 2U; 190} 191static inline u32 gr_exception_scc_m(void) 192{ 193 return 0x1U << 3U; 194} 195static inline u32 gr_exception_ssync_m(void) 196{ 197 return 0x1U << 5U; 198} 199static inline u32 gr_exception_mme_m(void) 200{ 201 return 0x1U << 7U; 202} 203static inline u32 gr_exception1_r(void) 204{ 205 return 0x00400118U; 206} 207static inline u32 gr_exception1_gpc_0_pending_f(void) 208{ 209 return 0x1U; 210} 211static inline u32 gr_exception2_r(void) 212{ 213 return 0x0040011cU; 214} 215static inline u32 
gr_exception_en_r(void) 216{ 217 return 0x00400138U; 218} 219static inline u32 gr_exception_en_fe_m(void) 220{ 221 return 0x1U << 0U; 222} 223static inline u32 gr_exception1_en_r(void) 224{ 225 return 0x00400130U; 226} 227static inline u32 gr_exception2_en_r(void) 228{ 229 return 0x00400134U; 230} 231static inline u32 gr_gpfifo_ctl_r(void) 232{ 233 return 0x00400500U; 234} 235static inline u32 gr_gpfifo_ctl_access_f(u32 v) 236{ 237 return (v & 0x1U) << 0U; 238} 239static inline u32 gr_gpfifo_ctl_access_disabled_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gr_gpfifo_ctl_access_enabled_f(void) 244{ 245 return 0x1U; 246} 247static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) 248{ 249 return (v & 0x1U) << 16U; 250} 251static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) 252{ 253 return 0x00000001U; 254} 255static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) 256{ 257 return 0x10000U; 258} 259static inline u32 gr_gpfifo_status_r(void) 260{ 261 return 0x00400504U; 262} 263static inline u32 gr_trapped_addr_r(void) 264{ 265 return 0x00400704U; 266} 267static inline u32 gr_trapped_addr_mthd_v(u32 r) 268{ 269 return (r >> 2U) & 0xfffU; 270} 271static inline u32 gr_trapped_addr_subch_v(u32 r) 272{ 273 return (r >> 16U) & 0x7U; 274} 275static inline u32 gr_trapped_addr_mme_generated_v(u32 r) 276{ 277 return (r >> 20U) & 0x1U; 278} 279static inline u32 gr_trapped_addr_datahigh_v(u32 r) 280{ 281 return (r >> 24U) & 0x1U; 282} 283static inline u32 gr_trapped_addr_priv_v(u32 r) 284{ 285 return (r >> 28U) & 0x1U; 286} 287static inline u32 gr_trapped_addr_status_v(u32 r) 288{ 289 return (r >> 31U) & 0x1U; 290} 291static inline u32 gr_trapped_data_lo_r(void) 292{ 293 return 0x00400708U; 294} 295static inline u32 gr_trapped_data_hi_r(void) 296{ 297 return 0x0040070cU; 298} 299static inline u32 gr_trapped_data_mme_r(void) 300{ 301 return 0x00400710U; 302} 303static inline u32 gr_trapped_data_mme_pc_v(u32 r) 304{ 305 return (r >> 0U) & 0x7ffU; 
306} 307static inline u32 gr_status_r(void) 308{ 309 return 0x00400700U; 310} 311static inline u32 gr_status_fe_method_upper_v(u32 r) 312{ 313 return (r >> 1U) & 0x1U; 314} 315static inline u32 gr_status_fe_method_lower_v(u32 r) 316{ 317 return (r >> 2U) & 0x1U; 318} 319static inline u32 gr_status_fe_method_lower_idle_v(void) 320{ 321 return 0x00000000U; 322} 323static inline u32 gr_status_fe_gi_v(u32 r) 324{ 325 return (r >> 21U) & 0x1U; 326} 327static inline u32 gr_status_mask_r(void) 328{ 329 return 0x00400610U; 330} 331static inline u32 gr_status_1_r(void) 332{ 333 return 0x00400604U; 334} 335static inline u32 gr_status_2_r(void) 336{ 337 return 0x00400608U; 338} 339static inline u32 gr_engine_status_r(void) 340{ 341 return 0x0040060cU; 342} 343static inline u32 gr_engine_status_value_busy_f(void) 344{ 345 return 0x1U; 346} 347static inline u32 gr_pri_be0_becs_be_exception_r(void) 348{ 349 return 0x00410204U; 350} 351static inline u32 gr_pri_be0_becs_be_exception_en_r(void) 352{ 353 return 0x00410208U; 354} 355static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) 356{ 357 return 0x00502c90U; 358} 359static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) 360{ 361 return 0x00502c94U; 362} 363static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) 364{ 365 return 0x00504508U; 366} 367static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 368{ 369 return 0x0050450cU; 370} 371static inline u32 gr_activity_0_r(void) 372{ 373 return 0x00400380U; 374} 375static inline u32 gr_activity_1_r(void) 376{ 377 return 0x00400384U; 378} 379static inline u32 gr_activity_2_r(void) 380{ 381 return 0x00400388U; 382} 383static inline u32 gr_activity_4_r(void) 384{ 385 return 0x00400390U; 386} 387static inline u32 gr_pri_gpc0_gcc_dbg_r(void) 388{ 389 return 0x00501000U; 390} 391static inline u32 gr_pri_gpcs_gcc_dbg_r(void) 392{ 393 return 0x00419000U; 394} 395static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) 396{ 397 return 0x1U << 1U; 398} 399static 
inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) 400{ 401 return 0x005046a4U; 402} 403static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) 404{ 405 return 0x00419ea4U; 406} 407static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) 408{ 409 return 0x1U << 0U; 410} 411static inline u32 gr_pri_sked_activity_r(void) 412{ 413 return 0x00407054U; 414} 415static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) 416{ 417 return 0x00502c80U; 418} 419static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) 420{ 421 return 0x00502c84U; 422} 423static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) 424{ 425 return 0x00502c88U; 426} 427static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) 428{ 429 return 0x00502c8cU; 430} 431static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) 432{ 433 return 0x00504500U; 434} 435static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) 436{ 437 return 0x00504d00U; 438} 439static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) 440{ 441 return 0x00501d00U; 442} 443static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) 444{ 445 return 0x0041ac80U; 446} 447static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) 448{ 449 return 0x0041ac84U; 450} 451static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) 452{ 453 return 0x0041ac88U; 454} 455static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) 456{ 457 return 0x0041ac8cU; 458} 459static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) 460{ 461 return 0x0041c500U; 462} 463static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) 464{ 465 return 0x0041cd00U; 466} 467static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) 468{ 469 return 0x00419d00U; 470} 471static inline u32 gr_pri_be0_becs_be_activity0_r(void) 472{ 473 return 0x00410200U; 474} 475static inline u32 gr_pri_be1_becs_be_activity0_r(void) 476{ 477 return 0x00410600U; 478} 479static inline u32 gr_pri_bes_becs_be_activity0_r(void) 480{ 
481 return 0x00408a00U; 482} 483static inline u32 gr_pri_ds_mpipe_status_r(void) 484{ 485 return 0x00405858U; 486} 487static inline u32 gr_pri_fe_go_idle_on_status_r(void) 488{ 489 return 0x00404150U; 490} 491static inline u32 gr_pri_fe_go_idle_check_r(void) 492{ 493 return 0x00404158U; 494} 495static inline u32 gr_pri_fe_go_idle_info_r(void) 496{ 497 return 0x00404194U; 498} 499static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) 500{ 501 return 0x00504238U; 502} 503static inline u32 gr_pri_be0_crop_status1_r(void) 504{ 505 return 0x00410134U; 506} 507static inline u32 gr_pri_bes_crop_status1_r(void) 508{ 509 return 0x00408934U; 510} 511static inline u32 gr_pri_be0_zrop_status_r(void) 512{ 513 return 0x00410048U; 514} 515static inline u32 gr_pri_be0_zrop_status2_r(void) 516{ 517 return 0x0041004cU; 518} 519static inline u32 gr_pri_bes_zrop_status_r(void) 520{ 521 return 0x00408848U; 522} 523static inline u32 gr_pri_bes_zrop_status2_r(void) 524{ 525 return 0x0040884cU; 526} 527static inline u32 gr_pipe_bundle_address_r(void) 528{ 529 return 0x00400200U; 530} 531static inline u32 gr_pipe_bundle_address_value_v(u32 r) 532{ 533 return (r >> 0U) & 0xffffU; 534} 535static inline u32 gr_pipe_bundle_data_r(void) 536{ 537 return 0x00400204U; 538} 539static inline u32 gr_pipe_bundle_config_r(void) 540{ 541 return 0x00400208U; 542} 543static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) 544{ 545 return 0x0U; 546} 547static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) 548{ 549 return 0x80000000U; 550} 551static inline u32 gr_fe_hww_esr_r(void) 552{ 553 return 0x00404000U; 554} 555static inline u32 gr_fe_hww_esr_reset_active_f(void) 556{ 557 return 0x40000000U; 558} 559static inline u32 gr_fe_hww_esr_en_enable_f(void) 560{ 561 return 0x80000000U; 562} 563static inline u32 gr_fe_hww_esr_info_r(void) 564{ 565 return 0x004041b0U; 566} 567static inline u32 gr_fe_go_idle_timeout_r(void) 568{ 569 return 0x00404154U; 570} 
571static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) 572{ 573 return (v & 0xffffffffU) << 0U; 574} 575static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) 576{ 577 return 0x0U; 578} 579static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) 580{ 581 return 0x800U; 582} 583static inline u32 gr_fe_object_table_r(u32 i) 584{ 585 return 0x00404200U + i*4U; 586} 587static inline u32 gr_fe_object_table_nvclass_v(u32 r) 588{ 589 return (r >> 0U) & 0xffffU; 590} 591static inline u32 gr_fe_tpc_fs_r(void) 592{ 593 return 0x004041c4U; 594} 595static inline u32 gr_pri_mme_shadow_raw_index_r(void) 596{ 597 return 0x00404488U; 598} 599static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) 600{ 601 return 0x80000000U; 602} 603static inline u32 gr_pri_mme_shadow_raw_data_r(void) 604{ 605 return 0x0040448cU; 606} 607static inline u32 gr_mme_hww_esr_r(void) 608{ 609 return 0x00404490U; 610} 611static inline u32 gr_mme_hww_esr_reset_active_f(void) 612{ 613 return 0x40000000U; 614} 615static inline u32 gr_mme_hww_esr_en_enable_f(void) 616{ 617 return 0x80000000U; 618} 619static inline u32 gr_mme_hww_esr_info_r(void) 620{ 621 return 0x00404494U; 622} 623static inline u32 gr_memfmt_hww_esr_r(void) 624{ 625 return 0x00404600U; 626} 627static inline u32 gr_memfmt_hww_esr_reset_active_f(void) 628{ 629 return 0x40000000U; 630} 631static inline u32 gr_memfmt_hww_esr_en_enable_f(void) 632{ 633 return 0x80000000U; 634} 635static inline u32 gr_fecs_cpuctl_r(void) 636{ 637 return 0x00409100U; 638} 639static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) 640{ 641 return (v & 0x1U) << 1U; 642} 643static inline u32 gr_fecs_cpuctl_alias_r(void) 644{ 645 return 0x00409130U; 646} 647static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) 648{ 649 return (v & 0x1U) << 1U; 650} 651static inline u32 gr_fecs_dmactl_r(void) 652{ 653 return 0x0040910cU; 654} 655static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) 656{ 657 return (v & 0x1U) << 0U; 658} 659static inline u32 
gr_fecs_dmactl_dmem_scrubbing_m(void) 660{ 661 return 0x1U << 1U; 662} 663static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) 664{ 665 return 0x1U << 2U; 666} 667static inline u32 gr_fecs_os_r(void) 668{ 669 return 0x00409080U; 670} 671static inline u32 gr_fecs_idlestate_r(void) 672{ 673 return 0x0040904cU; 674} 675static inline u32 gr_fecs_mailbox0_r(void) 676{ 677 return 0x00409040U; 678} 679static inline u32 gr_fecs_mailbox1_r(void) 680{ 681 return 0x00409044U; 682} 683static inline u32 gr_fecs_irqstat_r(void) 684{ 685 return 0x00409008U; 686} 687static inline u32 gr_fecs_irqmode_r(void) 688{ 689 return 0x0040900cU; 690} 691static inline u32 gr_fecs_irqmask_r(void) 692{ 693 return 0x00409018U; 694} 695static inline u32 gr_fecs_irqdest_r(void) 696{ 697 return 0x0040901cU; 698} 699static inline u32 gr_fecs_curctx_r(void) 700{ 701 return 0x00409050U; 702} 703static inline u32 gr_fecs_nxtctx_r(void) 704{ 705 return 0x00409054U; 706} 707static inline u32 gr_fecs_engctl_r(void) 708{ 709 return 0x004090a4U; 710} 711static inline u32 gr_fecs_debug1_r(void) 712{ 713 return 0x00409090U; 714} 715static inline u32 gr_fecs_debuginfo_r(void) 716{ 717 return 0x00409094U; 718} 719static inline u32 gr_fecs_icd_cmd_r(void) 720{ 721 return 0x00409200U; 722} 723static inline u32 gr_fecs_icd_cmd_opc_s(void) 724{ 725 return 4U; 726} 727static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) 728{ 729 return (v & 0xfU) << 0U; 730} 731static inline u32 gr_fecs_icd_cmd_opc_m(void) 732{ 733 return 0xfU << 0U; 734} 735static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) 736{ 737 return (r >> 0U) & 0xfU; 738} 739static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) 740{ 741 return 0x8U; 742} 743static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) 744{ 745 return 0xeU; 746} 747static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) 748{ 749 return (v & 0x1fU) << 8U; 750} 751static inline u32 gr_fecs_icd_rdata_r(void) 752{ 753 return 0x0040920cU; 754} 755static inline u32 gr_fecs_imemc_r(u32 i) 756{ 757 return 
0x00409180U + i*16U; 758} 759static inline u32 gr_fecs_imemc_offs_f(u32 v) 760{ 761 return (v & 0x3fU) << 2U; 762} 763static inline u32 gr_fecs_imemc_blk_f(u32 v) 764{ 765 return (v & 0xffU) << 8U; 766} 767static inline u32 gr_fecs_imemc_aincw_f(u32 v) 768{ 769 return (v & 0x1U) << 24U; 770} 771static inline u32 gr_fecs_imemd_r(u32 i) 772{ 773 return 0x00409184U + i*16U; 774} 775static inline u32 gr_fecs_imemt_r(u32 i) 776{ 777 return 0x00409188U + i*16U; 778} 779static inline u32 gr_fecs_imemt_tag_f(u32 v) 780{ 781 return (v & 0xffffU) << 0U; 782} 783static inline u32 gr_fecs_dmemc_r(u32 i) 784{ 785 return 0x004091c0U + i*8U; 786} 787static inline u32 gr_fecs_dmemc_offs_s(void) 788{ 789 return 6U; 790} 791static inline u32 gr_fecs_dmemc_offs_f(u32 v) 792{ 793 return (v & 0x3fU) << 2U; 794} 795static inline u32 gr_fecs_dmemc_offs_m(void) 796{ 797 return 0x3fU << 2U; 798} 799static inline u32 gr_fecs_dmemc_offs_v(u32 r) 800{ 801 return (r >> 2U) & 0x3fU; 802} 803static inline u32 gr_fecs_dmemc_blk_f(u32 v) 804{ 805 return (v & 0xffU) << 8U; 806} 807static inline u32 gr_fecs_dmemc_aincw_f(u32 v) 808{ 809 return (v & 0x1U) << 24U; 810} 811static inline u32 gr_fecs_dmemd_r(u32 i) 812{ 813 return 0x004091c4U + i*8U; 814} 815static inline u32 gr_fecs_dmatrfbase_r(void) 816{ 817 return 0x00409110U; 818} 819static inline u32 gr_fecs_dmatrfmoffs_r(void) 820{ 821 return 0x00409114U; 822} 823static inline u32 gr_fecs_dmatrffboffs_r(void) 824{ 825 return 0x0040911cU; 826} 827static inline u32 gr_fecs_dmatrfcmd_r(void) 828{ 829 return 0x00409118U; 830} 831static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) 832{ 833 return (v & 0x1U) << 4U; 834} 835static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) 836{ 837 return (v & 0x1U) << 5U; 838} 839static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) 840{ 841 return (v & 0x7U) << 8U; 842} 843static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) 844{ 845 return (v & 0x7U) << 12U; 846} 847static inline u32 gr_fecs_bootvec_r(void) 848{ 849 return 
0x00409104U; 850} 851static inline u32 gr_fecs_bootvec_vec_f(u32 v) 852{ 853 return (v & 0xffffffffU) << 0U; 854} 855static inline u32 gr_fecs_falcon_hwcfg_r(void) 856{ 857 return 0x00409108U; 858} 859static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) 860{ 861 return 0x0041a108U; 862} 863static inline u32 gr_fecs_falcon_rm_r(void) 864{ 865 return 0x00409084U; 866} 867static inline u32 gr_fecs_current_ctx_r(void) 868{ 869 return 0x00409b00U; 870} 871static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) 872{ 873 return (v & 0xfffffffU) << 0U; 874} 875static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) 876{ 877 return (r >> 0U) & 0xfffffffU; 878} 879static inline u32 gr_fecs_current_ctx_target_s(void) 880{ 881 return 2U; 882} 883static inline u32 gr_fecs_current_ctx_target_f(u32 v) 884{ 885 return (v & 0x3U) << 28U; 886} 887static inline u32 gr_fecs_current_ctx_target_m(void) 888{ 889 return 0x3U << 28U; 890} 891static inline u32 gr_fecs_current_ctx_target_v(u32 r) 892{ 893 return (r >> 28U) & 0x3U; 894} 895static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) 896{ 897 return 0x0U; 898} 899static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) 900{ 901 return 0x20000000U; 902} 903static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) 904{ 905 return 0x30000000U; 906} 907static inline u32 gr_fecs_current_ctx_valid_s(void) 908{ 909 return 1U; 910} 911static inline u32 gr_fecs_current_ctx_valid_f(u32 v) 912{ 913 return (v & 0x1U) << 31U; 914} 915static inline u32 gr_fecs_current_ctx_valid_m(void) 916{ 917 return 0x1U << 31U; 918} 919static inline u32 gr_fecs_current_ctx_valid_v(u32 r) 920{ 921 return (r >> 31U) & 0x1U; 922} 923static inline u32 gr_fecs_current_ctx_valid_false_f(void) 924{ 925 return 0x0U; 926} 927static inline u32 gr_fecs_method_data_r(void) 928{ 929 return 0x00409500U; 930} 931static inline u32 gr_fecs_method_push_r(void) 932{ 933 return 0x00409504U; 934} 935static inline u32 gr_fecs_method_push_adr_f(u32 v) 936{ 937 return (v 
& 0xfffU) << 0U; 938} 939static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) 940{ 941 return 0x00000003U; 942} 943static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) 944{ 945 return 0x3U; 946} 947static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) 948{ 949 return 0x00000010U; 950} 951static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) 952{ 953 return 0x00000009U; 954} 955static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) 956{ 957 return 0x00000015U; 958} 959static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) 960{ 961 return 0x00000016U; 962} 963static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) 964{ 965 return 0x00000025U; 966} 967static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) 968{ 969 return 0x00000030U; 970} 971static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) 972{ 973 return 0x00000031U; 974} 975static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) 976{ 977 return 0x00000032U; 978} 979static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) 980{ 981 return 0x00000038U; 982} 983static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) 984{ 985 return 0x00000039U; 986} 987static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) 988{ 989 return 0x21U; 990} 991static inline u32 gr_fecs_method_push_adr_write_timestamp_record_v(void) 992{ 993 return 0x0000003dU; 994} 995static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) 996{ 997 return 0x00000004U; 998} 999static inline u32 gr_fecs_host_int_status_r(void) 1000{ 1001 return 0x00409c18U; 1002} 1003static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) 1004{ 1005 return (v & 0x1U) << 16U; 1006} 1007static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) 1008{ 1009 return (v & 0x1U) << 17U; 1010} 1011static inline u32 
gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) 1012{ 1013 return (v & 0x1U) << 18U; 1014} 1015static inline u32 gr_fecs_host_int_status_watchdog_active_f(void) 1016{ 1017 return 0x80000U; 1018} 1019static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) 1020{ 1021 return (v & 0xffffU) << 0U; 1022} 1023static inline u32 gr_fecs_host_int_clear_r(void) 1024{ 1025 return 0x00409c20U; 1026} 1027static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) 1028{ 1029 return (v & 0x1U) << 1U; 1030} 1031static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) 1032{ 1033 return 0x2U; 1034} 1035static inline u32 gr_fecs_host_int_enable_r(void) 1036{ 1037 return 0x00409c24U; 1038} 1039static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) 1040{ 1041 return 0x2U; 1042} 1043static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) 1044{ 1045 return 0x10000U; 1046} 1047static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) 1048{ 1049 return 0x20000U; 1050} 1051static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) 1052{ 1053 return 0x40000U; 1054} 1055static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) 1056{ 1057 return 0x80000U; 1058} 1059static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) 1060{ 1061 return 0x00409614U; 1062} 1063static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) 1064{ 1065 return 0x0U; 1066} 1067static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) 1068{ 1069 return 0x0U; 1070} 1071static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) 1072{ 1073 return 0x0U; 1074} 1075static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) 1076{ 1077 return 0x10U; 1078} 1079static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) 1080{ 1081 return 0x20U; 1082} 1083static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) 1084{ 1085 return 0x40U; 1086} 1087static 
inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) 1088{ 1089 return 0x0U; 1090} 1091static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) 1092{ 1093 return 0x100U; 1094} 1095static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) 1096{ 1097 return 0x0U; 1098} 1099static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) 1100{ 1101 return 0x200U; 1102} 1103static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) 1104{ 1105 return 1U; 1106} 1107static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) 1108{ 1109 return (v & 0x1U) << 10U; 1110} 1111static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) 1112{ 1113 return 0x1U << 10U; 1114} 1115static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) 1116{ 1117 return (r >> 10U) & 0x1U; 1118} 1119static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) 1120{ 1121 return 0x0U; 1122} 1123static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) 1124{ 1125 return 0x400U; 1126} 1127static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) 1128{ 1129 return 0x0040960cU; 1130} 1131static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) 1132{ 1133 return 0x00409800U + i*4U; 1134} 1135static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) 1136{ 1137 return 0x00000010U; 1138} 1139static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) 1140{ 1141 return (v & 0xffffffffU) << 0U; 1142} 1143static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) 1144{ 1145 return 0x00000001U; 1146} 1147static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) 1148{ 1149 return 0x00000002U; 1150} 1151static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) 1152{ 1153 return 0x004098c0U + i*4U; 1154} 1155static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) 1156{ 1157 return (v & 0xffffffffU) << 0U; 1158} 1159static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) 1160{ 1161 return 
0x00409840U + i*4U; 1162} 1163static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) 1164{ 1165 return (v & 0xffffffffU) << 0U; 1166} 1167static inline u32 gr_fecs_fs_r(void) 1168{ 1169 return 0x00409604U; 1170} 1171static inline u32 gr_fecs_fs_num_available_gpcs_s(void) 1172{ 1173 return 5U; 1174} 1175static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) 1176{ 1177 return (v & 0x1fU) << 0U; 1178} 1179static inline u32 gr_fecs_fs_num_available_gpcs_m(void) 1180{ 1181 return 0x1fU << 0U; 1182} 1183static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) 1184{ 1185 return (r >> 0U) & 0x1fU; 1186} 1187static inline u32 gr_fecs_fs_num_available_fbps_s(void) 1188{ 1189 return 5U; 1190} 1191static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) 1192{ 1193 return (v & 0x1fU) << 16U; 1194} 1195static inline u32 gr_fecs_fs_num_available_fbps_m(void) 1196{ 1197 return 0x1fU << 16U; 1198} 1199static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) 1200{ 1201 return (r >> 16U) & 0x1fU; 1202} 1203static inline u32 gr_fecs_cfg_r(void) 1204{ 1205 return 0x00409620U; 1206} 1207static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) 1208{ 1209 return (r >> 0U) & 0xffU; 1210} 1211static inline u32 gr_fecs_rc_lanes_r(void) 1212{ 1213 return 0x00409880U; 1214} 1215static inline u32 gr_fecs_rc_lanes_num_chains_s(void) 1216{ 1217 return 6U; 1218} 1219static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) 1220{ 1221 return (v & 0x3fU) << 0U; 1222} 1223static inline u32 gr_fecs_rc_lanes_num_chains_m(void) 1224{ 1225 return 0x3fU << 0U; 1226} 1227static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) 1228{ 1229 return (r >> 0U) & 0x3fU; 1230} 1231static inline u32 gr_fecs_ctxsw_status_1_r(void) 1232{ 1233 return 0x00409400U; 1234} 1235static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) 1236{ 1237 return 1U; 1238} 1239static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) 1240{ 1241 return (v & 0x1U) << 12U; 1242} 1243static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) 
1244{ 1245 return 0x1U << 12U; 1246} 1247static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) 1248{ 1249 return (r >> 12U) & 0x1U; 1250} 1251static inline u32 gr_fecs_arb_ctx_adr_r(void) 1252{ 1253 return 0x00409a24U; 1254} 1255static inline u32 gr_fecs_new_ctx_r(void) 1256{ 1257 return 0x00409b04U; 1258} 1259static inline u32 gr_fecs_new_ctx_ptr_s(void) 1260{ 1261 return 28U; 1262} 1263static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) 1264{ 1265 return (v & 0xfffffffU) << 0U; 1266} 1267static inline u32 gr_fecs_new_ctx_ptr_m(void) 1268{ 1269 return 0xfffffffU << 0U; 1270} 1271static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) 1272{ 1273 return (r >> 0U) & 0xfffffffU; 1274} 1275static inline u32 gr_fecs_new_ctx_target_s(void) 1276{ 1277 return 2U; 1278} 1279static inline u32 gr_fecs_new_ctx_target_f(u32 v) 1280{ 1281 return (v & 0x3U) << 28U; 1282} 1283static inline u32 gr_fecs_new_ctx_target_m(void) 1284{ 1285 return 0x3U << 28U; 1286} 1287static inline u32 gr_fecs_new_ctx_target_v(u32 r) 1288{ 1289 return (r >> 28U) & 0x3U; 1290} 1291static inline u32 gr_fecs_new_ctx_target_vid_mem_f(void) 1292{ 1293 return 0x0U; 1294} 1295static inline u32 gr_fecs_new_ctx_target_sys_mem_ncoh_f(void) 1296{ 1297 return 0x30000000U; 1298} 1299static inline u32 gr_fecs_new_ctx_target_sys_mem_coh_f(void) 1300{ 1301 return 0x20000000U; 1302} 1303static inline u32 gr_fecs_new_ctx_valid_s(void) 1304{ 1305 return 1U; 1306} 1307static inline u32 gr_fecs_new_ctx_valid_f(u32 v) 1308{ 1309 return (v & 0x1U) << 31U; 1310} 1311static inline u32 gr_fecs_new_ctx_valid_m(void) 1312{ 1313 return 0x1U << 31U; 1314} 1315static inline u32 gr_fecs_new_ctx_valid_v(u32 r) 1316{ 1317 return (r >> 31U) & 0x1U; 1318} 1319static inline u32 gr_fecs_arb_ctx_ptr_r(void) 1320{ 1321 return 0x00409a0cU; 1322} 1323static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) 1324{ 1325 return 28U; 1326} 1327static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) 1328{ 1329 return (v & 0xfffffffU) << 0U; 1330} 1331static inline u32 
gr_fecs_arb_ctx_ptr_ptr_m(void) 1332{ 1333 return 0xfffffffU << 0U; 1334} 1335static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) 1336{ 1337 return (r >> 0U) & 0xfffffffU; 1338} 1339static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) 1340{ 1341 return 2U; 1342} 1343static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) 1344{ 1345 return (v & 0x3U) << 28U; 1346} 1347static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) 1348{ 1349 return 0x3U << 28U; 1350} 1351static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) 1352{ 1353 return (r >> 28U) & 0x3U; 1354} 1355static inline u32 gr_fecs_arb_ctx_ptr_target_vid_mem_f(void) 1356{ 1357 return 0x0U; 1358} 1359static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(void) 1360{ 1361 return 0x30000000U; 1362} 1363static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_coh_f(void) 1364{ 1365 return 0x20000000U; 1366} 1367static inline u32 gr_fecs_arb_ctx_cmd_r(void) 1368{ 1369 return 0x00409a10U; 1370} 1371static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) 1372{ 1373 return 5U; 1374} 1375static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) 1376{ 1377 return (v & 0x1fU) << 0U; 1378} 1379static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) 1380{ 1381 return 0x1fU << 0U; 1382} 1383static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) 1384{ 1385 return (r >> 0U) & 0x1fU; 1386} 1387static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) 1388{ 1389 return 0x00409c00U; 1390} 1391static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) 1392{ 1393 return 0x00502c04U; 1394} 1395static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) 1396{ 1397 return 0x00502400U; 1398} 1399static inline u32 gr_gpc0_gpccs_ctxsw_mailbox__size_1_v(void) 1400{ 1401 return 0x00000010U; 1402} 1403static inline u32 gr_fecs_ctxsw_idlestate_r(void) 1404{ 1405 return 0x00409420U; 1406} 1407static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) 1408{ 1409 return 0x00502420U; 1410} 1411static inline u32 gr_rstr2d_gpc_map0_r(void) 1412{ 1413 return 0x0040780cU; 1414} 1415static 
inline u32 gr_rstr2d_gpc_map1_r(void) 1416{ 1417 return 0x00407810U; 1418} 1419static inline u32 gr_rstr2d_gpc_map2_r(void) 1420{ 1421 return 0x00407814U; 1422} 1423static inline u32 gr_rstr2d_gpc_map3_r(void) 1424{ 1425 return 0x00407818U; 1426} 1427static inline u32 gr_rstr2d_gpc_map4_r(void) 1428{ 1429 return 0x0040781cU; 1430} 1431static inline u32 gr_rstr2d_gpc_map5_r(void) 1432{ 1433 return 0x00407820U; 1434} 1435static inline u32 gr_rstr2d_map_table_cfg_r(void) 1436{ 1437 return 0x004078bcU; 1438} 1439static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) 1440{ 1441 return (v & 0xffU) << 0U; 1442} 1443static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) 1444{ 1445 return (v & 0xffU) << 8U; 1446} 1447static inline u32 gr_pd_hww_esr_r(void) 1448{ 1449 return 0x00406018U; 1450} 1451static inline u32 gr_pd_hww_esr_reset_active_f(void) 1452{ 1453 return 0x40000000U; 1454} 1455static inline u32 gr_pd_hww_esr_en_enable_f(void) 1456{ 1457 return 0x80000000U; 1458} 1459static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) 1460{ 1461 return 0x00406028U + i*4U; 1462} 1463static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) 1464{ 1465 return 0x00000004U; 1466} 1467static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) 1468{ 1469 return (v & 0xfU) << 0U; 1470} 1471static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) 1472{ 1473 return (v & 0xfU) << 4U; 1474} 1475static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) 1476{ 1477 return (v & 0xfU) << 8U; 1478} 1479static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) 1480{ 1481 return (v & 0xfU) << 12U; 1482} 1483static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) 1484{ 1485 return (v & 0xfU) << 16U; 1486} 1487static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) 1488{ 1489 return (v & 0xfU) << 20U; 1490} 1491static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) 1492{ 1493 return (v & 0xfU) << 24U; 1494} 1495static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) 1496{ 1497 return (v & 0xfU) << 
28U; 1498} 1499static inline u32 gr_pd_ab_dist_cfg0_r(void) 1500{ 1501 return 0x004064c0U; 1502} 1503static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) 1504{ 1505 return 0x80000000U; 1506} 1507static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) 1508{ 1509 return 0x0U; 1510} 1511static inline u32 gr_pd_ab_dist_cfg1_r(void) 1512{ 1513 return 0x004064c4U; 1514} 1515static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) 1516{ 1517 return 0xffffU; 1518} 1519static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) 1520{ 1521 return (v & 0xffffU) << 16U; 1522} 1523static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) 1524{ 1525 return 0x00000080U; 1526} 1527static inline u32 gr_pd_ab_dist_cfg2_r(void) 1528{ 1529 return 0x004064c8U; 1530} 1531static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) 1532{ 1533 return (v & 0xfffU) << 0U; 1534} 1535static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) 1536{ 1537 return 0x000001c0U; 1538} 1539static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) 1540{ 1541 return (v & 0xfffU) << 16U; 1542} 1543static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) 1544{ 1545 return 0x00000020U; 1546} 1547static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) 1548{ 1549 return 0x00000182U; 1550} 1551static inline u32 gr_pd_pagepool_r(void) 1552{ 1553 return 0x004064ccU; 1554} 1555static inline u32 gr_pd_pagepool_total_pages_f(u32 v) 1556{ 1557 return (v & 0xffU) << 0U; 1558} 1559static inline u32 gr_pd_pagepool_valid_true_f(void) 1560{ 1561 return 0x80000000U; 1562} 1563static inline u32 gr_pd_dist_skip_table_r(u32 i) 1564{ 1565 return 0x004064d0U + i*4U; 1566} 1567static inline u32 gr_pd_dist_skip_table__size_1_v(void) 1568{ 1569 return 0x00000008U; 1570} 1571static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) 1572{ 1573 return (v & 0xffU) << 0U; 1574} 1575static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) 1576{ 
1577 return (v & 0xffU) << 8U; 1578} 1579static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) 1580{ 1581 return (v & 0xffU) << 16U; 1582} 1583static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) 1584{ 1585 return (v & 0xffU) << 24U; 1586} 1587static inline u32 gr_ds_debug_r(void) 1588{ 1589 return 0x00405800U; 1590} 1591static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) 1592{ 1593 return 0x0U; 1594} 1595static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) 1596{ 1597 return 0x8000000U; 1598} 1599static inline u32 gr_ds_zbc_color_r_r(void) 1600{ 1601 return 0x00405804U; 1602} 1603static inline u32 gr_ds_zbc_color_r_val_f(u32 v) 1604{ 1605 return (v & 0xffffffffU) << 0U; 1606} 1607static inline u32 gr_ds_zbc_color_g_r(void) 1608{ 1609 return 0x00405808U; 1610} 1611static inline u32 gr_ds_zbc_color_g_val_f(u32 v) 1612{ 1613 return (v & 0xffffffffU) << 0U; 1614} 1615static inline u32 gr_ds_zbc_color_b_r(void) 1616{ 1617 return 0x0040580cU; 1618} 1619static inline u32 gr_ds_zbc_color_b_val_f(u32 v) 1620{ 1621 return (v & 0xffffffffU) << 0U; 1622} 1623static inline u32 gr_ds_zbc_color_a_r(void) 1624{ 1625 return 0x00405810U; 1626} 1627static inline u32 gr_ds_zbc_color_a_val_f(u32 v) 1628{ 1629 return (v & 0xffffffffU) << 0U; 1630} 1631static inline u32 gr_ds_zbc_color_fmt_r(void) 1632{ 1633 return 0x00405814U; 1634} 1635static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) 1636{ 1637 return (v & 0x7fU) << 0U; 1638} 1639static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) 1640{ 1641 return 0x0U; 1642} 1643static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) 1644{ 1645 return 0x00000001U; 1646} 1647static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) 1648{ 1649 return 0x00000002U; 1650} 1651static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) 1652{ 1653 return 0x00000004U; 1654} 1655static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) 1656{ 1657 return 0x00000028U; 1658} 1659static inline u32 
gr_ds_zbc_z_r(void) 1660{ 1661 return 0x00405818U; 1662} 1663static inline u32 gr_ds_zbc_z_val_s(void) 1664{ 1665 return 32U; 1666} 1667static inline u32 gr_ds_zbc_z_val_f(u32 v) 1668{ 1669 return (v & 0xffffffffU) << 0U; 1670} 1671static inline u32 gr_ds_zbc_z_val_m(void) 1672{ 1673 return 0xffffffffU << 0U; 1674} 1675static inline u32 gr_ds_zbc_z_val_v(u32 r) 1676{ 1677 return (r >> 0U) & 0xffffffffU; 1678} 1679static inline u32 gr_ds_zbc_z_val__init_v(void) 1680{ 1681 return 0x00000000U; 1682} 1683static inline u32 gr_ds_zbc_z_val__init_f(void) 1684{ 1685 return 0x0U; 1686} 1687static inline u32 gr_ds_zbc_z_fmt_r(void) 1688{ 1689 return 0x0040581cU; 1690} 1691static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) 1692{ 1693 return (v & 0x1U) << 0U; 1694} 1695static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) 1696{ 1697 return 0x0U; 1698} 1699static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) 1700{ 1701 return 0x00000001U; 1702} 1703static inline u32 gr_ds_zbc_tbl_index_r(void) 1704{ 1705 return 0x00405820U; 1706} 1707static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) 1708{ 1709 return (v & 0xfU) << 0U; 1710} 1711static inline u32 gr_ds_zbc_tbl_ld_r(void) 1712{ 1713 return 0x00405824U; 1714} 1715static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) 1716{ 1717 return 0x0U; 1718} 1719static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) 1720{ 1721 return 0x1U; 1722} 1723static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) 1724{ 1725 return 0x0U; 1726} 1727static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) 1728{ 1729 return 0x4U; 1730} 1731static inline u32 gr_ds_tga_constraintlogic_r(void) 1732{ 1733 return 0x00405830U; 1734} 1735static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) 1736{ 1737 return (v & 0xffffU) << 16U; 1738} 1739static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) 1740{ 1741 return (v & 0xffffU) << 0U; 1742} 1743static inline u32 gr_ds_hww_esr_r(void) 1744{ 1745 return 0x00405840U; 1746} 1747static inline u32 
gr_ds_hww_esr_reset_s(void) 1748{ 1749 return 1U; 1750} 1751static inline u32 gr_ds_hww_esr_reset_f(u32 v) 1752{ 1753 return (v & 0x1U) << 30U; 1754} 1755static inline u32 gr_ds_hww_esr_reset_m(void) 1756{ 1757 return 0x1U << 30U; 1758} 1759static inline u32 gr_ds_hww_esr_reset_v(u32 r) 1760{ 1761 return (r >> 30U) & 0x1U; 1762} 1763static inline u32 gr_ds_hww_esr_reset_task_v(void) 1764{ 1765 return 0x00000001U; 1766} 1767static inline u32 gr_ds_hww_esr_reset_task_f(void) 1768{ 1769 return 0x40000000U; 1770} 1771static inline u32 gr_ds_hww_esr_en_enabled_f(void) 1772{ 1773 return 0x80000000U; 1774} 1775static inline u32 gr_ds_hww_esr_2_r(void) 1776{ 1777 return 0x00405848U; 1778} 1779static inline u32 gr_ds_hww_esr_2_reset_s(void) 1780{ 1781 return 1U; 1782} 1783static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) 1784{ 1785 return (v & 0x1U) << 30U; 1786} 1787static inline u32 gr_ds_hww_esr_2_reset_m(void) 1788{ 1789 return 0x1U << 30U; 1790} 1791static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) 1792{ 1793 return (r >> 30U) & 0x1U; 1794} 1795static inline u32 gr_ds_hww_esr_2_reset_task_v(void) 1796{ 1797 return 0x00000001U; 1798} 1799static inline u32 gr_ds_hww_esr_2_reset_task_f(void) 1800{ 1801 return 0x40000000U; 1802} 1803static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) 1804{ 1805 return 0x80000000U; 1806} 1807static inline u32 gr_ds_hww_report_mask_r(void) 1808{ 1809 return 0x00405844U; 1810} 1811static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) 1812{ 1813 return 0x1U; 1814} 1815static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) 1816{ 1817 return 0x2U; 1818} 1819static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) 1820{ 1821 return 0x4U; 1822} 1823static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) 1824{ 1825 return 0x8U; 1826} 1827static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) 1828{ 1829 return 0x10U; 1830} 1831static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) 1832{ 1833 return 
0x20U; 1834} 1835static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) 1836{ 1837 return 0x40U; 1838} 1839static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) 1840{ 1841 return 0x80U; 1842} 1843static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) 1844{ 1845 return 0x100U; 1846} 1847static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) 1848{ 1849 return 0x200U; 1850} 1851static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) 1852{ 1853 return 0x400U; 1854} 1855static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) 1856{ 1857 return 0x800U; 1858} 1859static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) 1860{ 1861 return 0x1000U; 1862} 1863static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) 1864{ 1865 return 0x2000U; 1866} 1867static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) 1868{ 1869 return 0x4000U; 1870} 1871static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) 1872{ 1873 return 0x8000U; 1874} 1875static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) 1876{ 1877 return 0x10000U; 1878} 1879static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) 1880{ 1881 return 0x20000U; 1882} 1883static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) 1884{ 1885 return 0x40000U; 1886} 1887static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) 1888{ 1889 return 0x80000U; 1890} 1891static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) 1892{ 1893 return 0x100000U; 1894} 1895static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) 1896{ 1897 return 0x200000U; 1898} 1899static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) 1900{ 1901 return 0x400000U; 1902} 1903static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) 1904{ 1905 return 0x800000U; 1906} 1907static inline u32 gr_ds_hww_report_mask_2_r(void) 1908{ 1909 return 0x0040584cU; 1910} 1911static inline u32 
gr_ds_hww_report_mask_2_sph24_err_report_f(void) 1912{ 1913 return 0x1U; 1914} 1915static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) 1916{ 1917 return 0x00405870U + i*4U; 1918} 1919static inline u32 gr_scc_bundle_cb_base_r(void) 1920{ 1921 return 0x00408004U; 1922} 1923static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) 1924{ 1925 return (v & 0xffffffffU) << 0U; 1926} 1927static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) 1928{ 1929 return 0x00000008U; 1930} 1931static inline u32 gr_scc_bundle_cb_size_r(void) 1932{ 1933 return 0x00408008U; 1934} 1935static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) 1936{ 1937 return (v & 0x7ffU) << 0U; 1938} 1939static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) 1940{ 1941 return 0x00000018U; 1942} 1943static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) 1944{ 1945 return 0x00000100U; 1946} 1947static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) 1948{ 1949 return 0x00000000U; 1950} 1951static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) 1952{ 1953 return 0x0U; 1954} 1955static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) 1956{ 1957 return 0x80000000U; 1958} 1959static inline u32 gr_scc_pagepool_base_r(void) 1960{ 1961 return 0x0040800cU; 1962} 1963static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) 1964{ 1965 return (v & 0xffffffffU) << 0U; 1966} 1967static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) 1968{ 1969 return 0x00000008U; 1970} 1971static inline u32 gr_scc_pagepool_r(void) 1972{ 1973 return 0x00408010U; 1974} 1975static inline u32 gr_scc_pagepool_total_pages_f(u32 v) 1976{ 1977 return (v & 0xffU) << 0U; 1978} 1979static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) 1980{ 1981 return 0x00000000U; 1982} 1983static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) 1984{ 1985 return 0x00000080U; 1986} 1987static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) 1988{ 1989 return 
0x00000100U; 1990} 1991static inline u32 gr_scc_pagepool_max_valid_pages_s(void) 1992{ 1993 return 8U; 1994} 1995static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) 1996{ 1997 return (v & 0xffU) << 8U; 1998} 1999static inline u32 gr_scc_pagepool_max_valid_pages_m(void) 2000{ 2001 return 0xffU << 8U; 2002} 2003static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) 2004{ 2005 return (r >> 8U) & 0xffU; 2006} 2007static inline u32 gr_scc_pagepool_valid_true_f(void) 2008{ 2009 return 0x80000000U; 2010} 2011static inline u32 gr_scc_init_r(void) 2012{ 2013 return 0x0040802cU; 2014} 2015static inline u32 gr_scc_init_ram_trigger_f(void) 2016{ 2017 return 0x1U; 2018} 2019static inline u32 gr_scc_hww_esr_r(void) 2020{ 2021 return 0x00408030U; 2022} 2023static inline u32 gr_scc_hww_esr_reset_active_f(void) 2024{ 2025 return 0x40000000U; 2026} 2027static inline u32 gr_scc_hww_esr_en_enable_f(void) 2028{ 2029 return 0x80000000U; 2030} 2031static inline u32 gr_sked_hww_esr_r(void) 2032{ 2033 return 0x00407020U; 2034} 2035static inline u32 gr_sked_hww_esr_reset_active_f(void) 2036{ 2037 return 0x40000000U; 2038} 2039static inline u32 gr_cwd_fs_r(void) 2040{ 2041 return 0x00405b00U; 2042} 2043static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) 2044{ 2045 return (v & 0xffU) << 0U; 2046} 2047static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) 2048{ 2049 return (v & 0xffU) << 8U; 2050} 2051static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) 2052{ 2053 return 0x00405b60U + i*4U; 2054} 2055static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) 2056{ 2057 return 4U; 2058} 2059static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) 2060{ 2061 return (v & 0xfU) << 0U; 2062} 2063static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) 2064{ 2065 return 4U; 2066} 2067static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) 2068{ 2069 return (v & 0xfU) << 4U; 2070} 2071static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) 2072{ 2073 return (v & 0xfU) << 8U; 2074} 2075static inline u32 gr_cwd_sm_id_r(u32 i) 2076{ 2077 return 
0x00405ba0U + i*4U; 2078} 2079static inline u32 gr_cwd_sm_id__size_1_v(void) 2080{ 2081 return 0x00000006U; 2082} 2083static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) 2084{ 2085 return (v & 0xffU) << 0U; 2086} 2087static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) 2088{ 2089 return (v & 0xffU) << 8U; 2090} 2091static inline u32 gr_gpc0_fs_gpc_r(void) 2092{ 2093 return 0x00502608U; 2094} 2095static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) 2096{ 2097 return (r >> 0U) & 0x1fU; 2098} 2099static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) 2100{ 2101 return (r >> 16U) & 0x1fU; 2102} 2103static inline u32 gr_gpc0_cfg_r(void) 2104{ 2105 return 0x00502620U; 2106} 2107static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) 2108{ 2109 return (r >> 0U) & 0xffU; 2110} 2111static inline u32 gr_gpccs_rc_lanes_r(void) 2112{ 2113 return 0x00502880U; 2114} 2115static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) 2116{ 2117 return 6U; 2118} 2119static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) 2120{ 2121 return (v & 0x3fU) << 0U; 2122} 2123static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) 2124{ 2125 return 0x3fU << 0U; 2126} 2127static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) 2128{ 2129 return (r >> 0U) & 0x3fU; 2130} 2131static inline u32 gr_gpccs_rc_lane_size_r(u32 i) 2132{ 2133 return 0x00502910U + i*0U; 2134} 2135static inline u32 gr_gpccs_rc_lane_size__size_1_v(void) 2136{ 2137 return 0x00000010U; 2138} 2139static inline u32 gr_gpccs_rc_lane_size_v_s(void) 2140{ 2141 return 24U; 2142} 2143static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) 2144{ 2145 return (v & 0xffffffU) << 0U; 2146} 2147static inline u32 gr_gpccs_rc_lane_size_v_m(void) 2148{ 2149 return 0xffffffU << 0U; 2150} 2151static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) 2152{ 2153 return (r >> 0U) & 0xffffffU; 2154} 2155static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) 2156{ 2157 return 0x00000000U; 2158} 2159static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) 2160{ 2161 return 0x0U; 
2162} 2163static inline u32 gr_gpc0_zcull_fs_r(void) 2164{ 2165 return 0x00500910U; 2166} 2167static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) 2168{ 2169 return (v & 0x1ffU) << 0U; 2170} 2171static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) 2172{ 2173 return (v & 0xfU) << 16U; 2174} 2175static inline u32 gr_gpc0_zcull_ram_addr_r(void) 2176{ 2177 return 0x00500914U; 2178} 2179static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) 2180{ 2181 return (v & 0xfU) << 0U; 2182} 2183static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) 2184{ 2185 return (v & 0xfU) << 8U; 2186} 2187static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) 2188{ 2189 return 0x00500918U; 2190} 2191static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) 2192{ 2193 return (v & 0xffffffU) << 0U; 2194} 2195static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) 2196{ 2197 return 0x00800000U; 2198} 2199static inline u32 gr_gpc0_zcull_total_ram_size_r(void) 2200{ 2201 return 0x00500920U; 2202} 2203static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) 2204{ 2205 return (v & 0xffffU) << 0U; 2206} 2207static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) 2208{ 2209 return 0x00500a04U + i*32U; 2210} 2211static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) 2212{ 2213 return 0x00000040U; 2214} 2215static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) 2216{ 2217 return 0x00000010U; 2218} 2219static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) 2220{ 2221 return 0x00500c10U + i*4U; 2222} 2223static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) 2224{ 2225 return (v & 0xffU) << 0U; 2226} 2227static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) 2228{ 2229 return 0x00500c30U + i*4U; 2230} 2231static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) 2232{ 2233 return (r >> 0U) & 0xffU; 2234} 2235static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) 2236{ 2237 return 0x00504088U; 2238} 2239static 
inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) 2240{ 2241 return (v & 0xffffU) << 0U; 2242} 2243static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) 2244{ 2245 return 0x00504698U; 2246} 2247static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_f(u32 v) 2248{ 2249 return (v & 0xffffU) << 0U; 2250} 2251static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_v(u32 r) 2252{ 2253 return (r >> 0U) & 0xffffU; 2254} 2255static inline u32 gr_gpc0_tpc0_sm_arch_r(void) 2256{ 2257 return 0x0050469cU; 2258} 2259static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) 2260{ 2261 return (r >> 0U) & 0xffU; 2262} 2263static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) 2264{ 2265 return (r >> 8U) & 0xfffU; 2266} 2267static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) 2268{ 2269 return (r >> 20U) & 0xfffU; 2270} 2271static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) 2272{ 2273 return 0x00503018U; 2274} 2275static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) 2276{ 2277 return 0x1U << 0U; 2278} 2279static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) 2280{ 2281 return 0x1U; 2282} 2283static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) 2284{ 2285 return 0x005030c0U; 2286} 2287static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) 2288{ 2289 return (v & 0xffffU) << 0U; 2290} 2291static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) 2292{ 2293 return 0xffffU << 0U; 2294} 2295static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) 2296{ 2297 return 0x00000400U; 2298} 2299static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) 2300{ 2301 return 0x00000020U; 2302} 2303static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) 2304{ 2305 return 0x005030f4U; 2306} 2307static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) 2308{ 2309 return 0x005030e4U; 2310} 2311static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) 2312{ 2313 return (v & 0xffffU) << 0U; 2314} 2315static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) 2316{ 2317 
return 0xffffU << 0U; 2318} 2319static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) 2320{ 2321 return 0x00000800U; 2322} 2323static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) 2324{ 2325 return 0x00000020U; 2326} 2327static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) 2328{ 2329 return 0x005030f8U; 2330} 2331static inline u32 gr_gpcs_tpcs_tex_m_dbg2_r(void) 2332{ 2333 return 0x00419a3cU; 2334} 2335static inline u32 gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(u32 v) 2336{ 2337 return (v & 0x1U) << 2U; 2338} 2339static inline u32 gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(void) 2340{ 2341 return 0x1U << 2U; 2342} 2343static inline u32 gr_gpcs_tpcs_tex_m_dbg2_su_rd_coalesce_en_f(u32 v) 2344{ 2345 return (v & 0x1U) << 4U; 2346} 2347static inline u32 gr_gpcs_tpcs_tex_m_dbg2_su_rd_coalesce_en_m(void) 2348{ 2349 return 0x1U << 4U; 2350} 2351static inline u32 gr_gpcs_tpcs_tex_m_dbg2_tex_rd_coalesce_en_f(u32 v) 2352{ 2353 return (v & 0x1U) << 5U; 2354} 2355static inline u32 gr_gpcs_tpcs_tex_m_dbg2_tex_rd_coalesce_en_m(void) 2356{ 2357 return 0x1U << 5U; 2358} 2359static inline u32 gr_gpccs_falcon_addr_r(void) 2360{ 2361 return 0x0041a0acU; 2362} 2363static inline u32 gr_gpccs_falcon_addr_lsb_s(void) 2364{ 2365 return 6U; 2366} 2367static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) 2368{ 2369 return (v & 0x3fU) << 0U; 2370} 2371static inline u32 gr_gpccs_falcon_addr_lsb_m(void) 2372{ 2373 return 0x3fU << 0U; 2374} 2375static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) 2376{ 2377 return (r >> 0U) & 0x3fU; 2378} 2379static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) 2380{ 2381 return 0x00000000U; 2382} 2383static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) 2384{ 2385 return 0x0U; 2386} 2387static inline u32 gr_gpccs_falcon_addr_msb_s(void) 2388{ 2389 return 6U; 2390} 2391static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) 2392{ 2393 return (v & 0x3fU) << 6U; 2394} 2395static inline u32 gr_gpccs_falcon_addr_msb_m(void) 
2396{ 2397 return 0x3fU << 6U; 2398} 2399static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) 2400{ 2401 return (r >> 6U) & 0x3fU; 2402} 2403static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) 2404{ 2405 return 0x00000000U; 2406} 2407static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) 2408{ 2409 return 0x0U; 2410} 2411static inline u32 gr_gpccs_falcon_addr_ext_s(void) 2412{ 2413 return 12U; 2414} 2415static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) 2416{ 2417 return (v & 0xfffU) << 0U; 2418} 2419static inline u32 gr_gpccs_falcon_addr_ext_m(void) 2420{ 2421 return 0xfffU << 0U; 2422} 2423static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) 2424{ 2425 return (r >> 0U) & 0xfffU; 2426} 2427static inline u32 gr_gpccs_cpuctl_r(void) 2428{ 2429 return 0x0041a100U; 2430} 2431static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) 2432{ 2433 return (v & 0x1U) << 1U; 2434} 2435static inline u32 gr_gpccs_dmactl_r(void) 2436{ 2437 return 0x0041a10cU; 2438} 2439static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) 2440{ 2441 return (v & 0x1U) << 0U; 2442} 2443static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) 2444{ 2445 return 0x1U << 1U; 2446} 2447static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) 2448{ 2449 return 0x1U << 2U; 2450} 2451static inline u32 gr_gpccs_imemc_r(u32 i) 2452{ 2453 return 0x0041a180U + i*16U; 2454} 2455static inline u32 gr_gpccs_imemc_offs_f(u32 v) 2456{ 2457 return (v & 0x3fU) << 2U; 2458} 2459static inline u32 gr_gpccs_imemc_blk_f(u32 v) 2460{ 2461 return (v & 0xffU) << 8U; 2462} 2463static inline u32 gr_gpccs_imemc_aincw_f(u32 v) 2464{ 2465 return (v & 0x1U) << 24U; 2466} 2467static inline u32 gr_gpccs_imemd_r(u32 i) 2468{ 2469 return 0x0041a184U + i*16U; 2470} 2471static inline u32 gr_gpccs_imemt_r(u32 i) 2472{ 2473 return 0x0041a188U + i*16U; 2474} 2475static inline u32 gr_gpccs_imemt__size_1_v(void) 2476{ 2477 return 0x00000004U; 2478} 2479static inline u32 gr_gpccs_imemt_tag_f(u32 v) 2480{ 2481 return (v & 0xffffU) << 0U; 2482} 
/*
 * Generated register accessors, GPCCS DMEM window through BES CROP settings.
 * Naming convention (per this header's generated style): _r = register
 * offset, _f = shift a value into its field, _m = field mask, _v = extract a
 * field from a register value, _s = field width, _init = reset value.
 */

/* GPCCS DMEM access window and ctxsw mailbox. */
static inline u32 gr_gpccs_dmemc_r(u32 i) { return 0x0041a1c0U + i * 8U; }
static inline u32 gr_gpccs_dmemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_gpccs_dmemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_gpccs_dmemd_r(u32 i) { return 0x0041a1c4U + i * 8U; }
static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) { return 0x0041a800U + i * 4U; }
static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }

/* SWDX bundle circular buffer base/size. */
static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) { return 0x00418e24U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) { return 32U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) { return 0xffffffffU << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) { return 0x0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) { return 0x00418e28U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) { return 11U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) { return (v & 0x7ffU) << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) { return 0x7ffU << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) { return (r >> 0U) & 0x7ffU; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) { return 0x00000018U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) { return 0x18U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) { return 1U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) { return 0x0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) { return 0x00000001U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) { return 0x80000000U; }

/* SWDX TC beta circular buffer size (per-index register) and RM pagepool. */
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) { return 0x00418ea0U + i * 4U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) { return 0xffffU << 0U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_div3_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_div3_m(void) { return 0xffffU << 16U; }
static inline u32 gr_gpcs_swdx_rm_pagepool_r(void) { return 0x00418e30U; }
static inline u32 gr_gpcs_swdx_rm_pagepool_total_pages_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_gpcs_swdx_rm_pagepool_valid_true_f(void) { return 0x80000000U; }

/* Attribute circular buffer base. */
static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) { return 0x00418810U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) { return 0x0000000cU; }
static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) { return 0x80000000U; }

/* CRSTR screen-tile-to-GPC map: six registers, six 3-bit tiles each,
 * packed at 5-bit strides. */
static inline u32 gr_crstr_gpc_map0_r(void) { return 0x00418b08U; }
static inline u32 gr_crstr_gpc_map0_tile0_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map0_tile1_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map0_tile2_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map0_tile3_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map0_tile4_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map0_tile5_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_gpc_map1_r(void) { return 0x00418b0cU; }
static inline u32 gr_crstr_gpc_map1_tile6_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map1_tile7_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map1_tile8_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map1_tile9_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map1_tile10_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map1_tile11_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_gpc_map2_r(void) { return 0x00418b10U; }
static inline u32 gr_crstr_gpc_map2_tile12_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map2_tile13_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map2_tile14_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map2_tile15_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map2_tile16_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map2_tile17_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_gpc_map3_r(void) { return 0x00418b14U; }
static inline u32 gr_crstr_gpc_map3_tile18_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map3_tile19_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map3_tile20_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map3_tile21_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map3_tile22_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map3_tile23_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_gpc_map4_r(void) { return 0x00418b18U; }
static inline u32 gr_crstr_gpc_map4_tile24_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map4_tile25_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map4_tile26_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map4_tile27_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map4_tile28_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map4_tile29_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_gpc_map5_r(void) { return 0x00418b1cU; }
static inline u32 gr_crstr_gpc_map5_tile30_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_crstr_gpc_map5_tile31_f(u32 v) { return (v & 0x7U) << 5U; }
static inline u32 gr_crstr_gpc_map5_tile32_f(u32 v) { return (v & 0x7U) << 10U; }
static inline u32 gr_crstr_gpc_map5_tile33_f(u32 v) { return (v & 0x7U) << 15U; }
static inline u32 gr_crstr_gpc_map5_tile34_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_crstr_gpc_map5_tile35_f(u32 v) { return (v & 0x7U) << 25U; }
static inline u32 gr_crstr_map_table_cfg_r(void) { return 0x00418bb8U; }
static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) { return (v & 0xffU) << 8U; }

/* ZCULL SM-in-GPC number maps: eight 3-bit tiles per register at 4-bit
 * strides. */
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_r(void) { return 0x00418980U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_1_f(u32 v) { return (v & 0x7U) << 4U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_2_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_3_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_4_f(u32 v) { return (v & 0x7U) << 16U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_5_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_6_f(u32 v) { return (v & 0x7U) << 24U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_7_f(u32 v) { return (v & 0x7U) << 28U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_r(void) { return 0x00418984U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_9_f(u32 v) { return (v & 0x7U) << 4U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_10_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_11_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_12_f(u32 v) { return (v & 0x7U) << 16U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_13_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_14_f(u32 v) { return (v & 0x7U) << 24U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_15_f(u32 v) { return (v & 0x7U) << 28U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_r(void) { return 0x00418988U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_17_f(u32 v) { return (v & 0x7U) << 4U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_18_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_19_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_20_f(u32 v) { return (v & 0x7U) << 16U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_21_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_22_f(u32 v) { return (v & 0x7U) << 24U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_s(void) { return 3U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_f(u32 v) { return (v & 0x7U) << 28U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_m(void) { return 0x7U << 28U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_v(u32 r) { return (r >> 28U) & 0x7U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_r(void) { return 0x0041898cU; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_25_f(u32 v) { return (v & 0x7U) << 4U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_26_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_27_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_28_f(u32 v) { return (v & 0x7U) << 16U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_29_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_30_f(u32 v) { return (v & 0x7U) << 24U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_31_f(u32 v) { return (v & 0x7U) << 28U; }

/* GPM PD config, GCC pagepool, PE VAF / pin CB. */
static inline u32 gr_gpcs_gpm_pd_cfg_r(void) { return 0x00418c6cU; }
static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_disable_f(void) { return 0x0U; }
static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_enable_f(void) { return 0x1U; }
static inline u32 gr_gpcs_gcc_pagepool_base_r(void) { return 0x00419004U; }
static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_gcc_pagepool_r(void) { return 0x00419008U; }
static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) { return 0x0041980cU; }
static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) { return 0x10U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) { return 0x00419848U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) { return (v & 0x1U) << 28U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) { return 0x10000000U; }

/* MPC VTG debug / global CB base. */
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) { return 0x00419c00U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) { return 0x8U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) { return 0x00419c2cU; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) { return (v & 0x1U) << 28U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) { return 0x10000000U; }

/* SM HWW warp-ESR report-mask bits (one flag bit per error class). */
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(void) { return 0x00419e44U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_error_report_f(void) { return 0x2U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_api_stack_error_report_f(void) { return 0x4U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_ret_empty_stack_error_report_f(void) { return 0x8U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_wrap_report_f(void) { return 0x10U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_pc_report_f(void) { return 0x20U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_overflow_report_f(void) { return 0x40U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_immc_addr_report_f(void) { return 0x80U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_reg_report_f(void) { return 0x100U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) { return 0x200U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_sph_instr_combo_report_f(void) { return 0x400U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) { return 0x800U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_report_f(void) { return 0x1000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_reg_report_f(void) { return 0x2000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_addr_report_f(void) { return 0x4000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_addr_report_f(void) { return 0x8000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) { return 0x10000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param2_report_f(void) { return 0x20000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) { return 0x40000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_mmu_fault_report_f(void) { return 0x800000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_overflow_report_f(void) { return 0x400000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_geometry_sm_error_report_f(void) { return 0x80000U; }
static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_divergent_report_f(void) { return 0x100000U; }
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_report_mask_r(void) { return 0x00504644U; }

/* SM HWW global-ESR report-mask bits. */
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r(void) { return 0x00419e4cU; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_sm_to_sm_fault_report_f(void) { return 0x1U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_l1_error_report_f(void) { return 0x2U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) { return 0x4U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_physical_stack_overflow_error_report_f(void) { return 0x8U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_int_report_f(void) { return 0x10U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_pause_report_f(void) { return 0x20U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_single_step_complete_report_f(void) { return 0x40U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_report_mask_r(void) { return 0x0050464cU; }

/* TPC / GPC exception enable and status. */
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) { return 0x00419d0cU; }
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) { return 0x2U; }
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) { return 0x1U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) { return 0x0050450cU; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) { return 0x2U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) { return (r >> 1U) & 0x1U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) { return 0x0041ac94U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) { return 0x00502c90U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) { return (r >> 2U) & 0x1U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) { return (r >> 16U) & 0xffU; }
static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) { return 0x00000001U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) { return 0x00504508U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) { return 0x00000001U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) { return (r >> 1U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) { return 0x00000001U; }

/* SM debugger control0 and warp/SM stop masks. */
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_r(void) { return 0x00504610U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_m(void) { return 0x1U << 0U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_v(void) { return 0x00000001U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_f(void) { return 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_v(void) { return 0x00000000U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_f(void) { return 0x0U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f(void) { return 0x80000000U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_disable_f(void) { return 0x0U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_single_step_mode_enable_f(void) { return 0x8U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_single_step_mode_disable_f(void) { return 0x0U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_run_trigger_task_f(void) { return 0x40000000U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_m(void) { return 0x1U << 1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_v(u32 r) { return (r >> 1U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_disable_f(void) { return 0x0U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_m(void) { return 0x1U << 2U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_v(u32 r) { return (r >> 2U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_disable_f(void) { return 0x0U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_stop_on_any_warp_disable_v(void) { return 0x00000000U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_stop_on_any_sm_disable_v(void) { return 0x00000000U; }
static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_r(void) { return 0x00504614U; }
static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_1_r(void) { return 0x00504618U; }
static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_2_r(void) { return 0x0050461cU; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r(void) { return 0x00504624U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_1_r(void) { return 0x00504628U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_2_r(void) { return 0x00504750U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r(void) { return 0x00504634U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r(void) { return 0x00504638U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_2_r(void) { return 0x00504758U; }
static inline u32 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_r(void) { return 0x00419e24U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_r(void) { return 0x0050460cU; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_sm_in_trap_mode_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(u32 r) { return (r >> 4U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v(void) { return 0x00000001U; }

/* SM HWW global ESR pending bits (broadcast and gpc0/tpc0 unicast). */
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_r(void) { return 0x00419e50U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_int_pending_f(void) { return 0x10U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_pause_pending_f(void) { return 0x20U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_single_step_complete_pending_f(void) { return 0x40U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_sm_to_sm_fault_pending_f(void) { return 0x1U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_l1_error_pending_f(void) { return 0x2U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_multiple_warp_errors_pending_f(void) { return 0x4U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_physical_stack_overflow_error_pending_f(void) { return 0x8U; }
static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_timeout_error_pending_f(void) { return 0x80000000U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_r(void) { return 0x00504650U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f(void) { return 0x10U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f(void) { return 0x20U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(void) { return 0x40U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_sm_to_sm_fault_pending_f(void) { return 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_l1_error_pending_f(void) { return 0x2U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_multiple_warp_errors_pending_f(void) { return 0x4U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_physical_stack_overflow_error_pending_f(void) { return 0x8U; }
static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_timeout_error_pending_f(void) { return 0x80000000U; }

/* TEX HWW, warp ESR readback, halfctl / SFE debug, PES VSC VPC. */
static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_r(void) { return 0x00504224U; }
static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_intr_pending_f(void) { return 0x1U; }
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_r(void) { return 0x00504648U; }
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v(void) { return 0x00000000U; }
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f(void) { return 0x0U; }
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_pc_r(void) { return 0x00504654U; }
static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) { return 0x00504770U; }
static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) { return 0x00419f70U; }
static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) { return 0x1U << 4U; }
static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) { return 0x0050477cU; }
static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) { return 0x00419f7cU; }
static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) { return 0x1U << 0U; }
static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) { return 0x0041be08U; }
static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) { return 0x4U; }

/* PPCS WWDX GPC maps and map-table configuration. */
static inline u32 gr_ppcs_wwdx_map_gpc_map0_r(void) { return 0x0041bf00U; }
static inline u32 gr_ppcs_wwdx_map_gpc_map1_r(void) { return 0x0041bf04U; }
static inline u32 gr_ppcs_wwdx_map_gpc_map2_r(void) { return 0x0041bf08U; }
static inline u32 gr_ppcs_wwdx_map_gpc_map3_r(void) { return 0x0041bf0cU; }
static inline u32 gr_ppcs_wwdx_map_gpc_map4_r(void) { return 0x0041bf10U; }
static inline u32 gr_ppcs_wwdx_map_gpc_map5_r(void) { return 0x0041bf14U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) { return 0x0041bfd0U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) { return (v & 0x1fU) << 16U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) { return (v & 0x7U) << 21U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff5_mod_value_f(u32 v) { return (v & 0x1fU) << 24U; }
static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) { return 0x0041bfd4U; }
static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) { return (v & 0xffffffU) << 0U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_r(void) { return 0x0041bfe4U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff6_mod_value_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff7_mod_value_f(u32 v) { return (v & 0x1fU) << 5U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff8_mod_value_f(u32 v) { return (v & 0x1fU) << 10U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff9_mod_value_f(u32 v) { return (v & 0x1fU) << 15U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff10_mod_value_f(u32 v) { return (v & 0x1fU) << 20U; }
static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff11_mod_value_f(u32 v) { return (v & 0x1fU) << 25U; }

/* BES ZROP / CROP settings. */
static inline u32 gr_bes_zrop_settings_r(void) { return 0x00408850U; }
static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_be0_crop_debug3_r(void) { return 0x00410108U; }
static inline u32 gr_bes_crop_debug3_r(void) { return 0x00408908U; }
static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) { return 0x1U << 31U; }
static inline u32 gr_bes_crop_settings_r(void) { return 0x00408958U; }
static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) { return (v & 0xfU) << 0U; }
/*
 * Generated register accessors, ZCULL geometry constants through GPC MMU
 * debug registers. Same suffix convention as the rest of this header:
 * _r = register offset, _f = field value, _m = field mask, _v = field
 * extract / plain constant.
 */

/* ZCULL layout constants (byte counts / subregion count). */
static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) { return 0x00000020U; }
static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) { return 0x00000020U; }
static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) { return 0x000000c0U; }
static inline u32 gr_zcull_subregion_qty_v(void) { return 0x00000010U; }

/* SM DSM performance-counter control/select/status/value registers. */
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel0_r(void) { return 0x00504604U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel1_r(void) { return 0x00504608U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r(void) { return 0x0050465cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control1_r(void) { return 0x00504660U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control2_r(void) { return 0x00504664U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control3_r(void) { return 0x00504668U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control4_r(void) { return 0x0050466cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control5_r(void) { return 0x00504658U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_control_r(void) { return 0x00504730U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_control_r(void) { return 0x00504734U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_control_r(void) { return 0x00504738U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_control_r(void) { return 0x0050473cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_control_r(void) { return 0x00504740U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_control_r(void) { return 0x00504744U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_control_r(void) { return 0x00504748U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_control_r(void) { return 0x0050474cU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status_s1_r(void) { return 0x00504678U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status1_r(void) { return 0x00504694U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_s0_r(void) { return 0x005046f0U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_s1_r(void) { return 0x00504700U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_s0_r(void) { return 0x005046f4U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_s1_r(void) { return 0x00504704U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_s0_r(void) { return 0x005046f8U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_s1_r(void) { return 0x00504708U; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_s0_r(void) { return 0x005046fcU; }
static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_s1_r(void) { return 0x0050470cU; }

/* FE power-mode request register. */
static inline u32 gr_fe_pwr_mode_r(void) { return 0x00404170U; }
static inline u32 gr_fe_pwr_mode_mode_auto_f(void) { return 0x0U; }
static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) { return 0x2U; }
static inline u32 gr_fe_pwr_mode_req_v(u32 r) { return (r >> 4U) & 0x1U; }
static inline u32 gr_fe_pwr_mode_req_send_f(void) { return 0x10U; }
static inline u32 gr_fe_pwr_mode_req_done_v(void) { return 0x00000000U; }

/* GPC MMU control and debug registers. */
static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) { return 0x00418880U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) { return 0x1U << 0U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) { return 0x1U << 11U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_use_full_comp_tag_line_m(void) { return 0x1U << 12U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) { return 0x1U << 1U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) { return 0x1U << 2U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) { return 0x3U << 3U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) { return 0x3U << 5U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) { return 0x3U << 28U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) { return 0x1U << 30U; }
static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) { return 0x00418890U; }
static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) { return 0x00418894U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) { return 0x004188b0U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_m(void) { return 0x1U << 16U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) { return (r >> 16U) & 0x1U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) { return 0x00000001U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_f(void) { return 0x10000U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_disabled_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_disabled_f(void) { return 0x0U; }
static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) { return 0x004188b4U; }
3879static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) 3880{ 3881 return 0x004188b8U; 3882} 3883static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) 3884{ 3885 return 0x004188acU; 3886} 3887static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_r(void) 3888{ 3889 return 0x00419e10U; 3890} 3891static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_f(u32 v) 3892{ 3893 return (v & 0x1U) << 0U; 3894} 3895static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_on_v(void) 3896{ 3897 return 0x00000001U; 3898} 3899static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_m(void) 3900{ 3901 return 0x1U << 31U; 3902} 3903static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_v(u32 r) 3904{ 3905 return (r >> 31U) & 0x1U; 3906} 3907static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(void) 3908{ 3909 return 0x80000000U; 3910} 3911static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_disable_f(void) 3912{ 3913 return 0x0U; 3914} 3915static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_m(void) 3916{ 3917 return 0x1U << 3U; 3918} 3919static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f(void) 3920{ 3921 return 0x8U; 3922} 3923static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_disable_f(void) 3924{ 3925 return 0x0U; 3926} 3927static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_m(void) 3928{ 3929 return 0x1U << 30U; 3930} 3931static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_v(u32 r) 3932{ 3933 return (r >> 30U) & 0x1U; 3934} 3935static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_task_f(void) 3936{ 3937 return 0x40000000U; 3938} 3939#endif
diff --git a/include/nvgpu/hw/gm20b/hw_ltc_gm20b.h b/include/nvgpu/hw/gm20b/hw_ltc_gm20b.h
deleted file mode 100644
index 2c3ebb4..0000000
--- a/include/nvgpu/hw/gm20b/hw_ltc_gm20b.h
+++ /dev/null
@@ -1,527 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ltc_gm20b_h_ 57#define _hw_ltc_gm20b_h_ 58 59static inline u32 ltc_pltcg_base_v(void) 60{ 61 return 0x00140000U; 62} 63static inline u32 ltc_pltcg_extent_v(void) 64{ 65 return 0x0017ffffU; 66} 67static inline u32 ltc_ltc0_ltss_v(void) 68{ 69 return 0x00140200U; 70} 71static inline u32 ltc_ltc0_lts0_v(void) 72{ 73 return 0x00140400U; 74} 75static inline u32 ltc_ltcs_ltss_v(void) 76{ 77 return 0x0017e200U; 78} 79static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) 80{ 81 return 0x0014046cU; 82} 83static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) 84{ 85 return 0x00140518U; 86} 87static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) 88{ 89 return 0x0017e318U; 90} 91static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) 92{ 93 return 0x1U << 15U; 94} 95static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) 96{ 97 return 0x00140494U; 98} 99static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) 100{ 101 return (r >> 0U) & 0xffffU; 102} 103static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) 104{ 105 return (r >> 16U) & 0x3U; 106} 107static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) 108{ 109 return 0x00000000U; 110} 111static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) 120{ 121 return 0x0017e26cU; 122} 123static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) 124{ 125 return 0x1U; 126} 127static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) 128{ 129 return 0x2U; 130} 131static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) 132{ 133 return (r >> 2U) & 0x1U; 134} 135static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) 140{ 141 return 0x4U; 142} 143static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) 144{ 145 return 0x0014046cU; 146} 147static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) 148{ 149 return 0x0017e270U; 150} 151static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) 152{ 153 return (v & 0x1ffffU) << 0U; 154} 155static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) 156{ 157 return 0x0017e274U; 158} 159static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) 160{ 161 return (v & 0x1ffffU) << 0U; 162} 163static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) 164{ 165 return 0x0001ffffU; 166} 167static inline u32 ltc_ltcs_ltss_cbc_base_r(void) 168{ 169 return 0x0017e278U; 170} 171static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) 172{ 173 return 0x0000000bU; 174} 175static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) 176{ 177 return (r >> 0U) & 0x3ffffffU; 178} 179static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) 180{ 181 return 0x0017e27cU; 182} 183static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) 184{ 185 return 0x0017e000U; 186} 187static inline u32 ltc_ltcs_ltss_cbc_param_r(void) 188{ 189 return 0x0017e280U; 190} 191static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) 192{ 193 return (r >> 0U) & 0xffffU; 194} 
195static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) 196{ 197 return (r >> 24U) & 0xfU; 198} 199static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) 200{ 201 return (r >> 28U) & 0xfU; 202} 203static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) 204{ 205 return 0x0017e2acU; 206} 207static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) 208{ 209 return (v & 0x1fU) << 16U; 210} 211static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) 212{ 213 return 0x0017e338U; 214} 215static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) 216{ 217 return (v & 0xfU) << 0U; 218} 219static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) 220{ 221 return 0x0017e33cU + i*4U; 222} 223static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) 224{ 225 return 0x00000004U; 226} 227static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) 228{ 229 return 0x0017e34cU; 230} 231static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) 232{ 233 return 32U; 234} 235static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) 236{ 237 return (v & 0xffffffffU) << 0U; 238} 239static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) 240{ 241 return 0xffffffffU << 0U; 242} 243static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) 244{ 245 return (r >> 0U) & 0xffffffffU; 246} 247static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) 248{ 249 return 0x0017e2b0U; 250} 251static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) 252{ 253 return 0x10000000U; 254} 255static inline u32 ltc_ltcs_ltss_g_elpg_r(void) 256{ 257 return 0x0017e214U; 258} 259static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) 260{ 261 return (r >> 0U) & 0x1U; 262} 263static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) 264{ 265 return 0x00000001U; 266} 267static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) 268{ 269 return 0x1U; 270} 
271static inline u32 ltc_ltc0_ltss_g_elpg_r(void) 272{ 273 return 0x00140214U; 274} 275static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) 280{ 281 return 0x00000001U; 282} 283static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) 284{ 285 return 0x1U; 286} 287static inline u32 ltc_ltc1_ltss_g_elpg_r(void) 288{ 289 return 0x00142214U; 290} 291static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) 292{ 293 return (r >> 0U) & 0x1U; 294} 295static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void) 296{ 297 return 0x00000001U; 298} 299static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) 300{ 301 return 0x1U; 302} 303static inline u32 ltc_ltcs_ltss_intr_r(void) 304{ 305 return 0x0017e20cU; 306} 307static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) 308{ 309 return 0x1U << 20U; 310} 311static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) 312{ 313 return 0x1U << 30U; 314} 315static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_m(void) 316{ 317 return 0x1U << 21U; 318} 319static inline u32 ltc_ltc0_lts0_intr_r(void) 320{ 321 return 0x0014040cU; 322} 323static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) 324{ 325 return 0x0017e2a0U; 326} 327static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) 328{ 329 return (r >> 0U) & 0x1U; 330} 331static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) 332{ 333 return 0x00000001U; 334} 335static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) 336{ 337 return 0x1U; 338} 339static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) 340{ 341 return (r >> 8U) & 0xfU; 342} 343static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) 344{ 345 return 0x00000003U; 346} 347static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) 348{ 349 return 0x300U; 350} 351static inline u32 
ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 352{ 353 return (r >> 28U) & 0x1U; 354} 355static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) 356{ 357 return 0x00000001U; 358} 359static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) 360{ 361 return 0x10000000U; 362} 363static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) 364{ 365 return (r >> 29U) & 0x1U; 366} 367static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) 368{ 369 return 0x00000001U; 370} 371static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) 372{ 373 return 0x20000000U; 374} 375static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) 376{ 377 return (r >> 30U) & 0x1U; 378} 379static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) 384{ 385 return 0x40000000U; 386} 387static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) 388{ 389 return 0x0017e2a4U; 390} 391static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) 392{ 393 return (r >> 0U) & 0x1U; 394} 395static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) 396{ 397 return 0x00000001U; 398} 399static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) 400{ 401 return 0x1U; 402} 403static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) 404{ 405 return (r >> 8U) & 0xfU; 406} 407static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) 408{ 409 return 0x00000003U; 410} 411static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) 412{ 413 return 0x300U; 414} 415static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) 416{ 417 return (r >> 16U) & 0x1U; 418} 419static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) 420{ 421 return 0x00000001U; 422} 423static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) 424{ 425 return 0x10000U; 426} 427static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) 428{ 429 return (r >> 28U) & 0x1U; 430} 431static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) 432{ 433 return 0x00000001U; 434} 435static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) 436{ 437 return 0x10000000U; 438} 439static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) 440{ 441 return (r >> 29U) & 0x1U; 442} 443static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) 444{ 445 return 0x00000001U; 446} 447static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) 448{ 449 return 0x20000000U; 450} 451static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) 452{ 453 return (r >> 30U) & 0x1U; 454} 455static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) 456{ 457 return 0x00000001U; 458} 459static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) 460{ 461 return 0x40000000U; 462} 463static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) 464{ 465 return 0x001402a0U; 466} 467static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) 468{ 469 return (r >> 0U) & 0x1U; 470} 471static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) 472{ 473 return 0x00000001U; 474} 475static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) 476{ 477 return 0x1U; 478} 479static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) 480{ 481 return 0x001402a4U; 482} 483static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) 484{ 485 return (r >> 0U) & 0x1U; 486} 487static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) 488{ 489 return 0x00000001U; 490} 491static inline 
u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) 492{ 493 return 0x1U; 494} 495static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) 496{ 497 return 0x001422a0U; 498} 499static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) 500{ 501 return (r >> 0U) & 0x1U; 502} 503static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) 504{ 505 return 0x00000001U; 506} 507static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) 508{ 509 return 0x1U; 510} 511static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) 512{ 513 return 0x001422a4U; 514} 515static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) 516{ 517 return (r >> 0U) & 0x1U; 518} 519static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) 520{ 521 return 0x00000001U; 522} 523static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) 524{ 525 return 0x1U; 526} 527#endif
diff --git a/include/nvgpu/hw/gm20b/hw_mc_gm20b.h b/include/nvgpu/hw/gm20b/hw_mc_gm20b.h
deleted file mode 100644
index 0264803..0000000
--- a/include/nvgpu/hw/gm20b/hw_mc_gm20b.h
+++ /dev/null
@@ -1,287 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_mc_gm20b_h_ 57#define _hw_mc_gm20b_h_ 58 59static inline u32 mc_boot_0_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 mc_boot_0_architecture_v(u32 r) 64{ 65 return (r >> 24U) & 0x1fU; 66} 67static inline u32 mc_boot_0_implementation_v(u32 r) 68{ 69 return (r >> 20U) & 0xfU; 70} 71static inline u32 mc_boot_0_major_revision_v(u32 r) 72{ 73 return (r >> 4U) & 0xfU; 74} 75static inline u32 mc_boot_0_minor_revision_v(u32 r) 76{ 77 return (r >> 0U) & 0xfU; 78} 79static inline u32 mc_intr_r(u32 i) 80{ 81 return 0x00000100U + i*4U; 82} 83static inline u32 mc_intr_pfifo_pending_f(void) 84{ 85 return 0x100U; 86} 87static inline u32 mc_intr_pmu_pending_f(void) 88{ 89 return 0x1000000U; 90} 91static inline u32 mc_intr_ltc_pending_f(void) 92{ 93 return 0x2000000U; 94} 95static inline u32 mc_intr_priv_ring_pending_f(void) 96{ 97 return 0x40000000U; 98} 99static inline u32 mc_intr_pbus_pending_f(void) 100{ 101 return 0x10000000U; 102} 103static inline u32 mc_intr_mask_0_r(void) 104{ 105 return 0x00000640U; 106} 107static inline u32 mc_intr_mask_0_pmu_enabled_f(void) 108{ 109 return 0x1000000U; 110} 111static inline u32 mc_intr_en_0_r(void) 112{ 113 return 0x00000140U; 114} 115static inline u32 mc_intr_en_0_inta_disabled_f(void) 116{ 
117 return 0x0U; 118} 119static inline u32 mc_intr_en_0_inta_hardware_f(void) 120{ 121 return 0x1U; 122} 123static inline u32 mc_intr_mask_1_r(void) 124{ 125 return 0x00000644U; 126} 127static inline u32 mc_intr_mask_1_pmu_s(void) 128{ 129 return 1U; 130} 131static inline u32 mc_intr_mask_1_pmu_f(u32 v) 132{ 133 return (v & 0x1U) << 24U; 134} 135static inline u32 mc_intr_mask_1_pmu_m(void) 136{ 137 return 0x1U << 24U; 138} 139static inline u32 mc_intr_mask_1_pmu_v(u32 r) 140{ 141 return (r >> 24U) & 0x1U; 142} 143static inline u32 mc_intr_mask_1_pmu_enabled_f(void) 144{ 145 return 0x1000000U; 146} 147static inline u32 mc_intr_en_1_r(void) 148{ 149 return 0x00000144U; 150} 151static inline u32 mc_intr_en_1_inta_disabled_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 mc_intr_en_1_inta_hardware_f(void) 156{ 157 return 0x1U; 158} 159static inline u32 mc_enable_r(void) 160{ 161 return 0x00000200U; 162} 163static inline u32 mc_enable_xbar_enabled_f(void) 164{ 165 return 0x4U; 166} 167static inline u32 mc_enable_l2_enabled_f(void) 168{ 169 return 0x8U; 170} 171static inline u32 mc_enable_pmedia_s(void) 172{ 173 return 1U; 174} 175static inline u32 mc_enable_pmedia_f(u32 v) 176{ 177 return (v & 0x1U) << 4U; 178} 179static inline u32 mc_enable_pmedia_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 mc_enable_pmedia_v(u32 r) 184{ 185 return (r >> 4U) & 0x1U; 186} 187static inline u32 mc_enable_priv_ring_enabled_f(void) 188{ 189 return 0x20U; 190} 191static inline u32 mc_enable_ce0_m(void) 192{ 193 return 0x1U << 6U; 194} 195static inline u32 mc_enable_pfifo_enabled_f(void) 196{ 197 return 0x100U; 198} 199static inline u32 mc_enable_pgraph_enabled_f(void) 200{ 201 return 0x1000U; 202} 203static inline u32 mc_enable_pwr_v(u32 r) 204{ 205 return (r >> 13U) & 0x1U; 206} 207static inline u32 mc_enable_pwr_disabled_v(void) 208{ 209 return 0x00000000U; 210} 211static inline u32 mc_enable_pwr_enabled_f(void) 212{ 213 return 0x2000U; 214} 215static inline u32 
mc_enable_pfb_enabled_f(void) 216{ 217 return 0x100000U; 218} 219static inline u32 mc_enable_ce2_m(void) 220{ 221 return 0x1U << 21U; 222} 223static inline u32 mc_enable_ce2_enabled_f(void) 224{ 225 return 0x200000U; 226} 227static inline u32 mc_enable_blg_enabled_f(void) 228{ 229 return 0x8000000U; 230} 231static inline u32 mc_enable_perfmon_enabled_f(void) 232{ 233 return 0x10000000U; 234} 235static inline u32 mc_enable_hub_enabled_f(void) 236{ 237 return 0x20000000U; 238} 239static inline u32 mc_intr_ltc_r(void) 240{ 241 return 0x0000017cU; 242} 243static inline u32 mc_enable_pb_r(void) 244{ 245 return 0x00000204U; 246} 247static inline u32 mc_enable_pb_0_s(void) 248{ 249 return 1U; 250} 251static inline u32 mc_enable_pb_0_f(u32 v) 252{ 253 return (v & 0x1U) << 0U; 254} 255static inline u32 mc_enable_pb_0_m(void) 256{ 257 return 0x1U << 0U; 258} 259static inline u32 mc_enable_pb_0_v(u32 r) 260{ 261 return (r >> 0U) & 0x1U; 262} 263static inline u32 mc_enable_pb_0_enabled_v(void) 264{ 265 return 0x00000001U; 266} 267static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) 268{ 269 return (v & 0x1U) << (0U + i*1U); 270} 271static inline u32 mc_elpg_enable_r(void) 272{ 273 return 0x0000020cU; 274} 275static inline u32 mc_elpg_enable_xbar_enabled_f(void) 276{ 277 return 0x4U; 278} 279static inline u32 mc_elpg_enable_pfb_enabled_f(void) 280{ 281 return 0x100000U; 282} 283static inline u32 mc_elpg_enable_hub_enabled_f(void) 284{ 285 return 0x20000000U; 286} 287#endif
diff --git a/include/nvgpu/hw/gm20b/hw_pbdma_gm20b.h b/include/nvgpu/hw/gm20b/hw_pbdma_gm20b.h
deleted file mode 100644
index 10ed9ec..0000000
--- a/include/nvgpu/hw/gm20b/hw_pbdma_gm20b.h
+++ /dev/null
@@ -1,579 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pbdma_gm20b_h_ 57#define _hw_pbdma_gm20b_h_ 58 59static inline u32 pbdma_gp_entry1_r(void) 60{ 61 return 0x10000004U; 62} 63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) 64{ 65 return (r >> 0U) & 0xffU; 66} 67static inline u32 pbdma_gp_entry1_length_f(u32 v) 68{ 69 return (v & 0x1fffffU) << 10U; 70} 71static inline u32 pbdma_gp_entry1_length_v(u32 r) 72{ 73 return (r >> 10U) & 0x1fffffU; 74} 75static inline u32 pbdma_gp_base_r(u32 i) 76{ 77 return 0x00040048U + i*8192U; 78} 79static inline u32 pbdma_gp_base__size_1_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 pbdma_gp_base_offset_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 3U; 86} 87static inline u32 pbdma_gp_base_rsvd_s(void) 88{ 89 return 3U; 90} 91static inline u32 pbdma_gp_base_hi_r(u32 i) 92{ 93 return 0x0004004cU + i*8192U; 94} 95static inline u32 pbdma_gp_base_hi_offset_f(u32 v) 96{ 97 return (v & 0xffU) << 0U; 98} 99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) 100{ 101 return (v & 0x1fU) << 16U; 102} 103static inline u32 pbdma_gp_fetch_r(u32 i) 104{ 105 return 0x00040050U + i*8192U; 106} 107static inline u32 pbdma_gp_get_r(u32 i) 108{ 109 return 0x00040014U + i*8192U; 110} 111static inline u32 pbdma_gp_put_r(u32 i) 112{ 113 return 0x00040000U + 
i*8192U; 114} 115static inline u32 pbdma_pb_fetch_r(u32 i) 116{ 117 return 0x00040054U + i*8192U; 118} 119static inline u32 pbdma_pb_fetch_hi_r(u32 i) 120{ 121 return 0x00040058U + i*8192U; 122} 123static inline u32 pbdma_get_r(u32 i) 124{ 125 return 0x00040018U + i*8192U; 126} 127static inline u32 pbdma_get_hi_r(u32 i) 128{ 129 return 0x0004001cU + i*8192U; 130} 131static inline u32 pbdma_put_r(u32 i) 132{ 133 return 0x0004005cU + i*8192U; 134} 135static inline u32 pbdma_put_hi_r(u32 i) 136{ 137 return 0x00040060U + i*8192U; 138} 139static inline u32 pbdma_formats_r(u32 i) 140{ 141 return 0x0004009cU + i*8192U; 142} 143static inline u32 pbdma_formats_gp_fermi0_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 pbdma_formats_pb_fermi1_f(void) 148{ 149 return 0x100U; 150} 151static inline u32 pbdma_formats_mp_fermi0_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 pbdma_pb_header_r(u32 i) 156{ 157 return 0x00040084U + i*8192U; 158} 159static inline u32 pbdma_pb_header_priv_user_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 pbdma_pb_header_method_zero_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 pbdma_pb_header_subchannel_zero_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 pbdma_pb_header_level_main_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 pbdma_pb_header_first_true_f(void) 176{ 177 return 0x400000U; 178} 179static inline u32 pbdma_pb_header_type_inc_f(void) 180{ 181 return 0x20000000U; 182} 183static inline u32 pbdma_pb_header_type_non_inc_f(void) 184{ 185 return 0x60000000U; 186} 187static inline u32 pbdma_hdr_shadow_r(u32 i) 188{ 189 return 0x00040118U + i*8192U; 190} 191static inline u32 pbdma_gp_shadow_0_r(u32 i) 192{ 193 return 0x00040110U + i*8192U; 194} 195static inline u32 pbdma_gp_shadow_1_r(u32 i) 196{ 197 return 0x00040114U + i*8192U; 198} 199static inline u32 pbdma_subdevice_r(u32 i) 200{ 201 return 0x00040094U + i*8192U; 202} 203static inline u32 pbdma_subdevice_id_f(u32 v) 204{ 205 return (v & 
0xfffU) << 0U; 206} 207static inline u32 pbdma_subdevice_status_active_f(void) 208{ 209 return 0x10000000U; 210} 211static inline u32 pbdma_subdevice_channel_dma_enable_f(void) 212{ 213 return 0x20000000U; 214} 215static inline u32 pbdma_method0_r(u32 i) 216{ 217 return 0x000400c0U + i*8192U; 218} 219static inline u32 pbdma_method0_fifo_size_v(void) 220{ 221 return 0x00000004U; 222} 223static inline u32 pbdma_method0_addr_f(u32 v) 224{ 225 return (v & 0xfffU) << 2U; 226} 227static inline u32 pbdma_method0_addr_v(u32 r) 228{ 229 return (r >> 2U) & 0xfffU; 230} 231static inline u32 pbdma_method0_subch_v(u32 r) 232{ 233 return (r >> 16U) & 0x7U; 234} 235static inline u32 pbdma_method0_first_true_f(void) 236{ 237 return 0x400000U; 238} 239static inline u32 pbdma_method0_valid_true_f(void) 240{ 241 return 0x80000000U; 242} 243static inline u32 pbdma_method1_r(u32 i) 244{ 245 return 0x000400c8U + i*8192U; 246} 247static inline u32 pbdma_method2_r(u32 i) 248{ 249 return 0x000400d0U + i*8192U; 250} 251static inline u32 pbdma_method3_r(u32 i) 252{ 253 return 0x000400d8U + i*8192U; 254} 255static inline u32 pbdma_data0_r(u32 i) 256{ 257 return 0x000400c4U + i*8192U; 258} 259static inline u32 pbdma_target_r(u32 i) 260{ 261 return 0x000400acU + i*8192U; 262} 263static inline u32 pbdma_target_engine_sw_f(void) 264{ 265 return 0x1fU; 266} 267static inline u32 pbdma_acquire_r(u32 i) 268{ 269 return 0x00040030U + i*8192U; 270} 271static inline u32 pbdma_acquire_retry_man_2_f(void) 272{ 273 return 0x2U; 274} 275static inline u32 pbdma_acquire_retry_exp_2_f(void) 276{ 277 return 0x100U; 278} 279static inline u32 pbdma_acquire_timeout_exp_f(u32 v) 280{ 281 return (v & 0xfU) << 11U; 282} 283static inline u32 pbdma_acquire_timeout_exp_max_v(void) 284{ 285 return 0x0000000fU; 286} 287static inline u32 pbdma_acquire_timeout_exp_max_f(void) 288{ 289 return 0x7800U; 290} 291static inline u32 pbdma_acquire_timeout_man_f(u32 v) 292{ 293 return (v & 0xffffU) << 15U; 294} 295static inline u32 
pbdma_acquire_timeout_man_max_v(void) 296{ 297 return 0x0000ffffU; 298} 299static inline u32 pbdma_acquire_timeout_man_max_f(void) 300{ 301 return 0x7fff8000U; 302} 303static inline u32 pbdma_acquire_timeout_en_enable_f(void) 304{ 305 return 0x80000000U; 306} 307static inline u32 pbdma_acquire_timeout_en_disable_f(void) 308{ 309 return 0x0U; 310} 311static inline u32 pbdma_status_r(u32 i) 312{ 313 return 0x00040100U + i*8192U; 314} 315static inline u32 pbdma_channel_r(u32 i) 316{ 317 return 0x00040120U + i*8192U; 318} 319static inline u32 pbdma_signature_r(u32 i) 320{ 321 return 0x00040010U + i*8192U; 322} 323static inline u32 pbdma_signature_hw_valid_f(void) 324{ 325 return 0xfaceU; 326} 327static inline u32 pbdma_signature_sw_zero_f(void) 328{ 329 return 0x0U; 330} 331static inline u32 pbdma_userd_r(u32 i) 332{ 333 return 0x00040008U + i*8192U; 334} 335static inline u32 pbdma_userd_target_vid_mem_f(void) 336{ 337 return 0x0U; 338} 339static inline u32 pbdma_userd_target_sys_mem_coh_f(void) 340{ 341 return 0x2U; 342} 343static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) 344{ 345 return 0x3U; 346} 347static inline u32 pbdma_userd_addr_f(u32 v) 348{ 349 return (v & 0x7fffffU) << 9U; 350} 351static inline u32 pbdma_userd_hi_r(u32 i) 352{ 353 return 0x0004000cU + i*8192U; 354} 355static inline u32 pbdma_userd_hi_addr_f(u32 v) 356{ 357 return (v & 0xffU) << 0U; 358} 359static inline u32 pbdma_hce_ctrl_r(u32 i) 360{ 361 return 0x000400e4U + i*8192U; 362} 363static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) 364{ 365 return 0x20U; 366} 367static inline u32 pbdma_intr_0_r(u32 i) 368{ 369 return 0x00040108U + i*8192U; 370} 371static inline u32 pbdma_intr_0_memreq_v(u32 r) 372{ 373 return (r >> 0U) & 0x1U; 374} 375static inline u32 pbdma_intr_0_memreq_pending_f(void) 376{ 377 return 0x1U; 378} 379static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) 380{ 381 return 0x2U; 382} 383static inline u32 pbdma_intr_0_memack_extra_pending_f(void) 384{ 385 
return 0x4U; 386} 387static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) 388{ 389 return 0x8U; 390} 391static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) 392{ 393 return 0x10U; 394} 395static inline u32 pbdma_intr_0_memflush_pending_f(void) 396{ 397 return 0x20U; 398} 399static inline u32 pbdma_intr_0_memop_pending_f(void) 400{ 401 return 0x40U; 402} 403static inline u32 pbdma_intr_0_lbconnect_pending_f(void) 404{ 405 return 0x80U; 406} 407static inline u32 pbdma_intr_0_lbreq_pending_f(void) 408{ 409 return 0x100U; 410} 411static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) 412{ 413 return 0x200U; 414} 415static inline u32 pbdma_intr_0_lback_extra_pending_f(void) 416{ 417 return 0x400U; 418} 419static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) 420{ 421 return 0x800U; 422} 423static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) 424{ 425 return 0x1000U; 426} 427static inline u32 pbdma_intr_0_gpfifo_pending_f(void) 428{ 429 return 0x2000U; 430} 431static inline u32 pbdma_intr_0_gpptr_pending_f(void) 432{ 433 return 0x4000U; 434} 435static inline u32 pbdma_intr_0_gpentry_pending_f(void) 436{ 437 return 0x8000U; 438} 439static inline u32 pbdma_intr_0_gpcrc_pending_f(void) 440{ 441 return 0x10000U; 442} 443static inline u32 pbdma_intr_0_pbptr_pending_f(void) 444{ 445 return 0x20000U; 446} 447static inline u32 pbdma_intr_0_pbentry_pending_f(void) 448{ 449 return 0x40000U; 450} 451static inline u32 pbdma_intr_0_pbcrc_pending_f(void) 452{ 453 return 0x80000U; 454} 455static inline u32 pbdma_intr_0_xbarconnect_pending_f(void) 456{ 457 return 0x100000U; 458} 459static inline u32 pbdma_intr_0_method_pending_f(void) 460{ 461 return 0x200000U; 462} 463static inline u32 pbdma_intr_0_methodcrc_pending_f(void) 464{ 465 return 0x400000U; 466} 467static inline u32 pbdma_intr_0_device_pending_f(void) 468{ 469 return 0x800000U; 470} 471static inline u32 pbdma_intr_0_semaphore_pending_f(void) 472{ 473 return 0x2000000U; 474} 475static inline 
u32 pbdma_intr_0_acquire_pending_f(void) 476{ 477 return 0x4000000U; 478} 479static inline u32 pbdma_intr_0_pri_pending_f(void) 480{ 481 return 0x8000000U; 482} 483static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) 484{ 485 return 0x20000000U; 486} 487static inline u32 pbdma_intr_0_pbseg_pending_f(void) 488{ 489 return 0x40000000U; 490} 491static inline u32 pbdma_intr_0_signature_pending_f(void) 492{ 493 return 0x80000000U; 494} 495static inline u32 pbdma_intr_1_r(u32 i) 496{ 497 return 0x00040148U + i*8192U; 498} 499static inline u32 pbdma_intr_en_0_r(u32 i) 500{ 501 return 0x0004010cU + i*8192U; 502} 503static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) 504{ 505 return 0x100U; 506} 507static inline u32 pbdma_intr_en_1_r(u32 i) 508{ 509 return 0x0004014cU + i*8192U; 510} 511static inline u32 pbdma_intr_stall_r(u32 i) 512{ 513 return 0x0004013cU + i*8192U; 514} 515static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) 516{ 517 return 0x100U; 518} 519static inline u32 pbdma_intr_stall_1_r(u32 i) 520{ 521 return 0x00040140U + i*8192U; 522} 523static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void) 524{ 525 return 0x1U; 526} 527static inline u32 pbdma_udma_nop_r(void) 528{ 529 return 0x00000008U; 530} 531static inline u32 pbdma_syncpointa_r(u32 i) 532{ 533 return 0x000400a4U + i*8192U; 534} 535static inline u32 pbdma_syncpointa_payload_v(u32 r) 536{ 537 return (r >> 0U) & 0xffffffffU; 538} 539static inline u32 pbdma_syncpointb_r(u32 i) 540{ 541 return 0x000400a8U + i*8192U; 542} 543static inline u32 pbdma_syncpointb_op_v(u32 r) 544{ 545 return (r >> 0U) & 0x3U; 546} 547static inline u32 pbdma_syncpointb_op_wait_v(void) 548{ 549 return 0x00000000U; 550} 551static inline u32 pbdma_syncpointb_wait_switch_v(u32 r) 552{ 553 return (r >> 4U) & 0x1U; 554} 555static inline u32 pbdma_syncpointb_wait_switch_en_v(void) 556{ 557 return 0x00000001U; 558} 559static inline u32 pbdma_syncpointb_syncpt_index_v(u32 r) 560{ 561 return (r >> 8U) & 0xffU; 562} 
563static inline u32 pbdma_runlist_timeslice_r(u32 i) 564{ 565 return 0x000400f8U + i*8192U; 566} 567static inline u32 pbdma_runlist_timeslice_timeout_128_f(void) 568{ 569 return 0x80U; 570} 571static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) 572{ 573 return 0x3000U; 574} 575static inline u32 pbdma_runlist_timeslice_enable_true_f(void) 576{ 577 return 0x10000000U; 578} 579#endif
diff --git a/include/nvgpu/hw/gm20b/hw_perf_gm20b.h b/include/nvgpu/hw/gm20b/hw_perf_gm20b.h
deleted file mode 100644
index a94ba30..0000000
--- a/include/nvgpu/hw/gm20b/hw_perf_gm20b.h
+++ /dev/null
@@ -1,219 +0,0 @@ 1/* 2 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_perf_gm20b_h_ 57#define _hw_perf_gm20b_h_ 58 59static inline u32 perf_pmmsys_base_v(void) 60{ 61 return 0x001b0000U; 62} 63static inline u32 perf_pmmsys_extent_v(void) 64{ 65 return 0x001b0fffU; 66} 67static inline u32 perf_pmasys_control_r(void) 68{ 69 return 0x001b4000U; 70} 71static inline u32 perf_pmasys_control_membuf_status_v(u32 r) 72{ 73 return (r >> 4U) & 0x1U; 74} 75static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) 76{ 77 return 0x00000001U; 78} 79static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) 84{ 85 return (v & 0x1U) << 5U; 86} 87static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) 88{ 89 return (r >> 5U) & 0x1U; 90} 91static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void) 92{ 93 return 0x00000001U; 94} 95static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) 96{ 97 return 0x20U; 98} 99static inline u32 perf_pmasys_mem_block_r(void) 100{ 101 return 0x001b4070U; 102} 103static inline u32 perf_pmasys_mem_block_base_f(u32 v) 104{ 105 return (v & 0xfffffffU) << 0U; 106} 107static inline u32 perf_pmasys_mem_block_target_f(u32 v) 108{ 109 
return (v & 0x3U) << 28U; 110} 111static inline u32 perf_pmasys_mem_block_target_v(u32 r) 112{ 113 return (r >> 28U) & 0x3U; 114} 115static inline u32 perf_pmasys_mem_block_target_lfb_v(void) 116{ 117 return 0x00000000U; 118} 119static inline u32 perf_pmasys_mem_block_target_lfb_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) 124{ 125 return 0x00000002U; 126} 127static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) 128{ 129 return 0x20000000U; 130} 131static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) 132{ 133 return 0x00000003U; 134} 135static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) 136{ 137 return 0x30000000U; 138} 139static inline u32 perf_pmasys_mem_block_valid_f(u32 v) 140{ 141 return (v & 0x1U) << 31U; 142} 143static inline u32 perf_pmasys_mem_block_valid_v(u32 r) 144{ 145 return (r >> 31U) & 0x1U; 146} 147static inline u32 perf_pmasys_mem_block_valid_true_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 perf_pmasys_mem_block_valid_true_f(void) 152{ 153 return 0x80000000U; 154} 155static inline u32 perf_pmasys_mem_block_valid_false_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 perf_pmasys_mem_block_valid_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 perf_pmasys_outbase_r(void) 164{ 165 return 0x001b4074U; 166} 167static inline u32 perf_pmasys_outbase_ptr_f(u32 v) 168{ 169 return (v & 0x7ffffffU) << 5U; 170} 171static inline u32 perf_pmasys_outbaseupper_r(void) 172{ 173 return 0x001b4078U; 174} 175static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) 176{ 177 return (v & 0xffU) << 0U; 178} 179static inline u32 perf_pmasys_outsize_r(void) 180{ 181 return 0x001b407cU; 182} 183static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) 184{ 185 return (v & 0x7ffffffU) << 5U; 186} 187static inline u32 perf_pmasys_mem_bytes_r(void) 188{ 189 return 0x001b4084U; 190} 191static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) 192{ 
193 return (v & 0xfffffffU) << 4U; 194} 195static inline u32 perf_pmasys_mem_bump_r(void) 196{ 197 return 0x001b4088U; 198} 199static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) 200{ 201 return (v & 0xfffffffU) << 4U; 202} 203static inline u32 perf_pmasys_enginestatus_r(void) 204{ 205 return 0x001b40a4U; 206} 207static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) 208{ 209 return (v & 0x1U) << 4U; 210} 211static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) 216{ 217 return 0x10U; 218} 219#endif
diff --git a/include/nvgpu/hw/gm20b/hw_pram_gm20b.h b/include/nvgpu/hw/gm20b/hw_pram_gm20b.h
deleted file mode 100644
index 47a6bfa..0000000
--- a/include/nvgpu/hw/gm20b/hw_pram_gm20b.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pram_gm20b_h_ 57#define _hw_pram_gm20b_h_ 58 59static inline u32 pram_data032_r(u32 i) 60{ 61 return 0x00700000U + i*4U; 62} 63#endif
diff --git a/include/nvgpu/hw/gm20b/hw_pri_ringmaster_gm20b.h b/include/nvgpu/hw/gm20b/hw_pri_ringmaster_gm20b.h
deleted file mode 100644
index c6f08ed..0000000
--- a/include/nvgpu/hw/gm20b/hw_pri_ringmaster_gm20b.h
+++ /dev/null
@@ -1,167 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringmaster_gm20b_h_ 57#define _hw_pri_ringmaster_gm20b_h_ 58 59static inline u32 pri_ringmaster_command_r(void) 60{ 61 return 0x0012004cU; 62} 63static inline u32 pri_ringmaster_command_cmd_m(void) 64{ 65 return 0x3fU << 0U; 66} 67static inline u32 pri_ringmaster_command_cmd_v(u32 r) 68{ 69 return (r >> 0U) & 0x3fU; 70} 71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 pri_ringmaster_command_data_r(void) 92{ 93 return 0x00120048U; 94} 95static inline u32 pri_ringmaster_start_results_r(void) 96{ 97 return 0x00120050U; 98} 99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) 104{ 105 return 0x00000001U; 106} 107static inline u32 
pri_ringmaster_intr_status0_r(void) 108{ 109 return 0x00120058U; 110} 111static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) 112{ 113 return (r >> 0U) & 0x1U; 114} 115static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) 116{ 117 return (r >> 1U) & 0x1U; 118} 119static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) 120{ 121 return (r >> 2U) & 0x1U; 122} 123static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) 124{ 125 return (r >> 8U) & 0x1U; 126} 127static inline u32 pri_ringmaster_intr_status1_r(void) 128{ 129 return 0x0012005cU; 130} 131static inline u32 pri_ringmaster_global_ctl_r(void) 132{ 133 return 0x00120060U; 134} 135static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) 136{ 137 return 0x1U; 138} 139static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 pri_ringmaster_enum_fbp_r(void) 144{ 145 return 0x00120074U; 146} 147static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) 148{ 149 return (r >> 0U) & 0x1fU; 150} 151static inline u32 pri_ringmaster_enum_gpc_r(void) 152{ 153 return 0x00120078U; 154} 155static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) 156{ 157 return (r >> 0U) & 0x1fU; 158} 159static inline u32 pri_ringmaster_enum_ltc_r(void) 160{ 161 return 0x0012006cU; 162} 163static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) 164{ 165 return (r >> 0U) & 0x1fU; 166} 167#endif
diff --git a/include/nvgpu/hw/gm20b/hw_pri_ringstation_gpc_gm20b.h b/include/nvgpu/hw/gm20b/hw_pri_ringstation_gpc_gm20b.h
deleted file mode 100644
index 8d1ffb2..0000000
--- a/include/nvgpu/hw/gm20b/hw_pri_ringstation_gpc_gm20b.h
+++ /dev/null
@@ -1,79 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_gpc_gm20b_h_ 57#define _hw_pri_ringstation_gpc_gm20b_h_ 58 59static inline u32 pri_ringstation_gpc_master_config_r(u32 i) 60{ 61 return 0x00128300U + i*4U; 62} 63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) 64{ 65 return 0x00128120U; 66} 67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) 68{ 69 return 0x00128124U; 70} 71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) 72{ 73 return 0x00128128U; 74} 75static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) 76{ 77 return 0x0012812cU; 78} 79#endif
diff --git a/include/nvgpu/hw/gm20b/hw_pri_ringstation_sys_gm20b.h b/include/nvgpu/hw/gm20b/hw_pri_ringstation_sys_gm20b.h
deleted file mode 100644
index ac1d245..0000000
--- a/include/nvgpu/hw/gm20b/hw_pri_ringstation_sys_gm20b.h
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_sys_gm20b_h_ 57#define _hw_pri_ringstation_sys_gm20b_h_ 58 59static inline u32 pri_ringstation_sys_master_config_r(u32 i) 60{ 61 return 0x00122300U + i*4U; 62} 63static inline u32 pri_ringstation_sys_decode_config_r(void) 64{ 65 return 0x00122204U; 66} 67static inline u32 pri_ringstation_sys_decode_config_ring_m(void) 68{ 69 return 0x7U << 0U; 70} 71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 pri_ringstation_sys_priv_error_adr_r(void) 76{ 77 return 0x00122120U; 78} 79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) 80{ 81 return 0x00122124U; 82} 83static inline u32 pri_ringstation_sys_priv_error_info_r(void) 84{ 85 return 0x00122128U; 86} 87static inline u32 pri_ringstation_sys_priv_error_code_r(void) 88{ 89 return 0x0012212cU; 90} 91#endif
diff --git a/include/nvgpu/hw/gm20b/hw_proj_gm20b.h b/include/nvgpu/hw/gm20b/hw_proj_gm20b.h
deleted file mode 100644
index 8129ea6..0000000
--- a/include/nvgpu/hw/gm20b/hw_proj_gm20b.h
+++ /dev/null
@@ -1,171 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_proj_gm20b_h_ 57#define _hw_proj_gm20b_h_ 58 59static inline u32 proj_gpc_base_v(void) 60{ 61 return 0x00500000U; 62} 63static inline u32 proj_gpc_shared_base_v(void) 64{ 65 return 0x00418000U; 66} 67static inline u32 proj_gpc_stride_v(void) 68{ 69 return 0x00008000U; 70} 71static inline u32 proj_gpc_priv_stride_v(void) 72{ 73 return 0x00000800U; 74} 75static inline u32 proj_ltc_stride_v(void) 76{ 77 return 0x00002000U; 78} 79static inline u32 proj_lts_stride_v(void) 80{ 81 return 0x00000200U; 82} 83static inline u32 proj_fbpa_stride_v(void) 84{ 85 return 0x00001000U; 86} 87static inline u32 proj_ppc_in_gpc_base_v(void) 88{ 89 return 0x00003000U; 90} 91static inline u32 proj_ppc_in_gpc_shared_base_v(void) 92{ 93 return 0x00003e00U; 94} 95static inline u32 proj_ppc_in_gpc_stride_v(void) 96{ 97 return 0x00000200U; 98} 99static inline u32 proj_rop_base_v(void) 100{ 101 return 0x00410000U; 102} 103static inline u32 proj_rop_shared_base_v(void) 104{ 105 return 0x00408800U; 106} 107static inline u32 proj_rop_stride_v(void) 108{ 109 return 0x00000400U; 110} 111static inline u32 proj_tpc_in_gpc_base_v(void) 112{ 113 return 0x00004000U; 114} 115static inline u32 proj_tpc_in_gpc_stride_v(void) 116{ 117 return 0x00000800U; 118} 119static 
inline u32 proj_tpc_in_gpc_shared_base_v(void) 120{ 121 return 0x00001800U; 122} 123static inline u32 proj_host_num_engines_v(void) 124{ 125 return 0x00000002U; 126} 127static inline u32 proj_host_num_pbdma_v(void) 128{ 129 return 0x00000001U; 130} 131static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) 132{ 133 return 0x00000002U; 134} 135static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 proj_scal_litter_num_fbps_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 proj_scal_litter_num_fbpas_v(void) 144{ 145 return 0x00000001U; 146} 147static inline u32 proj_scal_litter_num_gpcs_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) 156{ 157 return 0x00000002U; 158} 159static inline u32 proj_scal_litter_num_zcull_banks_v(void) 160{ 161 return 0x00000004U; 162} 163static inline u32 proj_scal_max_gpcs_v(void) 164{ 165 return 0x00000020U; 166} 167static inline u32 proj_scal_max_tpc_per_gpc_v(void) 168{ 169 return 0x00000008U; 170} 171#endif
diff --git a/include/nvgpu/hw/gm20b/hw_pwr_gm20b.h b/include/nvgpu/hw/gm20b/hw_pwr_gm20b.h
deleted file mode 100644
index a7c409d..0000000
--- a/include/nvgpu/hw/gm20b/hw_pwr_gm20b.h
+++ /dev/null
@@ -1,879 +0,0 @@ 1/* 2 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pwr_gm20b_h_ 57#define _hw_pwr_gm20b_h_ 58 59static inline u32 pwr_falcon_irqsset_r(void) 60{ 61 return 0x0010a000U; 62} 63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pwr_falcon_irqsclr_r(void) 68{ 69 return 0x0010a004U; 70} 71static inline u32 pwr_falcon_irqstat_r(void) 72{ 73 return 0x0010a008U; 74} 75static inline u32 pwr_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pwr_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pwr_falcon_irqmode_r(void) 88{ 89 return 0x0010a00cU; 90} 91static inline u32 pwr_falcon_irqmset_r(void) 92{ 93 return 0x0010a010U; 94} 95static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 pwr_falcon_irqmset_halt_f(u32 v) 112{ 113 return (v & 0x1U) << 4U; 114} 115static inline u32 
pwr_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 pwr_falcon_irqmclr_r(void) 128{ 129 return 0x0010a014U; 130} 131static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 pwr_falcon_irqmask_r(void) 168{ 169 return 0x0010a018U; 170} 171static inline u32 pwr_falcon_irqdest_r(void) 172{ 173 return 0x0010a01cU; 174} 175static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 
pwr_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 pwr_falcon_curctx_r(void) 248{ 249 return 0x0010a050U; 250} 251static inline u32 pwr_falcon_nxtctx_r(void) 252{ 253 return 0x0010a054U; 254} 255static inline u32 pwr_falcon_mailbox0_r(void) 256{ 257 return 0x0010a040U; 258} 259static inline u32 pwr_falcon_mailbox1_r(void) 260{ 261 return 0x0010a044U; 262} 263static inline u32 pwr_falcon_itfen_r(void) 264{ 265 return 0x0010a048U; 266} 267static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 pwr_falcon_idlestate_r(void) 272{ 273 return 0x0010a04cU; 274} 275static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 
pwr_falcon_os_r(void) 284{ 285 return 0x0010a080U; 286} 287static inline u32 pwr_falcon_engctl_r(void) 288{ 289 return 0x0010a0a4U; 290} 291static inline u32 pwr_falcon_cpuctl_r(void) 292{ 293 return 0x0010a100U; 294} 295static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) 300{ 301 return (v & 0x1U) << 4U; 302} 303static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) 304{ 305 return 0x1U << 4U; 306} 307static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) 308{ 309 return (r >> 4U) & 0x1U; 310} 311static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 312{ 313 return (v & 0x1U) << 6U; 314} 315static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void) 316{ 317 return 0x1U << 6U; 318} 319static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 320{ 321 return (r >> 6U) & 0x1U; 322} 323static inline u32 pwr_falcon_cpuctl_alias_r(void) 324{ 325 return 0x0010a130U; 326} 327static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) 328{ 329 return (v & 0x1U) << 1U; 330} 331static inline u32 pwr_pmu_scpctl_stat_r(void) 332{ 333 return 0x0010ac08U; 334} 335static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) 336{ 337 return (v & 0x1U) << 20U; 338} 339static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) 340{ 341 return 0x1U << 20U; 342} 343static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) 344{ 345 return (r >> 20U) & 0x1U; 346} 347static inline u32 pwr_falcon_imemc_r(u32 i) 348{ 349 return 0x0010a180U + i*16U; 350} 351static inline u32 pwr_falcon_imemc_offs_f(u32 v) 352{ 353 return (v & 0x3fU) << 2U; 354} 355static inline u32 pwr_falcon_imemc_blk_f(u32 v) 356{ 357 return (v & 0xffU) << 8U; 358} 359static inline u32 pwr_falcon_imemc_aincw_f(u32 v) 360{ 361 return (v & 0x1U) << 24U; 362} 363static inline u32 pwr_falcon_imemd_r(u32 i) 364{ 365 return 0x0010a184U + i*16U; 366} 367static inline u32 pwr_falcon_imemt_r(u32 i) 368{ 369 return 0x0010a188U + 
i*16U; 370} 371static inline u32 pwr_falcon_sctl_r(void) 372{ 373 return 0x0010a240U; 374} 375static inline u32 pwr_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 pwr_falcon_bootvec_r(void) 380{ 381 return 0x0010a104U; 382} 383static inline u32 pwr_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 pwr_falcon_dmactl_r(void) 388{ 389 return 0x0010a10cU; 390} 391static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 pwr_falcon_hwcfg_r(void) 400{ 401 return 0x0010a108U; 402} 403static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) 404{ 405 return (r >> 0U) & 0x1ffU; 406} 407static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) 408{ 409 return (r >> 9U) & 0x1ffU; 410} 411static inline u32 pwr_falcon_dmatrfbase_r(void) 412{ 413 return 0x0010a110U; 414} 415static inline u32 pwr_falcon_dmatrfmoffs_r(void) 416{ 417 return 0x0010a114U; 418} 419static inline u32 pwr_falcon_dmatrfcmd_r(void) 420{ 421 return 0x0010a118U; 422} 423static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) 424{ 425 return (v & 0x1U) << 4U; 426} 427static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) 428{ 429 return (v & 0x1U) << 5U; 430} 431static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) 432{ 433 return (v & 0x7U) << 8U; 434} 435static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) 436{ 437 return (v & 0x7U) << 12U; 438} 439static inline u32 pwr_falcon_dmatrffboffs_r(void) 440{ 441 return 0x0010a11cU; 442} 443static inline u32 pwr_falcon_exterraddr_r(void) 444{ 445 return 0x0010a168U; 446} 447static inline u32 pwr_falcon_exterrstat_r(void) 448{ 449 return 0x0010a16cU; 450} 451static inline u32 pwr_falcon_exterrstat_valid_m(void) 452{ 453 return 0x1U << 31U; 454} 455static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) 456{ 457 return (r >> 31U) & 0x1U; 458} 
459static inline u32 pwr_falcon_exterrstat_valid_true_v(void) 460{ 461 return 0x00000001U; 462} 463static inline u32 pwr_pmu_falcon_icd_cmd_r(void) 464{ 465 return 0x0010a200U; 466} 467static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) 468{ 469 return 4U; 470} 471static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) 472{ 473 return (v & 0xfU) << 0U; 474} 475static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) 476{ 477 return 0xfU << 0U; 478} 479static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) 480{ 481 return (r >> 0U) & 0xfU; 482} 483static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) 484{ 485 return 0x8U; 486} 487static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) 488{ 489 return 0xeU; 490} 491static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) 492{ 493 return (v & 0x1fU) << 8U; 494} 495static inline u32 pwr_pmu_falcon_icd_rdata_r(void) 496{ 497 return 0x0010a20cU; 498} 499static inline u32 pwr_falcon_dmemc_r(u32 i) 500{ 501 return 0x0010a1c0U + i*8U; 502} 503static inline u32 pwr_falcon_dmemc_offs_f(u32 v) 504{ 505 return (v & 0x3fU) << 2U; 506} 507static inline u32 pwr_falcon_dmemc_offs_m(void) 508{ 509 return 0x3fU << 2U; 510} 511static inline u32 pwr_falcon_dmemc_blk_f(u32 v) 512{ 513 return (v & 0xffU) << 8U; 514} 515static inline u32 pwr_falcon_dmemc_blk_m(void) 516{ 517 return 0xffU << 8U; 518} 519static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) 520{ 521 return (v & 0x1U) << 24U; 522} 523static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) 524{ 525 return (v & 0x1U) << 25U; 526} 527static inline u32 pwr_falcon_dmemd_r(u32 i) 528{ 529 return 0x0010a1c4U + i*8U; 530} 531static inline u32 pwr_pmu_new_instblk_r(void) 532{ 533 return 0x0010a480U; 534} 535static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) 536{ 537 return (v & 0xfffffffU) << 0U; 538} 539static inline u32 pwr_pmu_new_instblk_target_fb_f(void) 540{ 541 return 0x0U; 542} 543static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) 544{ 545 return 0x20000000U; 546} 547static 
inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) 548{ 549 return 0x30000000U; 550} 551static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) 552{ 553 return (v & 0x1U) << 30U; 554} 555static inline u32 pwr_pmu_mutex_id_r(void) 556{ 557 return 0x0010a488U; 558} 559static inline u32 pwr_pmu_mutex_id_value_v(u32 r) 560{ 561 return (r >> 0U) & 0xffU; 562} 563static inline u32 pwr_pmu_mutex_id_value_init_v(void) 564{ 565 return 0x00000000U; 566} 567static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) 568{ 569 return 0x000000ffU; 570} 571static inline u32 pwr_pmu_mutex_id_release_r(void) 572{ 573 return 0x0010a48cU; 574} 575static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) 576{ 577 return (v & 0xffU) << 0U; 578} 579static inline u32 pwr_pmu_mutex_id_release_value_m(void) 580{ 581 return 0xffU << 0U; 582} 583static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) 584{ 585 return 0x00000000U; 586} 587static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) 588{ 589 return 0x0U; 590} 591static inline u32 pwr_pmu_mutex_r(u32 i) 592{ 593 return 0x0010a580U + i*4U; 594} 595static inline u32 pwr_pmu_mutex__size_1_v(void) 596{ 597 return 0x00000010U; 598} 599static inline u32 pwr_pmu_mutex_value_f(u32 v) 600{ 601 return (v & 0xffU) << 0U; 602} 603static inline u32 pwr_pmu_mutex_value_v(u32 r) 604{ 605 return (r >> 0U) & 0xffU; 606} 607static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) 608{ 609 return 0x0U; 610} 611static inline u32 pwr_pmu_queue_head_r(u32 i) 612{ 613 return 0x0010a4a0U + i*4U; 614} 615static inline u32 pwr_pmu_queue_head__size_1_v(void) 616{ 617 return 0x00000004U; 618} 619static inline u32 pwr_pmu_queue_head_address_f(u32 v) 620{ 621 return (v & 0xffffffffU) << 0U; 622} 623static inline u32 pwr_pmu_queue_head_address_v(u32 r) 624{ 625 return (r >> 0U) & 0xffffffffU; 626} 627static inline u32 pwr_pmu_queue_tail_r(u32 i) 628{ 629 return 0x0010a4b0U + i*4U; 630} 631static inline u32 pwr_pmu_queue_tail__size_1_v(void) 632{ 633 
return 0x00000004U; 634} 635static inline u32 pwr_pmu_queue_tail_address_f(u32 v) 636{ 637 return (v & 0xffffffffU) << 0U; 638} 639static inline u32 pwr_pmu_queue_tail_address_v(u32 r) 640{ 641 return (r >> 0U) & 0xffffffffU; 642} 643static inline u32 pwr_pmu_msgq_head_r(void) 644{ 645 return 0x0010a4c8U; 646} 647static inline u32 pwr_pmu_msgq_head_val_f(u32 v) 648{ 649 return (v & 0xffffffffU) << 0U; 650} 651static inline u32 pwr_pmu_msgq_head_val_v(u32 r) 652{ 653 return (r >> 0U) & 0xffffffffU; 654} 655static inline u32 pwr_pmu_msgq_tail_r(void) 656{ 657 return 0x0010a4ccU; 658} 659static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) 660{ 661 return (v & 0xffffffffU) << 0U; 662} 663static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) 664{ 665 return (r >> 0U) & 0xffffffffU; 666} 667static inline u32 pwr_pmu_idle_mask_r(u32 i) 668{ 669 return 0x0010a504U + i*16U; 670} 671static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) 672{ 673 return 0x1U; 674} 675static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) 676{ 677 return 0x200000U; 678} 679static inline u32 pwr_pmu_idle_mask_1_r(u32 i) 680{ 681 return 0x0010aa34U + i*8U; 682} 683static inline u32 pwr_pmu_idle_count_r(u32 i) 684{ 685 return 0x0010a508U + i*16U; 686} 687static inline u32 pwr_pmu_idle_count_value_f(u32 v) 688{ 689 return (v & 0x7fffffffU) << 0U; 690} 691static inline u32 pwr_pmu_idle_count_value_v(u32 r) 692{ 693 return (r >> 0U) & 0x7fffffffU; 694} 695static inline u32 pwr_pmu_idle_count_reset_f(u32 v) 696{ 697 return (v & 0x1U) << 31U; 698} 699static inline u32 pwr_pmu_idle_ctrl_r(u32 i) 700{ 701 return 0x0010a50cU + i*16U; 702} 703static inline u32 pwr_pmu_idle_ctrl_value_m(void) 704{ 705 return 0x3U << 0U; 706} 707static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) 708{ 709 return 0x2U; 710} 711static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) 712{ 713 return 0x3U; 714} 715static inline u32 pwr_pmu_idle_ctrl_filter_m(void) 716{ 717 return 0x1U << 2U; 718} 719static inline u32 
pwr_pmu_idle_ctrl_filter_disabled_f(void) 720{ 721 return 0x0U; 722} 723static inline u32 pwr_pmu_idle_threshold_r(u32 i) 724{ 725 return 0x0010a8a0U + i*4U; 726} 727static inline u32 pwr_pmu_idle_threshold_value_f(u32 v) 728{ 729 return (v & 0x7fffffffU) << 0U; 730} 731static inline u32 pwr_pmu_idle_intr_r(void) 732{ 733 return 0x0010a9e8U; 734} 735static inline u32 pwr_pmu_idle_intr_en_f(u32 v) 736{ 737 return (v & 0x1U) << 0U; 738} 739static inline u32 pwr_pmu_idle_intr_en_disabled_v(void) 740{ 741 return 0x00000000U; 742} 743static inline u32 pwr_pmu_idle_intr_en_enabled_v(void) 744{ 745 return 0x00000001U; 746} 747static inline u32 pwr_pmu_idle_intr_status_r(void) 748{ 749 return 0x0010a9ecU; 750} 751static inline u32 pwr_pmu_idle_intr_status_intr_f(u32 v) 752{ 753 return (v & 0x1U) << 0U; 754} 755static inline u32 pwr_pmu_idle_intr_status_intr_m(void) 756{ 757 return 0x1U << 0U; 758} 759static inline u32 pwr_pmu_idle_intr_status_intr_v(u32 r) 760{ 761 return (r >> 0U) & 0x1U; 762} 763static inline u32 pwr_pmu_idle_intr_status_intr_pending_v(void) 764{ 765 return 0x00000001U; 766} 767static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void) 768{ 769 return 0x00000001U; 770} 771static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) 772{ 773 return 0x0010a9f0U + i*8U; 774} 775static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) 776{ 777 return 0x0010a9f4U + i*8U; 778} 779static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) 780{ 781 return 0x0010aa30U + i*8U; 782} 783static inline u32 pwr_pmu_debug_r(u32 i) 784{ 785 return 0x0010a5c0U + i*4U; 786} 787static inline u32 pwr_pmu_debug__size_1_v(void) 788{ 789 return 0x00000004U; 790} 791static inline u32 pwr_pmu_mailbox_r(u32 i) 792{ 793 return 0x0010a450U + i*4U; 794} 795static inline u32 pwr_pmu_mailbox__size_1_v(void) 796{ 797 return 0x0000000cU; 798} 799static inline u32 pwr_pmu_bar0_addr_r(void) 800{ 801 return 0x0010a7a0U; 802} 803static inline u32 pwr_pmu_bar0_data_r(void) 804{ 805 return 0x0010a7a4U; 806} 
807static inline u32 pwr_pmu_bar0_ctl_r(void) 808{ 809 return 0x0010a7acU; 810} 811static inline u32 pwr_pmu_bar0_timeout_r(void) 812{ 813 return 0x0010a7a8U; 814} 815static inline u32 pwr_pmu_bar0_fecs_error_r(void) 816{ 817 return 0x0010a988U; 818} 819static inline u32 pwr_pmu_bar0_error_status_r(void) 820{ 821 return 0x0010a7b0U; 822} 823static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) 824{ 825 return 0x0010a6c0U + i*4U; 826} 827static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) 828{ 829 return 0x0010a6e8U + i*4U; 830} 831static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) 832{ 833 return 0x0010a710U + i*4U; 834} 835static inline u32 pwr_pmu_pg_intren_r(u32 i) 836{ 837 return 0x0010a760U + i*4U; 838} 839static inline u32 pwr_fbif_transcfg_r(u32 i) 840{ 841 return 0x0010ae00U + i*4U; 842} 843static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) 844{ 845 return 0x0U; 846} 847static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) 848{ 849 return 0x1U; 850} 851static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) 852{ 853 return 0x2U; 854} 855static inline u32 pwr_fbif_transcfg_mem_type_s(void) 856{ 857 return 1U; 858} 859static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) 860{ 861 return (v & 0x1U) << 2U; 862} 863static inline u32 pwr_fbif_transcfg_mem_type_m(void) 864{ 865 return 0x1U << 2U; 866} 867static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) 868{ 869 return (r >> 2U) & 0x1U; 870} 871static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) 872{ 873 return 0x0U; 874} 875static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) 876{ 877 return 0x4U; 878} 879#endif
diff --git a/include/nvgpu/hw/gm20b/hw_ram_gm20b.h b/include/nvgpu/hw/gm20b/hw_ram_gm20b.h
deleted file mode 100644
index 2414abf..0000000
--- a/include/nvgpu/hw/gm20b/hw_ram_gm20b.h
+++ /dev/null
@@ -1,459 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ram_gm20b_h_ 57#define _hw_ram_gm20b_h_ 58 59static inline u32 ram_in_ramfc_s(void) 60{ 61 return 4096U; 62} 63static inline u32 ram_in_ramfc_w(void) 64{ 65 return 0U; 66} 67static inline u32 ram_in_page_dir_base_target_f(u32 v) 68{ 69 return (v & 0x3U) << 0U; 70} 71static inline u32 ram_in_page_dir_base_target_w(void) 72{ 73 return 128U; 74} 75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 ram_in_page_dir_base_vol_w(void) 88{ 89 return 128U; 90} 91static inline u32 ram_in_page_dir_base_vol_true_f(void) 92{ 93 return 0x4U; 94} 95static inline u32 ram_in_big_page_size_f(u32 v) 96{ 97 return (v & 0x1U) << 11U; 98} 99static inline u32 ram_in_big_page_size_m(void) 100{ 101 return 0x1U << 11U; 102} 103static inline u32 ram_in_big_page_size_w(void) 104{ 105 return 128U; 106} 107static inline u32 ram_in_big_page_size_128kb_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 ram_in_big_page_size_64kb_f(void) 112{ 113 return 0x800U; 114} 115static inline u32 ram_in_page_dir_base_lo_f(u32 v) 116{ 
117 return (v & 0xfffffU) << 12U; 118} 119static inline u32 ram_in_page_dir_base_lo_w(void) 120{ 121 return 128U; 122} 123static inline u32 ram_in_page_dir_base_hi_f(u32 v) 124{ 125 return (v & 0xffU) << 0U; 126} 127static inline u32 ram_in_page_dir_base_hi_w(void) 128{ 129 return 129U; 130} 131static inline u32 ram_in_adr_limit_lo_f(u32 v) 132{ 133 return (v & 0xfffffU) << 12U; 134} 135static inline u32 ram_in_adr_limit_lo_w(void) 136{ 137 return 130U; 138} 139static inline u32 ram_in_adr_limit_hi_f(u32 v) 140{ 141 return (v & 0xffU) << 0U; 142} 143static inline u32 ram_in_adr_limit_hi_w(void) 144{ 145 return 131U; 146} 147static inline u32 ram_in_engine_cs_w(void) 148{ 149 return 132U; 150} 151static inline u32 ram_in_engine_cs_wfi_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 ram_in_engine_cs_wfi_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 ram_in_engine_cs_fg_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 ram_in_engine_cs_fg_f(void) 164{ 165 return 0x8U; 166} 167static inline u32 ram_in_gr_cs_w(void) 168{ 169 return 132U; 170} 171static inline u32 ram_in_gr_cs_wfi_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 ram_in_gr_wfi_target_w(void) 176{ 177 return 132U; 178} 179static inline u32 ram_in_gr_wfi_mode_w(void) 180{ 181 return 132U; 182} 183static inline u32 ram_in_gr_wfi_mode_physical_v(void) 184{ 185 return 0x00000000U; 186} 187static inline u32 ram_in_gr_wfi_mode_physical_f(void) 188{ 189 return 0x0U; 190} 191static inline u32 ram_in_gr_wfi_mode_virtual_v(void) 192{ 193 return 0x00000001U; 194} 195static inline u32 ram_in_gr_wfi_mode_virtual_f(void) 196{ 197 return 0x4U; 198} 199static inline u32 ram_in_gr_wfi_ptr_lo_f(u32 v) 200{ 201 return (v & 0xfffffU) << 12U; 202} 203static inline u32 ram_in_gr_wfi_ptr_lo_w(void) 204{ 205 return 132U; 206} 207static inline u32 ram_in_gr_wfi_ptr_hi_f(u32 v) 208{ 209 return (v & 0xffU) << 0U; 210} 211static inline u32 ram_in_gr_wfi_ptr_hi_w(void) 212{ 213 return 133U; 
214} 215static inline u32 ram_in_base_shift_v(void) 216{ 217 return 0x0000000cU; 218} 219static inline u32 ram_in_alloc_size_v(void) 220{ 221 return 0x00001000U; 222} 223static inline u32 ram_fc_size_val_v(void) 224{ 225 return 0x00000200U; 226} 227static inline u32 ram_fc_gp_put_w(void) 228{ 229 return 0U; 230} 231static inline u32 ram_fc_userd_w(void) 232{ 233 return 2U; 234} 235static inline u32 ram_fc_userd_hi_w(void) 236{ 237 return 3U; 238} 239static inline u32 ram_fc_signature_w(void) 240{ 241 return 4U; 242} 243static inline u32 ram_fc_gp_get_w(void) 244{ 245 return 5U; 246} 247static inline u32 ram_fc_pb_get_w(void) 248{ 249 return 6U; 250} 251static inline u32 ram_fc_pb_get_hi_w(void) 252{ 253 return 7U; 254} 255static inline u32 ram_fc_pb_top_level_get_w(void) 256{ 257 return 8U; 258} 259static inline u32 ram_fc_pb_top_level_get_hi_w(void) 260{ 261 return 9U; 262} 263static inline u32 ram_fc_acquire_w(void) 264{ 265 return 12U; 266} 267static inline u32 ram_fc_semaphorea_w(void) 268{ 269 return 14U; 270} 271static inline u32 ram_fc_semaphoreb_w(void) 272{ 273 return 15U; 274} 275static inline u32 ram_fc_semaphorec_w(void) 276{ 277 return 16U; 278} 279static inline u32 ram_fc_semaphored_w(void) 280{ 281 return 17U; 282} 283static inline u32 ram_fc_gp_base_w(void) 284{ 285 return 18U; 286} 287static inline u32 ram_fc_gp_base_hi_w(void) 288{ 289 return 19U; 290} 291static inline u32 ram_fc_gp_fetch_w(void) 292{ 293 return 20U; 294} 295static inline u32 ram_fc_pb_fetch_w(void) 296{ 297 return 21U; 298} 299static inline u32 ram_fc_pb_fetch_hi_w(void) 300{ 301 return 22U; 302} 303static inline u32 ram_fc_pb_put_w(void) 304{ 305 return 23U; 306} 307static inline u32 ram_fc_pb_put_hi_w(void) 308{ 309 return 24U; 310} 311static inline u32 ram_fc_pb_header_w(void) 312{ 313 return 33U; 314} 315static inline u32 ram_fc_pb_count_w(void) 316{ 317 return 34U; 318} 319static inline u32 ram_fc_subdevice_w(void) 320{ 321 return 37U; 322} 323static inline u32 
ram_fc_formats_w(void) 324{ 325 return 39U; 326} 327static inline u32 ram_fc_syncpointa_w(void) 328{ 329 return 41U; 330} 331static inline u32 ram_fc_syncpointb_w(void) 332{ 333 return 42U; 334} 335static inline u32 ram_fc_target_w(void) 336{ 337 return 43U; 338} 339static inline u32 ram_fc_hce_ctrl_w(void) 340{ 341 return 57U; 342} 343static inline u32 ram_fc_chid_w(void) 344{ 345 return 58U; 346} 347static inline u32 ram_fc_chid_id_f(u32 v) 348{ 349 return (v & 0xfffU) << 0U; 350} 351static inline u32 ram_fc_chid_id_w(void) 352{ 353 return 0U; 354} 355static inline u32 ram_fc_runlist_timeslice_w(void) 356{ 357 return 62U; 358} 359static inline u32 ram_userd_base_shift_v(void) 360{ 361 return 0x00000009U; 362} 363static inline u32 ram_userd_chan_size_v(void) 364{ 365 return 0x00000200U; 366} 367static inline u32 ram_userd_put_w(void) 368{ 369 return 16U; 370} 371static inline u32 ram_userd_get_w(void) 372{ 373 return 17U; 374} 375static inline u32 ram_userd_ref_w(void) 376{ 377 return 18U; 378} 379static inline u32 ram_userd_put_hi_w(void) 380{ 381 return 19U; 382} 383static inline u32 ram_userd_ref_threshold_w(void) 384{ 385 return 20U; 386} 387static inline u32 ram_userd_top_level_get_w(void) 388{ 389 return 22U; 390} 391static inline u32 ram_userd_top_level_get_hi_w(void) 392{ 393 return 23U; 394} 395static inline u32 ram_userd_get_hi_w(void) 396{ 397 return 24U; 398} 399static inline u32 ram_userd_gp_get_w(void) 400{ 401 return 34U; 402} 403static inline u32 ram_userd_gp_put_w(void) 404{ 405 return 35U; 406} 407static inline u32 ram_userd_gp_top_level_get_w(void) 408{ 409 return 22U; 410} 411static inline u32 ram_userd_gp_top_level_get_hi_w(void) 412{ 413 return 23U; 414} 415static inline u32 ram_rl_entry_size_v(void) 416{ 417 return 0x00000008U; 418} 419static inline u32 ram_rl_entry_chid_f(u32 v) 420{ 421 return (v & 0xfffU) << 0U; 422} 423static inline u32 ram_rl_entry_id_f(u32 v) 424{ 425 return (v & 0xfffU) << 0U; 426} 427static inline u32 
ram_rl_entry_type_f(u32 v) 428{ 429 return (v & 0x1U) << 13U; 430} 431static inline u32 ram_rl_entry_type_chid_f(void) 432{ 433 return 0x0U; 434} 435static inline u32 ram_rl_entry_type_tsg_f(void) 436{ 437 return 0x2000U; 438} 439static inline u32 ram_rl_entry_timeslice_scale_f(u32 v) 440{ 441 return (v & 0xfU) << 14U; 442} 443static inline u32 ram_rl_entry_timeslice_scale_3_f(void) 444{ 445 return 0xc000U; 446} 447static inline u32 ram_rl_entry_timeslice_timeout_f(u32 v) 448{ 449 return (v & 0xffU) << 18U; 450} 451static inline u32 ram_rl_entry_timeslice_timeout_128_f(void) 452{ 453 return 0x2000000U; 454} 455static inline u32 ram_rl_entry_tsg_length_f(u32 v) 456{ 457 return (v & 0x3fU) << 26U; 458} 459#endif
diff --git a/include/nvgpu/hw/gm20b/hw_therm_gm20b.h b/include/nvgpu/hw/gm20b/hw_therm_gm20b.h
deleted file mode 100644
index fc1cd51..0000000
--- a/include/nvgpu/hw/gm20b/hw_therm_gm20b.h
+++ /dev/null
@@ -1,355 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_therm_gm20b_h_ 57#define _hw_therm_gm20b_h_ 58 59static inline u32 therm_use_a_r(void) 60{ 61 return 0x00020798U; 62} 63static inline u32 therm_use_a_ext_therm_0_enable_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 therm_use_a_ext_therm_1_enable_f(void) 68{ 69 return 0x2U; 70} 71static inline u32 therm_use_a_ext_therm_2_enable_f(void) 72{ 73 return 0x4U; 74} 75static inline u32 therm_evt_ext_therm_0_r(void) 76{ 77 return 0x00020700U; 78} 79static inline u32 therm_evt_ext_therm_0_slow_factor_f(u32 v) 80{ 81 return (v & 0x3fU) << 8U; 82} 83static inline u32 therm_evt_ext_therm_0_slow_factor_init_v(void) 84{ 85 return 0x00000000U; 86} 87static inline u32 therm_evt_ext_therm_1_r(void) 88{ 89 return 0x00020704U; 90} 91static inline u32 therm_evt_ext_therm_1_slow_factor_f(u32 v) 92{ 93 return (v & 0x3fU) << 8U; 94} 95static inline u32 therm_evt_ext_therm_1_slow_factor_init_v(void) 96{ 97 return 0x00000000U; 98} 99static inline u32 therm_evt_ext_therm_2_r(void) 100{ 101 return 0x00020708U; 102} 103static inline u32 therm_evt_ext_therm_2_slow_factor_f(u32 v) 104{ 105 return (v & 0x3fU) << 8U; 106} 107static inline u32 therm_evt_ext_therm_2_slow_factor_init_v(void) 108{ 109 return 0x00000000U; 110} 111static inline u32 
therm_weight_1_r(void) 112{ 113 return 0x00020024U; 114} 115static inline u32 therm_config1_r(void) 116{ 117 return 0x00020050U; 118} 119static inline u32 therm_config2_r(void) 120{ 121 return 0x00020130U; 122} 123static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) 124{ 125 return (v & 0x1U) << 24U; 126} 127static inline u32 therm_config2_grad_enable_f(u32 v) 128{ 129 return (v & 0x1U) << 31U; 130} 131static inline u32 therm_gate_ctrl_r(u32 i) 132{ 133 return 0x00020200U + i*4U; 134} 135static inline u32 therm_gate_ctrl_eng_clk_m(void) 136{ 137 return 0x3U << 0U; 138} 139static inline u32 therm_gate_ctrl_eng_clk_run_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) 148{ 149 return 0x2U; 150} 151static inline u32 therm_gate_ctrl_blk_clk_m(void) 152{ 153 return 0x3U << 2U; 154} 155static inline u32 therm_gate_ctrl_blk_clk_run_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) 160{ 161 return 0x4U; 162} 163static inline u32 therm_gate_ctrl_eng_pwr_m(void) 164{ 165 return 0x3U << 4U; 166} 167static inline u32 therm_gate_ctrl_eng_pwr_auto_f(void) 168{ 169 return 0x10U; 170} 171static inline u32 therm_gate_ctrl_eng_pwr_off_v(void) 172{ 173 return 0x00000002U; 174} 175static inline u32 therm_gate_ctrl_eng_pwr_off_f(void) 176{ 177 return 0x20U; 178} 179static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) 180{ 181 return (v & 0x1fU) << 8U; 182} 183static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) 184{ 185 return 0x1fU << 8U; 186} 187static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) 188{ 189 return (v & 0x7U) << 13U; 190} 191static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) 192{ 193 return 0x7U << 13U; 194} 195static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) 196{ 197 return (v & 0xfU) << 16U; 198} 199static inline u32 
therm_gate_ctrl_eng_delay_before_m(void) 200{ 201 return 0xfU << 16U; 202} 203static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) 204{ 205 return (v & 0xfU) << 20U; 206} 207static inline u32 therm_gate_ctrl_eng_delay_after_m(void) 208{ 209 return 0xfU << 20U; 210} 211static inline u32 therm_fecs_idle_filter_r(void) 212{ 213 return 0x00020288U; 214} 215static inline u32 therm_fecs_idle_filter_value_m(void) 216{ 217 return 0xffffffffU << 0U; 218} 219static inline u32 therm_hubmmu_idle_filter_r(void) 220{ 221 return 0x0002028cU; 222} 223static inline u32 therm_hubmmu_idle_filter_value_m(void) 224{ 225 return 0xffffffffU << 0U; 226} 227static inline u32 therm_clk_slowdown_r(u32 i) 228{ 229 return 0x00020160U + i*4U; 230} 231static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) 232{ 233 return (v & 0x3fU) << 16U; 234} 235static inline u32 therm_clk_slowdown_idle_factor_m(void) 236{ 237 return 0x3fU << 16U; 238} 239static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) 240{ 241 return (r >> 16U) & 0x3fU; 242} 243static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) 244{ 245 return 0x0U; 246} 247static inline u32 therm_grad_stepping_table_r(u32 i) 248{ 249 return 0x000202c8U + i*4U; 250} 251static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) 252{ 253 return (v & 0x3fU) << 0U; 254} 255static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) 256{ 257 return 0x3fU << 0U; 258} 259static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) 260{ 261 return 0x1U; 262} 263static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) 264{ 265 return 0x2U; 266} 267static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) 268{ 269 return 0x6U; 270} 271static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) 272{ 273 return 0xeU; 274} 275static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) 276{ 277 return (v & 0x3fU) << 6U; 278} 
279static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) 280{ 281 return 0x3fU << 6U; 282} 283static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) 284{ 285 return (v & 0x3fU) << 12U; 286} 287static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) 288{ 289 return 0x3fU << 12U; 290} 291static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v) 292{ 293 return (v & 0x3fU) << 18U; 294} 295static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) 296{ 297 return 0x3fU << 18U; 298} 299static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v) 300{ 301 return (v & 0x3fU) << 24U; 302} 303static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) 304{ 305 return 0x3fU << 24U; 306} 307static inline u32 therm_grad_stepping0_r(void) 308{ 309 return 0x000202c0U; 310} 311static inline u32 therm_grad_stepping0_feature_s(void) 312{ 313 return 1U; 314} 315static inline u32 therm_grad_stepping0_feature_f(u32 v) 316{ 317 return (v & 0x1U) << 0U; 318} 319static inline u32 therm_grad_stepping0_feature_m(void) 320{ 321 return 0x1U << 0U; 322} 323static inline u32 therm_grad_stepping0_feature_v(u32 r) 324{ 325 return (r >> 0U) & 0x1U; 326} 327static inline u32 therm_grad_stepping0_feature_enable_f(void) 328{ 329 return 0x1U; 330} 331static inline u32 therm_grad_stepping1_r(void) 332{ 333 return 0x000202c4U; 334} 335static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) 336{ 337 return (v & 0x1ffffU) << 0U; 338} 339static inline u32 therm_clk_timing_r(u32 i) 340{ 341 return 0x000203c0U + i*4U; 342} 343static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) 344{ 345 return (v & 0x1U) << 16U; 346} 347static inline u32 therm_clk_timing_grad_slowdown_m(void) 348{ 349 return 0x1U << 16U; 350} 351static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) 352{ 353 return 0x10000U; 354} 355#endif
diff --git a/include/nvgpu/hw/gm20b/hw_timer_gm20b.h b/include/nvgpu/hw/gm20b/hw_timer_gm20b.h
deleted file mode 100644
index f409367..0000000
--- a/include/nvgpu/hw/gm20b/hw_timer_gm20b.h
+++ /dev/null
@@ -1,127 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_timer_gm20b_h_ 57#define _hw_timer_gm20b_h_ 58 59static inline u32 timer_pri_timeout_r(void) 60{ 61 return 0x00009080U; 62} 63static inline u32 timer_pri_timeout_period_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 timer_pri_timeout_period_m(void) 68{ 69 return 0xffffffU << 0U; 70} 71static inline u32 timer_pri_timeout_period_v(u32 r) 72{ 73 return (r >> 0U) & 0xffffffU; 74} 75static inline u32 timer_pri_timeout_en_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 timer_pri_timeout_en_m(void) 80{ 81 return 0x1U << 31U; 82} 83static inline u32 timer_pri_timeout_en_v(u32 r) 84{ 85 return (r >> 31U) & 0x1U; 86} 87static inline u32 timer_pri_timeout_en_en_enabled_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 timer_pri_timeout_en_en_disabled_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 timer_pri_timeout_save_0_r(void) 96{ 97 return 0x00009084U; 98} 99static inline u32 timer_pri_timeout_save_0_fecs_tgt_v(u32 r) 100{ 101 return (r >> 31U) & 0x1U; 102} 103static inline u32 timer_pri_timeout_save_0_addr_v(u32 r) 104{ 105 return (r >> 2U) & 0x3fffffU; 106} 107static inline u32 timer_pri_timeout_save_0_write_v(u32 r) 108{ 109 return (r >> 1U) & 0x1U; 110} 111static inline u32 
timer_pri_timeout_save_1_r(void) 112{ 113 return 0x00009088U; 114} 115static inline u32 timer_pri_timeout_fecs_errcode_r(void) 116{ 117 return 0x0000908cU; 118} 119static inline u32 timer_time_0_r(void) 120{ 121 return 0x00009400U; 122} 123static inline u32 timer_time_1_r(void) 124{ 125 return 0x00009410U; 126} 127#endif
diff --git a/include/nvgpu/hw/gm20b/hw_top_gm20b.h b/include/nvgpu/hw/gm20b/hw_top_gm20b.h
deleted file mode 100644
index 6d48839..0000000
--- a/include/nvgpu/hw/gm20b/hw_top_gm20b.h
+++ /dev/null
@@ -1,235 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_top_gm20b_h_ 57#define _hw_top_gm20b_h_ 58 59static inline u32 top_num_gpcs_r(void) 60{ 61 return 0x00022430U; 62} 63static inline u32 top_num_gpcs_value_v(u32 r) 64{ 65 return (r >> 0U) & 0x1fU; 66} 67static inline u32 top_tpc_per_gpc_r(void) 68{ 69 return 0x00022434U; 70} 71static inline u32 top_tpc_per_gpc_value_v(u32 r) 72{ 73 return (r >> 0U) & 0x1fU; 74} 75static inline u32 top_num_fbps_r(void) 76{ 77 return 0x00022438U; 78} 79static inline u32 top_num_fbps_value_v(u32 r) 80{ 81 return (r >> 0U) & 0x1fU; 82} 83static inline u32 top_ltc_per_fbp_r(void) 84{ 85 return 0x00022450U; 86} 87static inline u32 top_ltc_per_fbp_value_v(u32 r) 88{ 89 return (r >> 0U) & 0x1fU; 90} 91static inline u32 top_slices_per_ltc_r(void) 92{ 93 return 0x0002245cU; 94} 95static inline u32 top_slices_per_ltc_value_v(u32 r) 96{ 97 return (r >> 0U) & 0x1fU; 98} 99static inline u32 top_num_ltcs_r(void) 100{ 101 return 0x00022454U; 102} 103static inline u32 top_device_info_r(u32 i) 104{ 105 return 0x00022700U + i*4U; 106} 107static inline u32 top_device_info__size_1_v(void) 108{ 109 return 0x00000040U; 110} 111static inline u32 top_device_info_chain_v(u32 r) 112{ 113 return (r >> 31U) & 0x1U; 114} 115static inline u32 top_device_info_chain_enable_v(void) 
116{ 117 return 0x00000001U; 118} 119static inline u32 top_device_info_engine_enum_v(u32 r) 120{ 121 return (r >> 26U) & 0xfU; 122} 123static inline u32 top_device_info_runlist_enum_v(u32 r) 124{ 125 return (r >> 21U) & 0xfU; 126} 127static inline u32 top_device_info_intr_enum_v(u32 r) 128{ 129 return (r >> 15U) & 0x1fU; 130} 131static inline u32 top_device_info_reset_enum_v(u32 r) 132{ 133 return (r >> 9U) & 0x1fU; 134} 135static inline u32 top_device_info_type_enum_v(u32 r) 136{ 137 return (r >> 2U) & 0x1fffffffU; 138} 139static inline u32 top_device_info_type_enum_graphics_v(void) 140{ 141 return 0x00000000U; 142} 143static inline u32 top_device_info_type_enum_graphics_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 top_device_info_type_enum_copy0_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 top_device_info_type_enum_copy0_f(void) 152{ 153 return 0x4U; 154} 155static inline u32 top_device_info_type_enum_copy1_v(void) 156{ 157 return 0x00000002U; 158} 159static inline u32 top_device_info_type_enum_copy1_f(void) 160{ 161 return 0x8U; 162} 163static inline u32 top_device_info_type_enum_copy2_v(void) 164{ 165 return 0x00000003U; 166} 167static inline u32 top_device_info_type_enum_copy2_f(void) 168{ 169 return 0xcU; 170} 171static inline u32 top_device_info_engine_v(u32 r) 172{ 173 return (r >> 5U) & 0x1U; 174} 175static inline u32 top_device_info_runlist_v(u32 r) 176{ 177 return (r >> 4U) & 0x1U; 178} 179static inline u32 top_device_info_intr_v(u32 r) 180{ 181 return (r >> 3U) & 0x1U; 182} 183static inline u32 top_device_info_reset_v(u32 r) 184{ 185 return (r >> 2U) & 0x1U; 186} 187static inline u32 top_device_info_entry_v(u32 r) 188{ 189 return (r >> 0U) & 0x3U; 190} 191static inline u32 top_device_info_entry_not_valid_v(void) 192{ 193 return 0x00000000U; 194} 195static inline u32 top_device_info_entry_enum_v(void) 196{ 197 return 0x00000002U; 198} 199static inline u32 top_device_info_entry_engine_type_v(void) 200{ 201 return 0x00000003U; 
202} 203static inline u32 top_device_info_entry_data_v(void) 204{ 205 return 0x00000001U; 206} 207static inline u32 top_device_info_data_type_v(u32 r) 208{ 209 return (r >> 30U) & 0x1U; 210} 211static inline u32 top_device_info_data_type_enum2_v(void) 212{ 213 return 0x00000000U; 214} 215static inline u32 top_device_info_data_pri_base_v(u32 r) 216{ 217 return (r >> 12U) & 0x7ffU; 218} 219static inline u32 top_device_info_data_pri_base_align_v(void) 220{ 221 return 0x0000000cU; 222} 223static inline u32 top_device_info_data_fault_id_enum_v(u32 r) 224{ 225 return (r >> 3U) & 0x1fU; 226} 227static inline u32 top_device_info_data_fault_id_v(u32 r) 228{ 229 return (r >> 2U) & 0x1U; 230} 231static inline u32 top_device_info_data_fault_id_valid_v(void) 232{ 233 return 0x00000001U; 234} 235#endif
diff --git a/include/nvgpu/hw/gm20b/hw_trim_gm20b.h b/include/nvgpu/hw/gm20b/hw_trim_gm20b.h
deleted file mode 100644
index 8f0a77a..0000000
--- a/include/nvgpu/hw/gm20b/hw_trim_gm20b.h
+++ /dev/null
@@ -1,503 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_trim_gm20b_h_ 57#define _hw_trim_gm20b_h_ 58 59static inline u32 trim_sys_gpcpll_cfg_r(void) 60{ 61 return 0x00137000U; 62} 63static inline u32 trim_sys_gpcpll_cfg_enable_m(void) 64{ 65 return 0x1U << 0U; 66} 67static inline u32 trim_sys_gpcpll_cfg_enable_v(u32 r) 68{ 69 return (r >> 0U) & 0x1U; 70} 71static inline u32 trim_sys_gpcpll_cfg_enable_no_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 trim_sys_gpcpll_cfg_enable_yes_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 trim_sys_gpcpll_cfg_iddq_m(void) 80{ 81 return 0x1U << 1U; 82} 83static inline u32 trim_sys_gpcpll_cfg_iddq_v(u32 r) 84{ 85 return (r >> 1U) & 0x1U; 86} 87static inline u32 trim_sys_gpcpll_cfg_iddq_power_on_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 trim_sys_gpcpll_cfg_sync_mode_m(void) 92{ 93 return 0x1U << 2U; 94} 95static inline u32 trim_sys_gpcpll_cfg_sync_mode_v(u32 r) 96{ 97 return (r >> 2U) & 0x1U; 98} 99static inline u32 trim_sys_gpcpll_cfg_sync_mode_enable_f(void) 100{ 101 return 0x4U; 102} 103static inline u32 trim_sys_gpcpll_cfg_sync_mode_disable_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 trim_sys_gpcpll_cfg_enb_lckdet_m(void) 108{ 109 return 0x1U << 4U; 110} 111static inline u32 
trim_sys_gpcpll_cfg_enb_lckdet_power_on_f(void) 112{ 113 return 0x0U; 114} 115static inline u32 trim_sys_gpcpll_cfg_enb_lckdet_power_off_f(void) 116{ 117 return 0x10U; 118} 119static inline u32 trim_sys_gpcpll_cfg_pll_lock_v(u32 r) 120{ 121 return (r >> 17U) & 0x1U; 122} 123static inline u32 trim_sys_gpcpll_cfg_pll_lock_true_f(void) 124{ 125 return 0x20000U; 126} 127static inline u32 trim_sys_gpcpll_coeff_r(void) 128{ 129 return 0x00137004U; 130} 131static inline u32 trim_sys_gpcpll_coeff_mdiv_f(u32 v) 132{ 133 return (v & 0xffU) << 0U; 134} 135static inline u32 trim_sys_gpcpll_coeff_mdiv_m(void) 136{ 137 return 0xffU << 0U; 138} 139static inline u32 trim_sys_gpcpll_coeff_mdiv_v(u32 r) 140{ 141 return (r >> 0U) & 0xffU; 142} 143static inline u32 trim_sys_gpcpll_coeff_ndiv_f(u32 v) 144{ 145 return (v & 0xffU) << 8U; 146} 147static inline u32 trim_sys_gpcpll_coeff_ndiv_m(void) 148{ 149 return 0xffU << 8U; 150} 151static inline u32 trim_sys_gpcpll_coeff_ndiv_v(u32 r) 152{ 153 return (r >> 8U) & 0xffU; 154} 155static inline u32 trim_sys_gpcpll_coeff_pldiv_f(u32 v) 156{ 157 return (v & 0x3fU) << 16U; 158} 159static inline u32 trim_sys_gpcpll_coeff_pldiv_m(void) 160{ 161 return 0x3fU << 16U; 162} 163static inline u32 trim_sys_gpcpll_coeff_pldiv_v(u32 r) 164{ 165 return (r >> 16U) & 0x3fU; 166} 167static inline u32 trim_sys_sel_vco_r(void) 168{ 169 return 0x00137100U; 170} 171static inline u32 trim_sys_sel_vco_gpc2clk_out_m(void) 172{ 173 return 0x1U << 0U; 174} 175static inline u32 trim_sys_sel_vco_gpc2clk_out_init_v(void) 176{ 177 return 0x00000000U; 178} 179static inline u32 trim_sys_sel_vco_gpc2clk_out_init_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 trim_sys_sel_vco_gpc2clk_out_bypass_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 trim_sys_sel_vco_gpc2clk_out_vco_f(void) 188{ 189 return 0x1U; 190} 191static inline u32 trim_sys_gpc2clk_out_r(void) 192{ 193 return 0x00137250U; 194} 195static inline u32 trim_sys_gpc2clk_out_bypdiv_s(void) 196{ 197 
return 6U; 198} 199static inline u32 trim_sys_gpc2clk_out_bypdiv_f(u32 v) 200{ 201 return (v & 0x3fU) << 0U; 202} 203static inline u32 trim_sys_gpc2clk_out_bypdiv_m(void) 204{ 205 return 0x3fU << 0U; 206} 207static inline u32 trim_sys_gpc2clk_out_bypdiv_v(u32 r) 208{ 209 return (r >> 0U) & 0x3fU; 210} 211static inline u32 trim_sys_gpc2clk_out_bypdiv_by31_f(void) 212{ 213 return 0x3cU; 214} 215static inline u32 trim_sys_gpc2clk_out_vcodiv_s(void) 216{ 217 return 6U; 218} 219static inline u32 trim_sys_gpc2clk_out_vcodiv_f(u32 v) 220{ 221 return (v & 0x3fU) << 8U; 222} 223static inline u32 trim_sys_gpc2clk_out_vcodiv_m(void) 224{ 225 return 0x3fU << 8U; 226} 227static inline u32 trim_sys_gpc2clk_out_vcodiv_v(u32 r) 228{ 229 return (r >> 8U) & 0x3fU; 230} 231static inline u32 trim_sys_gpc2clk_out_vcodiv_by1_f(void) 232{ 233 return 0x0U; 234} 235static inline u32 trim_sys_gpc2clk_out_sdiv14_m(void) 236{ 237 return 0x1U << 31U; 238} 239static inline u32 trim_sys_gpc2clk_out_sdiv14_indiv4_mode_f(void) 240{ 241 return 0x80000000U; 242} 243static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_r(u32 i) 244{ 245 return 0x00134124U + i*512U; 246} 247static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_noofipclks_f(u32 v) 248{ 249 return (v & 0x3fffU) << 0U; 250} 251static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_write_en_asserted_f(void) 252{ 253 return 0x10000U; 254} 255static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_enable_asserted_f(void) 256{ 257 return 0x100000U; 258} 259static inline u32 trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f(void) 260{ 261 return 0x1000000U; 262} 263static inline u32 trim_gpc_clk_cntr_ncgpcclk_cnt_r(u32 i) 264{ 265 return 0x00134128U + i*512U; 266} 267static inline u32 trim_gpc_clk_cntr_ncgpcclk_cnt_value_v(u32 r) 268{ 269 return (r >> 0U) & 0xfffffU; 270} 271static inline u32 trim_sys_gpcpll_cfg2_r(void) 272{ 273 return 0x0013700cU; 274} 275static inline u32 trim_sys_gpcpll_cfg2_sdm_din_f(u32 v) 276{ 277 return (v & 0xffU) << 0U; 278} 279static 
inline u32 trim_sys_gpcpll_cfg2_sdm_din_m(void) 280{ 281 return 0xffU << 0U; 282} 283static inline u32 trim_sys_gpcpll_cfg2_sdm_din_v(u32 r) 284{ 285 return (r >> 0U) & 0xffU; 286} 287static inline u32 trim_sys_gpcpll_cfg2_sdm_din_new_f(u32 v) 288{ 289 return (v & 0xffU) << 8U; 290} 291static inline u32 trim_sys_gpcpll_cfg2_sdm_din_new_m(void) 292{ 293 return 0xffU << 8U; 294} 295static inline u32 trim_sys_gpcpll_cfg2_sdm_din_new_v(u32 r) 296{ 297 return (r >> 8U) & 0xffU; 298} 299static inline u32 trim_sys_gpcpll_cfg2_pll_stepa_f(u32 v) 300{ 301 return (v & 0xffU) << 24U; 302} 303static inline u32 trim_sys_gpcpll_cfg2_pll_stepa_m(void) 304{ 305 return 0xffU << 24U; 306} 307static inline u32 trim_sys_gpcpll_cfg3_r(void) 308{ 309 return 0x00137018U; 310} 311static inline u32 trim_sys_gpcpll_cfg3_vco_ctrl_f(u32 v) 312{ 313 return (v & 0x1ffU) << 0U; 314} 315static inline u32 trim_sys_gpcpll_cfg3_vco_ctrl_m(void) 316{ 317 return 0x1ffU << 0U; 318} 319static inline u32 trim_sys_gpcpll_cfg3_pll_stepb_f(u32 v) 320{ 321 return (v & 0xffU) << 16U; 322} 323static inline u32 trim_sys_gpcpll_cfg3_pll_stepb_m(void) 324{ 325 return 0xffU << 16U; 326} 327static inline u32 trim_sys_gpcpll_cfg3_dfs_testout_v(u32 r) 328{ 329 return (r >> 24U) & 0x7fU; 330} 331static inline u32 trim_sys_gpcpll_dvfs0_r(void) 332{ 333 return 0x00137010U; 334} 335static inline u32 trim_sys_gpcpll_dvfs0_dfs_coeff_f(u32 v) 336{ 337 return (v & 0x7fU) << 0U; 338} 339static inline u32 trim_sys_gpcpll_dvfs0_dfs_coeff_m(void) 340{ 341 return 0x7fU << 0U; 342} 343static inline u32 trim_sys_gpcpll_dvfs0_dfs_coeff_v(u32 r) 344{ 345 return (r >> 0U) & 0x7fU; 346} 347static inline u32 trim_sys_gpcpll_dvfs0_dfs_det_max_f(u32 v) 348{ 349 return (v & 0x7fU) << 8U; 350} 351static inline u32 trim_sys_gpcpll_dvfs0_dfs_det_max_m(void) 352{ 353 return 0x7fU << 8U; 354} 355static inline u32 trim_sys_gpcpll_dvfs0_dfs_det_max_v(u32 r) 356{ 357 return (r >> 8U) & 0x7fU; 358} 359static inline u32 
trim_sys_gpcpll_dvfs0_dfs_dc_offset_f(u32 v) 360{ 361 return (v & 0x3fU) << 16U; 362} 363static inline u32 trim_sys_gpcpll_dvfs0_dfs_dc_offset_m(void) 364{ 365 return 0x3fU << 16U; 366} 367static inline u32 trim_sys_gpcpll_dvfs0_dfs_dc_offset_v(u32 r) 368{ 369 return (r >> 16U) & 0x3fU; 370} 371static inline u32 trim_sys_gpcpll_dvfs0_mode_m(void) 372{ 373 return 0x1U << 28U; 374} 375static inline u32 trim_sys_gpcpll_dvfs0_mode_dvfspll_f(void) 376{ 377 return 0x0U; 378} 379static inline u32 trim_sys_gpcpll_dvfs1_r(void) 380{ 381 return 0x00137014U; 382} 383static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_det_f(u32 v) 384{ 385 return (v & 0x7fU) << 0U; 386} 387static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_det_m(void) 388{ 389 return 0x7fU << 0U; 390} 391static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_det_v(u32 r) 392{ 393 return (r >> 0U) & 0x7fU; 394} 395static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_strb_m(void) 396{ 397 return 0x1U << 7U; 398} 399static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_cal_f(u32 v) 400{ 401 return (v & 0x7fU) << 8U; 402} 403static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_cal_m(void) 404{ 405 return 0x7fU << 8U; 406} 407static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_cal_v(u32 r) 408{ 409 return (r >> 8U) & 0x7fU; 410} 411static inline u32 trim_sys_gpcpll_dvfs1_dfs_ext_sel_m(void) 412{ 413 return 0x1U << 15U; 414} 415static inline u32 trim_sys_gpcpll_dvfs1_dfs_ctrl_f(u32 v) 416{ 417 return (v & 0xfffU) << 16U; 418} 419static inline u32 trim_sys_gpcpll_dvfs1_dfs_ctrl_m(void) 420{ 421 return 0xfffU << 16U; 422} 423static inline u32 trim_sys_gpcpll_dvfs1_dfs_ctrl_v(u32 r) 424{ 425 return (r >> 16U) & 0xfffU; 426} 427static inline u32 trim_sys_gpcpll_dvfs1_en_sdm_m(void) 428{ 429 return 0x1U << 28U; 430} 431static inline u32 trim_sys_gpcpll_dvfs1_en_dfs_m(void) 432{ 433 return 0x1U << 29U; 434} 435static inline u32 trim_sys_gpcpll_dvfs1_en_dfs_cal_m(void) 436{ 437 return 0x1U << 30U; 438} 439static inline u32 
trim_sys_gpcpll_dvfs1_dfs_cal_done_v(u32 r) 440{ 441 return (r >> 31U) & 0x1U; 442} 443static inline u32 trim_sys_gpcpll_dvfs2_r(void) 444{ 445 return 0x00137020U; 446} 447static inline u32 trim_sys_gpcpll_ndiv_slowdown_r(void) 448{ 449 return 0x0013701cU; 450} 451static inline u32 trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_m(void) 452{ 453 return 0x1U << 22U; 454} 455static inline u32 trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_yes_f(void) 456{ 457 return 0x400000U; 458} 459static inline u32 trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_no_f(void) 460{ 461 return 0x0U; 462} 463static inline u32 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(void) 464{ 465 return 0x1U << 31U; 466} 467static inline u32 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_yes_f(void) 468{ 469 return 0x80000000U; 470} 471static inline u32 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f(void) 472{ 473 return 0x0U; 474} 475static inline u32 trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r(void) 476{ 477 return 0x001328a0U; 478} 479static inline u32 trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(u32 r) 480{ 481 return (r >> 24U) & 0x1U; 482} 483static inline u32 trim_gpc_bcast_gpcpll_dvfs2_r(void) 484{ 485 return 0x00132820U; 486} 487static inline u32 trim_sys_bypassctrl_r(void) 488{ 489 return 0x00137340U; 490} 491static inline u32 trim_sys_bypassctrl_gpcpll_m(void) 492{ 493 return 0x1U << 0U; 494} 495static inline u32 trim_sys_bypassctrl_gpcpll_bypassclk_f(void) 496{ 497 return 0x1U; 498} 499static inline u32 trim_sys_bypassctrl_gpcpll_vco_f(void) 500{ 501 return 0x0U; 502} 503#endif
diff --git a/include/nvgpu/hw/gp106/hw_bus_gp106.h b/include/nvgpu/hw/gp106/hw_bus_gp106.h
deleted file mode 100644
index ce3aafd..0000000
--- a/include/nvgpu/hw/gp106/hw_bus_gp106.h
+++ /dev/null
@@ -1,223 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_bus_gp106_h_ 57#define _hw_bus_gp106_h_ 58 59static inline u32 bus_bar0_window_r(void) 60{ 61 return 0x00001700U; 62} 63static inline u32 bus_bar0_window_base_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 bus_bar0_window_target_vid_mem_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) 72{ 73 return 0x2000000U; 74} 75static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) 76{ 77 return 0x3000000U; 78} 79static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) 80{ 81 return 0x00000010U; 82} 83static inline u32 bus_bar1_block_r(void) 84{ 85 return 0x00001704U; 86} 87static inline u32 bus_bar1_block_ptr_f(u32 v) 88{ 89 return (v & 0xfffffffU) << 0U; 90} 91static inline u32 bus_bar1_block_target_vid_mem_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) 96{ 97 return 0x20000000U; 98} 99static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) 100{ 101 return 0x30000000U; 102} 103static inline u32 bus_bar1_block_mode_virtual_f(void) 104{ 105 return 0x80000000U; 106} 107static inline u32 bus_bar2_block_r(void) 108{ 109 return 0x00001714U; 110} 111static inline u32 bus_bar2_block_ptr_f(u32 v) 
112{ 113 return (v & 0xfffffffU) << 0U; 114} 115static inline u32 bus_bar2_block_target_vid_mem_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void) 124{ 125 return 0x30000000U; 126} 127static inline u32 bus_bar2_block_mode_virtual_f(void) 128{ 129 return 0x80000000U; 130} 131static inline u32 bus_bar1_block_ptr_shift_v(void) 132{ 133 return 0x0000000cU; 134} 135static inline u32 bus_bar2_block_ptr_shift_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 bus_bind_status_r(void) 140{ 141 return 0x00001710U; 142} 143static inline u32 bus_bind_status_bar1_pending_v(u32 r) 144{ 145 return (r >> 0U) & 0x1U; 146} 147static inline u32 bus_bind_status_bar1_pending_empty_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 bus_bind_status_bar1_pending_busy_f(void) 152{ 153 return 0x1U; 154} 155static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) 156{ 157 return (r >> 1U) & 0x1U; 158} 159static inline u32 bus_bind_status_bar1_outstanding_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 bus_bind_status_bar1_outstanding_true_f(void) 164{ 165 return 0x2U; 166} 167static inline u32 bus_bind_status_bar2_pending_v(u32 r) 168{ 169 return (r >> 2U) & 0x1U; 170} 171static inline u32 bus_bind_status_bar2_pending_empty_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 bus_bind_status_bar2_pending_busy_f(void) 176{ 177 return 0x4U; 178} 179static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) 180{ 181 return (r >> 3U) & 0x1U; 182} 183static inline u32 bus_bind_status_bar2_outstanding_false_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 bus_bind_status_bar2_outstanding_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 bus_intr_0_r(void) 192{ 193 return 0x00001100U; 194} 195static inline u32 bus_intr_0_pri_squash_m(void) 196{ 197 return 0x1U << 1U; 198} 199static inline u32 
bus_intr_0_pri_fecserr_m(void) 200{ 201 return 0x1U << 2U; 202} 203static inline u32 bus_intr_0_pri_timeout_m(void) 204{ 205 return 0x1U << 3U; 206} 207static inline u32 bus_intr_en_0_r(void) 208{ 209 return 0x00001140U; 210} 211static inline u32 bus_intr_en_0_pri_squash_m(void) 212{ 213 return 0x1U << 1U; 214} 215static inline u32 bus_intr_en_0_pri_fecserr_m(void) 216{ 217 return 0x1U << 2U; 218} 219static inline u32 bus_intr_en_0_pri_timeout_m(void) 220{ 221 return 0x1U << 3U; 222} 223#endif
diff --git a/include/nvgpu/hw/gp106/hw_ccsr_gp106.h b/include/nvgpu/hw/gp106/hw_ccsr_gp106.h
deleted file mode 100644
index cd63777..0000000
--- a/include/nvgpu/hw/gp106/hw_ccsr_gp106.h
+++ /dev/null
@@ -1,163 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ccsr_gp106_h_ 57#define _hw_ccsr_gp106_h_ 58 59static inline u32 ccsr_channel_inst_r(u32 i) 60{ 61 return 0x00800000U + i*8U; 62} 63static inline u32 ccsr_channel_inst__size_1_v(void) 64{ 65 return 0x00001000U; 66} 67static inline u32 ccsr_channel_inst_ptr_f(u32 v) 68{ 69 return (v & 0xfffffffU) << 0U; 70} 71static inline u32 ccsr_channel_inst_target_vid_mem_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) 76{ 77 return 0x20000000U; 78} 79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) 80{ 81 return 0x30000000U; 82} 83static inline u32 ccsr_channel_inst_bind_false_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 ccsr_channel_inst_bind_true_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 ccsr_channel_r(u32 i) 92{ 93 return 0x00800004U + i*8U; 94} 95static inline u32 ccsr_channel__size_1_v(void) 96{ 97 return 0x00001000U; 98} 99static inline u32 ccsr_channel_enable_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 ccsr_channel_enable_set_f(u32 v) 104{ 105 return (v & 0x1U) << 10U; 106} 107static inline u32 ccsr_channel_enable_set_true_f(void) 108{ 109 return 0x400U; 110} 111static inline u32 ccsr_channel_enable_clr_true_f(void) 112{ 113 
return 0x800U; 114} 115static inline u32 ccsr_channel_status_v(u32 r) 116{ 117 return (r >> 24U) & 0xfU; 118} 119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) 120{ 121 return 0x00000002U; 122} 123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) 124{ 125 return 0x00000004U; 126} 127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) 128{ 129 return 0x0000000aU; 130} 131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) 132{ 133 return 0x0000000bU; 134} 135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) 140{ 141 return 0x0000000dU; 142} 143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) 144{ 145 return 0x0000000eU; 146} 147static inline u32 ccsr_channel_next_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 ccsr_channel_next_true_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 ccsr_channel_force_ctx_reload_true_f(void) 156{ 157 return 0x100U; 158} 159static inline u32 ccsr_channel_busy_v(u32 r) 160{ 161 return (r >> 28U) & 0x1U; 162} 163#endif
diff --git a/include/nvgpu/hw/gp106/hw_ce_gp106.h b/include/nvgpu/hw/gp106/hw_ce_gp106.h
deleted file mode 100644
index 8892f42..0000000
--- a/include/nvgpu/hw/gp106/hw_ce_gp106.h
+++ /dev/null
@@ -1,87 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ce_gp106_h_ 57#define _hw_ce_gp106_h_ 58 59static inline u32 ce_intr_status_r(u32 i) 60{ 61 return 0x00104410U + i*128U; 62} 63static inline u32 ce_intr_status_blockpipe_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 ce_intr_status_blockpipe_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 ce_intr_status_nonblockpipe_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 ce_intr_status_nonblockpipe_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 ce_intr_status_launcherr_pending_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 ce_intr_status_launcherr_reset_f(void) 84{ 85 return 0x4U; 86} 87#endif
diff --git a/include/nvgpu/hw/gp106/hw_ctxsw_prog_gp106.h b/include/nvgpu/hw/gp106/hw_ctxsw_prog_gp106.h
deleted file mode 100644
index 3387d23..0000000
--- a/include/nvgpu/hw/gp106/hw_ctxsw_prog_gp106.h
+++ /dev/null
@@ -1,295 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ctxsw_prog_gp106_h_ 57#define _hw_ctxsw_prog_gp106_h_ 58 59static inline u32 ctxsw_prog_fecs_header_v(void) 60{ 61 return 0x00000100U; 62} 63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ctxsw_prog_main_image_patch_count_o(void) 68{ 69 return 0x00000010U; 70} 71static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) 72{ 73 return 0x00000014U; 74} 75static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) 76{ 77 return 0x00000018U; 78} 79static inline u32 ctxsw_prog_main_image_zcull_o(void) 80{ 81 return 0x0000001cU; 82} 83static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) 84{ 85 return 0x00000001U; 86} 87static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) 88{ 89 return 0x00000002U; 90} 91static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) 92{ 93 return 0x00000020U; 94} 95static inline u32 ctxsw_prog_main_image_pm_o(void) 96{ 97 return 0x00000028U; 98} 99static inline u32 ctxsw_prog_main_image_pm_mode_m(void) 100{ 101 return 0x7U << 0U; 102} 103static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) 108{ 109 return 
0x7U << 3U; 110} 111static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) 112{ 113 return 0x8U; 114} 115static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) 120{ 121 return 0x0000002cU; 122} 123static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) 124{ 125 return 0x000000f4U; 126} 127static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) 128{ 129 return 0x000000d0U; 130} 131static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) 132{ 133 return 0x000000d4U; 134} 135static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) 136{ 137 return 0x000000d8U; 138} 139static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void) 140{ 141 return 0x000000dcU; 142} 143static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) 144{ 145 return 0x000000f8U; 146} 147static inline u32 ctxsw_prog_main_image_magic_value_o(void) 148{ 149 return 0x000000fcU; 150} 151static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) 152{ 153 return 0x600dc0deU; 154} 155static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) 156{ 157 return 0x0000000cU; 158} 159static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) 160{ 161 return (r >> 0U) & 0xffffU; 162} 163static inline u32 ctxsw_prog_local_image_ppc_info_o(void) 164{ 165 return 0x000000f4U; 166} 167static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) 168{ 169 return (r >> 0U) & 0xffffU; 170} 171static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) 172{ 173 return (r >> 16U) & 0xffffU; 174} 175static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) 176{ 177 return 0x000000f8U; 178} 179static inline u32 ctxsw_prog_local_magic_value_o(void) 180{ 181 return 0x000000fcU; 182} 183static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) 184{ 185 return 0xad0becabU; 186} 187static inline u32 
ctxsw_prog_main_extended_buffer_ctl_o(void) 188{ 189 return 0x000000ecU; 190} 191static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) 192{ 193 return (r >> 0U) & 0xffffU; 194} 195static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) 196{ 197 return (r >> 16U) & 0xffU; 198} 199static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) 200{ 201 return 0x00000100U; 202} 203static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) 204{ 205 return 0x00000004U; 206} 207static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) 208{ 209 return 0x00000000U; 210} 211static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) 212{ 213 return 0x00000002U; 214} 215static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) 216{ 217 return 0x000000a0U; 218} 219static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) 220{ 221 return 2U; 222} 223static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) 224{ 225 return (v & 0x3U) << 0U; 226} 227static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) 228{ 229 return 0x3U << 0U; 230} 231static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) 232{ 233 return (r >> 0U) & 0x3U; 234} 235static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) 236{ 237 return 0x0U; 238} 239static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) 240{ 241 return 0x2U; 242} 243static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) 244{ 245 return 0x000000a4U; 246} 247static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) 248{ 249 return 0x000000a8U; 250} 251static inline u32 ctxsw_prog_main_image_misc_options_o(void) 252{ 253 return 0x0000003cU; 254} 255static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) 256{ 257 return 0x1U << 3U; 258} 259static 
inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) 260{ 261 return 0x0U; 262} 263static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) 264{ 265 return 0x00000080U; 266} 267static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) 268{ 269 return (v & 0x3U) << 0U; 270} 271static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) 272{ 273 return 0x1U; 274} 275static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) 276{ 277 return 0x00000068U; 278} 279static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) 280{ 281 return 0x00000084U; 282} 283static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) 284{ 285 return (v & 0x3U) << 0U; 286} 287static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) 288{ 289 return 0x1U; 290} 291static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) 292{ 293 return 0x2U; 294} 295#endif
diff --git a/include/nvgpu/hw/gp106/hw_falcon_gp106.h b/include/nvgpu/hw/gp106/hw_falcon_gp106.h
deleted file mode 100644
index d899e3f..0000000
--- a/include/nvgpu/hw/gp106/hw_falcon_gp106.h
+++ /dev/null
@@ -1,603 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_falcon_gp106_h_ 57#define _hw_falcon_gp106_h_ 58 59static inline u32 falcon_falcon_irqsset_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 falcon_falcon_irqsclr_r(void) 68{ 69 return 0x00000004U; 70} 71static inline u32 falcon_falcon_irqstat_r(void) 72{ 73 return 0x00000008U; 74} 75static inline u32 falcon_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 falcon_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 falcon_falcon_irqmode_r(void) 88{ 89 return 0x0000000cU; 90} 91static inline u32 falcon_falcon_irqmset_r(void) 92{ 93 return 0x00000010U; 94} 95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 falcon_falcon_irqmset_halt_f(u32 v) 112{ 113 
return (v & 0x1U) << 4U; 114} 115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 falcon_falcon_irqmclr_r(void) 128{ 129 return 0x00000014U; 130} 131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 falcon_falcon_irqmask_r(void) 168{ 169 return 0x00000018U; 170} 171static inline u32 falcon_falcon_irqdest_r(void) 172{ 173 return 0x0000001cU; 174} 175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 
falcon_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 falcon_falcon_curctx_r(void) 248{ 249 return 0x00000050U; 250} 251static inline u32 falcon_falcon_nxtctx_r(void) 252{ 253 return 0x00000054U; 254} 255static inline u32 falcon_falcon_mailbox0_r(void) 256{ 257 return 0x00000040U; 258} 259static inline u32 falcon_falcon_mailbox1_r(void) 260{ 261 return 0x00000044U; 262} 263static inline u32 falcon_falcon_itfen_r(void) 264{ 265 return 0x00000048U; 266} 267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 falcon_falcon_idlestate_r(void) 272{ 273 return 0x0000004cU; 274} 275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) 
276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 falcon_falcon_os_r(void) 284{ 285 return 0x00000080U; 286} 287static inline u32 falcon_falcon_engctl_r(void) 288{ 289 return 0x000000a4U; 290} 291static inline u32 falcon_falcon_cpuctl_r(void) 292{ 293 return 0x00000100U; 294} 295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) 300{ 301 return (v & 0x1U) << 2U; 302} 303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) 304{ 305 return (v & 0x1U) << 3U; 306} 307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) 308{ 309 return (v & 0x1U) << 4U; 310} 311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) 312{ 313 return 0x1U << 4U; 314} 315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) 316{ 317 return (r >> 4U) & 0x1U; 318} 319static inline u32 falcon_falcon_cpuctl_stopped_m(void) 320{ 321 return 0x1U << 5U; 322} 323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 324{ 325 return (v & 0x1U) << 6U; 326} 327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void) 328{ 329 return 0x1U << 6U; 330} 331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 332{ 333 return (r >> 6U) & 0x1U; 334} 335static inline u32 falcon_falcon_cpuctl_alias_r(void) 336{ 337 return 0x00000130U; 338} 339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) 340{ 341 return (v & 0x1U) << 1U; 342} 343static inline u32 falcon_falcon_imemc_r(u32 i) 344{ 345 return 0x00000180U + i*16U; 346} 347static inline u32 falcon_falcon_imemc_offs_f(u32 v) 348{ 349 return (v & 0x3fU) << 2U; 350} 351static inline u32 falcon_falcon_imemc_blk_f(u32 v) 352{ 353 return (v & 0xffU) << 8U; 354} 355static inline u32 falcon_falcon_imemc_aincw_f(u32 v) 356{ 357 return (v & 0x1U) << 24U; 358} 359static inline u32 
falcon_falcon_imemc_secure_f(u32 v) 360{ 361 return (v & 0x1U) << 28U; 362} 363static inline u32 falcon_falcon_imemd_r(u32 i) 364{ 365 return 0x00000184U + i*16U; 366} 367static inline u32 falcon_falcon_imemt_r(u32 i) 368{ 369 return 0x00000188U + i*16U; 370} 371static inline u32 falcon_falcon_sctl_r(void) 372{ 373 return 0x00000240U; 374} 375static inline u32 falcon_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 falcon_falcon_bootvec_r(void) 380{ 381 return 0x00000104U; 382} 383static inline u32 falcon_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 falcon_falcon_dmactl_r(void) 388{ 389 return 0x0000010cU; 390} 391static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) 400{ 401 return (v & 0x1U) << 0U; 402} 403static inline u32 falcon_falcon_hwcfg_r(void) 404{ 405 return 0x00000108U; 406} 407static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) 408{ 409 return (r >> 0U) & 0x1ffU; 410} 411static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) 412{ 413 return (r >> 9U) & 0x1ffU; 414} 415static inline u32 falcon_falcon_dmatrfbase_r(void) 416{ 417 return 0x00000110U; 418} 419static inline u32 falcon_falcon_dmatrfbase1_r(void) 420{ 421 return 0x00000128U; 422} 423static inline u32 falcon_falcon_dmatrfmoffs_r(void) 424{ 425 return 0x00000114U; 426} 427static inline u32 falcon_falcon_dmatrfcmd_r(void) 428{ 429 return 0x00000118U; 430} 431static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) 432{ 433 return (v & 0x1U) << 4U; 434} 435static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) 436{ 437 return (v & 0x1U) << 5U; 438} 439static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) 440{ 441 return (v & 0x7U) << 8U; 442} 443static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) 444{ 
445 return (v & 0x7U) << 12U; 446} 447static inline u32 falcon_falcon_dmatrffboffs_r(void) 448{ 449 return 0x0000011cU; 450} 451static inline u32 falcon_falcon_imctl_debug_r(void) 452{ 453 return 0x0000015cU; 454} 455static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) 456{ 457 return (v & 0xffffffU) << 0U; 458} 459static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) 460{ 461 return (v & 0x7U) << 24U; 462} 463static inline u32 falcon_falcon_imstat_r(void) 464{ 465 return 0x00000144U; 466} 467static inline u32 falcon_falcon_traceidx_r(void) 468{ 469 return 0x00000148U; 470} 471static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) 472{ 473 return (r >> 16U) & 0xffU; 474} 475static inline u32 falcon_falcon_traceidx_idx_f(u32 v) 476{ 477 return (v & 0xffU) << 0U; 478} 479static inline u32 falcon_falcon_tracepc_r(void) 480{ 481 return 0x0000014cU; 482} 483static inline u32 falcon_falcon_tracepc_pc_v(u32 r) 484{ 485 return (r >> 0U) & 0xffffffU; 486} 487static inline u32 falcon_falcon_exterraddr_r(void) 488{ 489 return 0x00000168U; 490} 491static inline u32 falcon_falcon_exterrstat_r(void) 492{ 493 return 0x0000016cU; 494} 495static inline u32 falcon_falcon_exterrstat_valid_m(void) 496{ 497 return 0x1U << 31U; 498} 499static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) 500{ 501 return (r >> 31U) & 0x1U; 502} 503static inline u32 falcon_falcon_exterrstat_valid_true_v(void) 504{ 505 return 0x00000001U; 506} 507static inline u32 falcon_falcon_icd_cmd_r(void) 508{ 509 return 0x00000200U; 510} 511static inline u32 falcon_falcon_icd_cmd_opc_s(void) 512{ 513 return 4U; 514} 515static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 516{ 517 return (v & 0xfU) << 0U; 518} 519static inline u32 falcon_falcon_icd_cmd_opc_m(void) 520{ 521 return 0xfU << 0U; 522} 523static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) 524{ 525 return (r >> 0U) & 0xfU; 526} 527static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) 528{ 529 return 0x8U; 530} 531static inline u32 
falcon_falcon_icd_cmd_opc_rstat_f(void) 532{ 533 return 0xeU; 534} 535static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) 536{ 537 return (v & 0x1fU) << 8U; 538} 539static inline u32 falcon_falcon_icd_rdata_r(void) 540{ 541 return 0x0000020cU; 542} 543static inline u32 falcon_falcon_dmemc_r(u32 i) 544{ 545 return 0x000001c0U + i*8U; 546} 547static inline u32 falcon_falcon_dmemc_offs_f(u32 v) 548{ 549 return (v & 0x3fU) << 2U; 550} 551static inline u32 falcon_falcon_dmemc_offs_m(void) 552{ 553 return 0x3fU << 2U; 554} 555static inline u32 falcon_falcon_dmemc_blk_f(u32 v) 556{ 557 return (v & 0xffU) << 8U; 558} 559static inline u32 falcon_falcon_dmemc_blk_m(void) 560{ 561 return 0xffU << 8U; 562} 563static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) 564{ 565 return (v & 0x1U) << 24U; 566} 567static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) 568{ 569 return (v & 0x1U) << 25U; 570} 571static inline u32 falcon_falcon_dmemd_r(u32 i) 572{ 573 return 0x000001c4U + i*8U; 574} 575static inline u32 falcon_falcon_debug1_r(void) 576{ 577 return 0x00000090U; 578} 579static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) 580{ 581 return 1U; 582} 583static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) 584{ 585 return (v & 0x1U) << 16U; 586} 587static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) 588{ 589 return 0x1U << 16U; 590} 591static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) 592{ 593 return (r >> 16U) & 0x1U; 594} 595static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 falcon_falcon_debuginfo_r(void) 600{ 601 return 0x00000094U; 602} 603#endif
diff --git a/include/nvgpu/hw/gp106/hw_fb_gp106.h b/include/nvgpu/hw/gp106/hw_fb_gp106.h
deleted file mode 100644
index 1c2a1ac..0000000
--- a/include/nvgpu/hw/gp106/hw_fb_gp106.h
+++ /dev/null
@@ -1,563 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fb_gp106_h_ 57#define _hw_fb_gp106_h_ 58 59static inline u32 fb_fbhub_num_active_ltcs_r(void) 60{ 61 return 0x00100800U; 62} 63static inline u32 fb_mmu_ctrl_r(void) 64{ 65 return 0x00100c80U; 66} 67static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) 68{ 69 return (r >> 15U) & 0x1U; 70} 71static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) 76{ 77 return (r >> 16U) & 0xffU; 78} 79static inline u32 fb_priv_mmu_phy_secure_r(void) 80{ 81 return 0x00100ce4U; 82} 83static inline u32 fb_mmu_invalidate_pdb_r(void) 84{ 85 return 0x00100cb8U; 86} 87static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) 92{ 93 return 0x2U; 94} 95static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) 96{ 97 return (v & 0xfffffffU) << 4U; 98} 99static inline u32 fb_mmu_invalidate_r(void) 100{ 101 return 0x00100cbcU; 102} 103static inline u32 fb_mmu_invalidate_all_va_true_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) 108{ 109 return 0x2U; 110} 111static inline u32 fb_mmu_invalidate_hubtlb_only_s(void) 112{ 113 return 1U; 114} 
115static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v) 116{ 117 return (v & 0x1U) << 2U; 118} 119static inline u32 fb_mmu_invalidate_hubtlb_only_m(void) 120{ 121 return 0x1U << 2U; 122} 123static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r) 124{ 125 return (r >> 2U) & 0x1U; 126} 127static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void) 128{ 129 return 0x4U; 130} 131static inline u32 fb_mmu_invalidate_replay_s(void) 132{ 133 return 3U; 134} 135static inline u32 fb_mmu_invalidate_replay_f(u32 v) 136{ 137 return (v & 0x7U) << 3U; 138} 139static inline u32 fb_mmu_invalidate_replay_m(void) 140{ 141 return 0x7U << 3U; 142} 143static inline u32 fb_mmu_invalidate_replay_v(u32 r) 144{ 145 return (r >> 3U) & 0x7U; 146} 147static inline u32 fb_mmu_invalidate_replay_none_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 fb_mmu_invalidate_replay_start_f(void) 152{ 153 return 0x8U; 154} 155static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void) 156{ 157 return 0x10U; 158} 159static inline u32 fb_mmu_invalidate_replay_cancel_targeted_f(void) 160{ 161 return 0x18U; 162} 163static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void) 164{ 165 return 0x20U; 166} 167static inline u32 fb_mmu_invalidate_replay_cancel_f(void) 168{ 169 return 0x20U; 170} 171static inline u32 fb_mmu_invalidate_sys_membar_s(void) 172{ 173 return 1U; 174} 175static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v) 176{ 177 return (v & 0x1U) << 6U; 178} 179static inline u32 fb_mmu_invalidate_sys_membar_m(void) 180{ 181 return 0x1U << 6U; 182} 183static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r) 184{ 185 return (r >> 6U) & 0x1U; 186} 187static inline u32 fb_mmu_invalidate_sys_membar_true_f(void) 188{ 189 return 0x40U; 190} 191static inline u32 fb_mmu_invalidate_ack_s(void) 192{ 193 return 2U; 194} 195static inline u32 fb_mmu_invalidate_ack_f(u32 v) 196{ 197 return (v & 0x3U) << 7U; 198} 199static inline u32 fb_mmu_invalidate_ack_m(void) 200{ 201 return 0x3U << 7U; 202} 
203static inline u32 fb_mmu_invalidate_ack_v(u32 r) 204{ 205 return (r >> 7U) & 0x3U; 206} 207static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void) 212{ 213 return 0x100U; 214} 215static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void) 216{ 217 return 0x80U; 218} 219static inline u32 fb_mmu_invalidate_cancel_client_id_s(void) 220{ 221 return 6U; 222} 223static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v) 224{ 225 return (v & 0x3fU) << 9U; 226} 227static inline u32 fb_mmu_invalidate_cancel_client_id_m(void) 228{ 229 return 0x3fU << 9U; 230} 231static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r) 232{ 233 return (r >> 9U) & 0x3fU; 234} 235static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void) 236{ 237 return 5U; 238} 239static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v) 240{ 241 return (v & 0x1fU) << 15U; 242} 243static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void) 244{ 245 return 0x1fU << 15U; 246} 247static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r) 248{ 249 return (r >> 15U) & 0x1fU; 250} 251static inline u32 fb_mmu_invalidate_cancel_client_type_s(void) 252{ 253 return 1U; 254} 255static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v) 256{ 257 return (v & 0x1U) << 20U; 258} 259static inline u32 fb_mmu_invalidate_cancel_client_type_m(void) 260{ 261 return 0x1U << 20U; 262} 263static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r) 264{ 265 return (r >> 20U) & 0x1U; 266} 267static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void) 272{ 273 return 0x100000U; 274} 275static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void) 276{ 277 return 3U; 278} 279static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v) 280{ 281 return (v & 0x7U) << 24U; 282} 283static inline u32 
fb_mmu_invalidate_cancel_cache_level_m(void) 284{ 285 return 0x7U << 24U; 286} 287static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r) 288{ 289 return (r >> 24U) & 0x7U; 290} 291static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void) 292{ 293 return 0x0U; 294} 295static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void) 296{ 297 return 0x1000000U; 298} 299static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void) 300{ 301 return 0x2000000U; 302} 303static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void) 304{ 305 return 0x3000000U; 306} 307static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void) 308{ 309 return 0x4000000U; 310} 311static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void) 312{ 313 return 0x5000000U; 314} 315static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void) 316{ 317 return 0x6000000U; 318} 319static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void) 320{ 321 return 0x7000000U; 322} 323static inline u32 fb_mmu_invalidate_trigger_s(void) 324{ 325 return 1U; 326} 327static inline u32 fb_mmu_invalidate_trigger_f(u32 v) 328{ 329 return (v & 0x1U) << 31U; 330} 331static inline u32 fb_mmu_invalidate_trigger_m(void) 332{ 333 return 0x1U << 31U; 334} 335static inline u32 fb_mmu_invalidate_trigger_v(u32 r) 336{ 337 return (r >> 31U) & 0x1U; 338} 339static inline u32 fb_mmu_invalidate_trigger_true_f(void) 340{ 341 return 0x80000000U; 342} 343static inline u32 fb_mmu_debug_wr_r(void) 344{ 345 return 0x00100cc8U; 346} 347static inline u32 fb_mmu_debug_wr_aperture_s(void) 348{ 349 return 2U; 350} 351static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) 352{ 353 return (v & 0x3U) << 0U; 354} 355static inline u32 fb_mmu_debug_wr_aperture_m(void) 356{ 357 return 0x3U << 0U; 358} 359static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) 360{ 361 return (r >> 0U) & 0x3U; 362} 363static inline u32 
fb_mmu_debug_wr_aperture_vid_mem_f(void) 364{ 365 return 0x0U; 366} 367static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) 368{ 369 return 0x2U; 370} 371static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) 372{ 373 return 0x3U; 374} 375static inline u32 fb_mmu_debug_wr_vol_false_f(void) 376{ 377 return 0x0U; 378} 379static inline u32 fb_mmu_debug_wr_vol_true_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 fb_mmu_debug_wr_vol_true_f(void) 384{ 385 return 0x4U; 386} 387static inline u32 fb_mmu_debug_wr_addr_f(u32 v) 388{ 389 return (v & 0xfffffffU) << 4U; 390} 391static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) 392{ 393 return 0x0000000cU; 394} 395static inline u32 fb_mmu_debug_rd_r(void) 396{ 397 return 0x00100cccU; 398} 399static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) 400{ 401 return 0x0U; 402} 403static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) 404{ 405 return 0x2U; 406} 407static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) 408{ 409 return 0x3U; 410} 411static inline u32 fb_mmu_debug_rd_vol_false_f(void) 412{ 413 return 0x0U; 414} 415static inline u32 fb_mmu_debug_rd_addr_f(u32 v) 416{ 417 return (v & 0xfffffffU) << 4U; 418} 419static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) 420{ 421 return 0x0000000cU; 422} 423static inline u32 fb_mmu_debug_ctrl_r(void) 424{ 425 return 0x00100cc4U; 426} 427static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) 428{ 429 return (r >> 16U) & 0x1U; 430} 431static inline u32 fb_mmu_debug_ctrl_debug_m(void) 432{ 433 return 0x1U << 16U; 434} 435static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) 436{ 437 return 0x00000001U; 438} 439static inline u32 fb_mmu_debug_ctrl_debug_enabled_f(void) 440{ 441 return 0x10000U; 442} 443static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) 444{ 445 return 0x00000000U; 446} 447static inline u32 fb_mmu_debug_ctrl_debug_disabled_f(void) 448{ 449 return 0x0U; 450} 451static inline u32 
fb_mmu_priv_level_mask_r(void) 452{ 453 return 0x00100cdcU; 454} 455static inline u32 fb_mmu_priv_level_mask_write_violation_m(void) 456{ 457 return 0x1U << 7U; 458} 459static inline u32 fb_niso_flush_sysmem_addr_r(void) 460{ 461 return 0x00100c10U; 462} 463static inline u32 fb_mmu_local_memory_range_r(void) 464{ 465 return 0x00100ce0U; 466} 467static inline u32 fb_mmu_local_memory_range_lower_scale_v(u32 r) 468{ 469 return (r >> 0U) & 0xfU; 470} 471static inline u32 fb_mmu_local_memory_range_lower_mag_v(u32 r) 472{ 473 return (r >> 4U) & 0x3fU; 474} 475static inline u32 fb_mmu_local_memory_range_ecc_mode_v(u32 r) 476{ 477 return (r >> 30U) & 0x1U; 478} 479static inline u32 fb_fbpa_fbio_delay_r(void) 480{ 481 return 0x009a065cU; 482} 483static inline u32 fb_fbpa_fbio_delay_src_f(u32 v) 484{ 485 return (v & 0xfU) << 0U; 486} 487static inline u32 fb_fbpa_fbio_delay_src_m(void) 488{ 489 return 0xfU << 0U; 490} 491static inline u32 fb_fbpa_fbio_delay_src_v(u32 r) 492{ 493 return (r >> 0U) & 0xfU; 494} 495static inline u32 fb_fbpa_fbio_delay_src_max_v(void) 496{ 497 return 0x00000002U; 498} 499static inline u32 fb_fbpa_fbio_delay_priv_f(u32 v) 500{ 501 return (v & 0xfU) << 4U; 502} 503static inline u32 fb_fbpa_fbio_delay_priv_m(void) 504{ 505 return 0xfU << 4U; 506} 507static inline u32 fb_fbpa_fbio_delay_priv_v(u32 r) 508{ 509 return (r >> 4U) & 0xfU; 510} 511static inline u32 fb_fbpa_fbio_delay_priv_max_v(void) 512{ 513 return 0x00000002U; 514} 515static inline u32 fb_fbpa_fbio_cmd_delay_r(void) 516{ 517 return 0x009a08e0U; 518} 519static inline u32 fb_fbpa_fbio_cmd_delay_cmd_src_f(u32 v) 520{ 521 return (v & 0xfU) << 0U; 522} 523static inline u32 fb_fbpa_fbio_cmd_delay_cmd_src_m(void) 524{ 525 return 0xfU << 0U; 526} 527static inline u32 fb_fbpa_fbio_cmd_delay_cmd_src_v(u32 r) 528{ 529 return (r >> 0U) & 0xfU; 530} 531static inline u32 fb_fbpa_fbio_cmd_delay_cmd_src_max_v(void) 532{ 533 return 0x00000001U; 534} 535static inline u32 
fb_fbpa_fbio_cmd_delay_cmd_priv_f(u32 v) 536{ 537 return (v & 0xfU) << 4U; 538} 539static inline u32 fb_fbpa_fbio_cmd_delay_cmd_priv_m(void) 540{ 541 return 0xfU << 4U; 542} 543static inline u32 fb_fbpa_fbio_cmd_delay_cmd_priv_v(u32 r) 544{ 545 return (r >> 4U) & 0xfU; 546} 547static inline u32 fb_fbpa_fbio_cmd_delay_cmd_priv_max_v(void) 548{ 549 return 0x00000001U; 550} 551static inline u32 fb_niso_scrub_status_r(void) 552{ 553 return 0x00100b20U; 554} 555static inline u32 fb_niso_scrub_status_flag_v(u32 r) 556{ 557 return (r >> 0U) & 0x1U; 558} 559static inline u32 fb_fbpa_fbio_iref_byte_rx_ctrl_r(void) 560{ 561 return 0x009a0eb0U; 562} 563#endif
diff --git a/include/nvgpu/hw/gp106/hw_fbpa_gp106.h b/include/nvgpu/hw/gp106/hw_fbpa_gp106.h
deleted file mode 100644
index 797a40c..0000000
--- a/include/nvgpu/hw/gp106/hw_fbpa_gp106.h
+++ /dev/null
@@ -1,67 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fbpa_gp106_h_ 57#define _hw_fbpa_gp106_h_ 58 59static inline u32 fbpa_cstatus_r(void) 60{ 61 return 0x009a020cU; 62} 63static inline u32 fbpa_cstatus_ramamount_v(u32 r) 64{ 65 return (r >> 0U) & 0x1ffffU; 66} 67#endif
diff --git a/include/nvgpu/hw/gp106/hw_fifo_gp106.h b/include/nvgpu/hw/gp106/hw_fifo_gp106.h
deleted file mode 100644
index 804e9e4..0000000
--- a/include/nvgpu/hw/gp106/hw_fifo_gp106.h
+++ /dev/null
@@ -1,695 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fifo_gp106_h_ 57#define _hw_fifo_gp106_h_ 58 59static inline u32 fifo_bar1_base_r(void) 60{ 61 return 0x00002254U; 62} 63static inline u32 fifo_bar1_base_ptr_f(u32 v) 64{ 65 return (v & 0xfffffffU) << 0U; 66} 67static inline u32 fifo_bar1_base_ptr_align_shift_v(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 fifo_bar1_base_valid_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fifo_bar1_base_valid_true_f(void) 76{ 77 return 0x10000000U; 78} 79static inline u32 fifo_runlist_base_r(void) 80{ 81 return 0x00002270U; 82} 83static inline u32 fifo_runlist_base_ptr_f(u32 v) 84{ 85 return (v & 0xfffffffU) << 0U; 86} 87static inline u32 fifo_runlist_base_target_vid_mem_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) 92{ 93 return 0x20000000U; 94} 95static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) 96{ 97 return 0x30000000U; 98} 99static inline u32 fifo_runlist_r(void) 100{ 101 return 0x00002274U; 102} 103static inline u32 fifo_runlist_engine_f(u32 v) 104{ 105 return (v & 0xfU) << 20U; 106} 107static inline u32 fifo_eng_runlist_base_r(u32 i) 108{ 109 return 0x00002280U + i*8U; 110} 111static inline u32 fifo_eng_runlist_base__size_1_v(void) 112{ 113 return 
0x00000007U; 114} 115static inline u32 fifo_eng_runlist_r(u32 i) 116{ 117 return 0x00002284U + i*8U; 118} 119static inline u32 fifo_eng_runlist__size_1_v(void) 120{ 121 return 0x00000007U; 122} 123static inline u32 fifo_eng_runlist_length_f(u32 v) 124{ 125 return (v & 0xffffU) << 0U; 126} 127static inline u32 fifo_eng_runlist_length_max_v(void) 128{ 129 return 0x0000ffffU; 130} 131static inline u32 fifo_eng_runlist_pending_true_f(void) 132{ 133 return 0x100000U; 134} 135static inline u32 fifo_pb_timeslice_r(u32 i) 136{ 137 return 0x00002350U + i*4U; 138} 139static inline u32 fifo_pb_timeslice_timeout_16_f(void) 140{ 141 return 0x10U; 142} 143static inline u32 fifo_pb_timeslice_timescale_0_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 fifo_pb_timeslice_enable_true_f(void) 148{ 149 return 0x10000000U; 150} 151static inline u32 fifo_pbdma_map_r(u32 i) 152{ 153 return 0x00002390U + i*4U; 154} 155static inline u32 fifo_intr_0_r(void) 156{ 157 return 0x00002100U; 158} 159static inline u32 fifo_intr_0_bind_error_pending_f(void) 160{ 161 return 0x1U; 162} 163static inline u32 fifo_intr_0_bind_error_reset_f(void) 164{ 165 return 0x1U; 166} 167static inline u32 fifo_intr_0_sched_error_pending_f(void) 168{ 169 return 0x100U; 170} 171static inline u32 fifo_intr_0_sched_error_reset_f(void) 172{ 173 return 0x100U; 174} 175static inline u32 fifo_intr_0_chsw_error_pending_f(void) 176{ 177 return 0x10000U; 178} 179static inline u32 fifo_intr_0_chsw_error_reset_f(void) 180{ 181 return 0x10000U; 182} 183static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) 184{ 185 return 0x800000U; 186} 187static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) 188{ 189 return 0x800000U; 190} 191static inline u32 fifo_intr_0_lb_error_pending_f(void) 192{ 193 return 0x1000000U; 194} 195static inline u32 fifo_intr_0_lb_error_reset_f(void) 196{ 197 return 0x1000000U; 198} 199static inline u32 fifo_intr_0_replayable_fault_error_pending_f(void) 200{ 201 return 0x2000000U; 202} 
203static inline u32 fifo_intr_0_dropped_mmu_fault_pending_f(void) 204{ 205 return 0x8000000U; 206} 207static inline u32 fifo_intr_0_dropped_mmu_fault_reset_f(void) 208{ 209 return 0x8000000U; 210} 211static inline u32 fifo_intr_0_mmu_fault_pending_f(void) 212{ 213 return 0x10000000U; 214} 215static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) 216{ 217 return 0x20000000U; 218} 219static inline u32 fifo_intr_0_runlist_event_pending_f(void) 220{ 221 return 0x40000000U; 222} 223static inline u32 fifo_intr_0_channel_intr_pending_f(void) 224{ 225 return 0x80000000U; 226} 227static inline u32 fifo_intr_en_0_r(void) 228{ 229 return 0x00002140U; 230} 231static inline u32 fifo_intr_en_0_sched_error_f(u32 v) 232{ 233 return (v & 0x1U) << 8U; 234} 235static inline u32 fifo_intr_en_0_sched_error_m(void) 236{ 237 return 0x1U << 8U; 238} 239static inline u32 fifo_intr_en_0_mmu_fault_f(u32 v) 240{ 241 return (v & 0x1U) << 28U; 242} 243static inline u32 fifo_intr_en_0_mmu_fault_m(void) 244{ 245 return 0x1U << 28U; 246} 247static inline u32 fifo_intr_en_1_r(void) 248{ 249 return 0x00002528U; 250} 251static inline u32 fifo_intr_bind_error_r(void) 252{ 253 return 0x0000252cU; 254} 255static inline u32 fifo_intr_sched_error_r(void) 256{ 257 return 0x0000254cU; 258} 259static inline u32 fifo_intr_sched_error_code_f(u32 v) 260{ 261 return (v & 0xffU) << 0U; 262} 263static inline u32 fifo_intr_sched_error_code_ctxsw_timeout_v(void) 264{ 265 return 0x0000000aU; 266} 267static inline u32 fifo_intr_chsw_error_r(void) 268{ 269 return 0x0000256cU; 270} 271static inline u32 fifo_intr_mmu_fault_id_r(void) 272{ 273 return 0x0000259cU; 274} 275static inline u32 fifo_intr_mmu_fault_eng_id_graphics_v(void) 276{ 277 return 0x00000000U; 278} 279static inline u32 fifo_intr_mmu_fault_eng_id_graphics_f(void) 280{ 281 return 0x0U; 282} 283static inline u32 fifo_intr_mmu_fault_inst_r(u32 i) 284{ 285 return 0x00002800U + i*16U; 286} 287static inline u32 fifo_intr_mmu_fault_inst_ptr_v(u32 r) 288{ 289 
return (r >> 0U) & 0xfffffffU; 290} 291static inline u32 fifo_intr_mmu_fault_inst_ptr_align_shift_v(void) 292{ 293 return 0x0000000cU; 294} 295static inline u32 fifo_intr_mmu_fault_lo_r(u32 i) 296{ 297 return 0x00002804U + i*16U; 298} 299static inline u32 fifo_intr_mmu_fault_hi_r(u32 i) 300{ 301 return 0x00002808U + i*16U; 302} 303static inline u32 fifo_intr_mmu_fault_info_r(u32 i) 304{ 305 return 0x0000280cU + i*16U; 306} 307static inline u32 fifo_intr_mmu_fault_info_type_v(u32 r) 308{ 309 return (r >> 0U) & 0x1fU; 310} 311static inline u32 fifo_intr_mmu_fault_info_client_type_v(u32 r) 312{ 313 return (r >> 20U) & 0x1U; 314} 315static inline u32 fifo_intr_mmu_fault_info_client_type_gpc_v(void) 316{ 317 return 0x00000000U; 318} 319static inline u32 fifo_intr_mmu_fault_info_client_type_hub_v(void) 320{ 321 return 0x00000001U; 322} 323static inline u32 fifo_intr_mmu_fault_info_client_v(u32 r) 324{ 325 return (r >> 8U) & 0x7fU; 326} 327static inline u32 fifo_intr_pbdma_id_r(void) 328{ 329 return 0x000025a0U; 330} 331static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) 332{ 333 return (v & 0x1U) << (0U + i*1U); 334} 335static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) 336{ 337 return (r >> (0U + i*1U)) & 0x1U; 338} 339static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) 340{ 341 return 0x00000004U; 342} 343static inline u32 fifo_intr_runlist_r(void) 344{ 345 return 0x00002a00U; 346} 347static inline u32 fifo_fb_timeout_r(void) 348{ 349 return 0x00002a04U; 350} 351static inline u32 fifo_fb_timeout_period_m(void) 352{ 353 return 0x3fffffffU << 0U; 354} 355static inline u32 fifo_fb_timeout_period_max_f(void) 356{ 357 return 0x3fffffffU; 358} 359static inline u32 fifo_error_sched_disable_r(void) 360{ 361 return 0x0000262cU; 362} 363static inline u32 fifo_sched_disable_r(void) 364{ 365 return 0x00002630U; 366} 367static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) 368{ 369 return (v & 0x1U) << (0U + i*1U); 370} 371static inline u32 
fifo_sched_disable_runlist_m(u32 i) 372{ 373 return 0x1U << (0U + i*1U); 374} 375static inline u32 fifo_sched_disable_true_v(void) 376{ 377 return 0x00000001U; 378} 379static inline u32 fifo_preempt_r(void) 380{ 381 return 0x00002634U; 382} 383static inline u32 fifo_preempt_pending_true_f(void) 384{ 385 return 0x100000U; 386} 387static inline u32 fifo_preempt_type_channel_f(void) 388{ 389 return 0x0U; 390} 391static inline u32 fifo_preempt_type_tsg_f(void) 392{ 393 return 0x1000000U; 394} 395static inline u32 fifo_preempt_chid_f(u32 v) 396{ 397 return (v & 0xfffU) << 0U; 398} 399static inline u32 fifo_preempt_id_f(u32 v) 400{ 401 return (v & 0xfffU) << 0U; 402} 403static inline u32 fifo_trigger_mmu_fault_r(u32 i) 404{ 405 return 0x00002a30U + i*4U; 406} 407static inline u32 fifo_trigger_mmu_fault_id_f(u32 v) 408{ 409 return (v & 0x1fU) << 0U; 410} 411static inline u32 fifo_trigger_mmu_fault_enable_f(u32 v) 412{ 413 return (v & 0x1U) << 8U; 414} 415static inline u32 fifo_engine_status_r(u32 i) 416{ 417 return 0x00002640U + i*8U; 418} 419static inline u32 fifo_engine_status__size_1_v(void) 420{ 421 return 0x00000009U; 422} 423static inline u32 fifo_engine_status_id_v(u32 r) 424{ 425 return (r >> 0U) & 0xfffU; 426} 427static inline u32 fifo_engine_status_id_type_v(u32 r) 428{ 429 return (r >> 12U) & 0x1U; 430} 431static inline u32 fifo_engine_status_id_type_chid_v(void) 432{ 433 return 0x00000000U; 434} 435static inline u32 fifo_engine_status_id_type_tsgid_v(void) 436{ 437 return 0x00000001U; 438} 439static inline u32 fifo_engine_status_ctx_status_v(u32 r) 440{ 441 return (r >> 13U) & 0x7U; 442} 443static inline u32 fifo_engine_status_ctx_status_invalid_v(void) 444{ 445 return 0x00000000U; 446} 447static inline u32 fifo_engine_status_ctx_status_valid_v(void) 448{ 449 return 0x00000001U; 450} 451static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) 452{ 453 return 0x00000005U; 454} 455static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) 456{ 
457 return 0x00000006U; 458} 459static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) 460{ 461 return 0x00000007U; 462} 463static inline u32 fifo_engine_status_next_id_v(u32 r) 464{ 465 return (r >> 16U) & 0xfffU; 466} 467static inline u32 fifo_engine_status_next_id_type_v(u32 r) 468{ 469 return (r >> 28U) & 0x1U; 470} 471static inline u32 fifo_engine_status_next_id_type_chid_v(void) 472{ 473 return 0x00000000U; 474} 475static inline u32 fifo_engine_status_faulted_v(u32 r) 476{ 477 return (r >> 30U) & 0x1U; 478} 479static inline u32 fifo_engine_status_faulted_true_v(void) 480{ 481 return 0x00000001U; 482} 483static inline u32 fifo_engine_status_engine_v(u32 r) 484{ 485 return (r >> 31U) & 0x1U; 486} 487static inline u32 fifo_engine_status_engine_idle_v(void) 488{ 489 return 0x00000000U; 490} 491static inline u32 fifo_engine_status_engine_busy_v(void) 492{ 493 return 0x00000001U; 494} 495static inline u32 fifo_engine_status_ctxsw_v(u32 r) 496{ 497 return (r >> 15U) & 0x1U; 498} 499static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) 500{ 501 return 0x00000001U; 502} 503static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) 504{ 505 return 0x8000U; 506} 507static inline u32 fifo_pbdma_status_r(u32 i) 508{ 509 return 0x00003080U + i*4U; 510} 511static inline u32 fifo_pbdma_status__size_1_v(void) 512{ 513 return 0x00000004U; 514} 515static inline u32 fifo_pbdma_status_id_v(u32 r) 516{ 517 return (r >> 0U) & 0xfffU; 518} 519static inline u32 fifo_pbdma_status_id_type_v(u32 r) 520{ 521 return (r >> 12U) & 0x1U; 522} 523static inline u32 fifo_pbdma_status_id_type_chid_v(void) 524{ 525 return 0x00000000U; 526} 527static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) 528{ 529 return 0x00000001U; 530} 531static inline u32 fifo_pbdma_status_chan_status_v(u32 r) 532{ 533 return (r >> 13U) & 0x7U; 534} 535static inline u32 fifo_pbdma_status_chan_status_valid_v(void) 536{ 537 return 0x00000001U; 538} 539static inline u32 
fifo_pbdma_status_chan_status_chsw_load_v(void) 540{ 541 return 0x00000005U; 542} 543static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) 544{ 545 return 0x00000006U; 546} 547static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) 548{ 549 return 0x00000007U; 550} 551static inline u32 fifo_pbdma_status_next_id_v(u32 r) 552{ 553 return (r >> 16U) & 0xfffU; 554} 555static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) 556{ 557 return (r >> 28U) & 0x1U; 558} 559static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) 560{ 561 return 0x00000000U; 562} 563static inline u32 fifo_pbdma_status_chsw_v(u32 r) 564{ 565 return (r >> 15U) & 0x1U; 566} 567static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) 568{ 569 return 0x00000001U; 570} 571static inline u32 fifo_replay_fault_buffer_lo_r(void) 572{ 573 return 0x00002a70U; 574} 575static inline u32 fifo_replay_fault_buffer_lo_enable_v(u32 r) 576{ 577 return (r >> 0U) & 0x1U; 578} 579static inline u32 fifo_replay_fault_buffer_lo_enable_true_v(void) 580{ 581 return 0x00000001U; 582} 583static inline u32 fifo_replay_fault_buffer_lo_enable_false_v(void) 584{ 585 return 0x00000000U; 586} 587static inline u32 fifo_replay_fault_buffer_lo_base_f(u32 v) 588{ 589 return (v & 0xfffffU) << 12U; 590} 591static inline u32 fifo_replay_fault_buffer_lo_base_reset_v(void) 592{ 593 return 0x00000000U; 594} 595static inline u32 fifo_replay_fault_buffer_hi_r(void) 596{ 597 return 0x00002a74U; 598} 599static inline u32 fifo_replay_fault_buffer_hi_base_f(u32 v) 600{ 601 return (v & 0xffU) << 0U; 602} 603static inline u32 fifo_replay_fault_buffer_hi_base_reset_v(void) 604{ 605 return 0x00000000U; 606} 607static inline u32 fifo_replay_fault_buffer_size_r(void) 608{ 609 return 0x00002a78U; 610} 611static inline u32 fifo_replay_fault_buffer_size_hw_f(u32 v) 612{ 613 return (v & 0x3fffU) << 0U; 614} 615static inline u32 fifo_replay_fault_buffer_size_hw_entries_v(void) 616{ 617 return 0x00001200U; 618} 619static 
inline u32 fifo_replay_fault_buffer_get_r(void) 620{ 621 return 0x00002a7cU; 622} 623static inline u32 fifo_replay_fault_buffer_get_offset_hw_f(u32 v) 624{ 625 return (v & 0x3fffU) << 0U; 626} 627static inline u32 fifo_replay_fault_buffer_get_offset_hw_init_v(void) 628{ 629 return 0x00000000U; 630} 631static inline u32 fifo_replay_fault_buffer_put_r(void) 632{ 633 return 0x00002a80U; 634} 635static inline u32 fifo_replay_fault_buffer_put_offset_hw_f(u32 v) 636{ 637 return (v & 0x3fffU) << 0U; 638} 639static inline u32 fifo_replay_fault_buffer_put_offset_hw_init_v(void) 640{ 641 return 0x00000000U; 642} 643static inline u32 fifo_replay_fault_buffer_info_r(void) 644{ 645 return 0x00002a84U; 646} 647static inline u32 fifo_replay_fault_buffer_info_overflow_f(u32 v) 648{ 649 return (v & 0x1U) << 0U; 650} 651static inline u32 fifo_replay_fault_buffer_info_overflow_false_v(void) 652{ 653 return 0x00000000U; 654} 655static inline u32 fifo_replay_fault_buffer_info_overflow_true_v(void) 656{ 657 return 0x00000001U; 658} 659static inline u32 fifo_replay_fault_buffer_info_overflow_clear_v(void) 660{ 661 return 0x00000001U; 662} 663static inline u32 fifo_replay_fault_buffer_info_write_nack_f(u32 v) 664{ 665 return (v & 0x1U) << 24U; 666} 667static inline u32 fifo_replay_fault_buffer_info_write_nack_false_v(void) 668{ 669 return 0x00000000U; 670} 671static inline u32 fifo_replay_fault_buffer_info_write_nack_true_v(void) 672{ 673 return 0x00000001U; 674} 675static inline u32 fifo_replay_fault_buffer_info_write_nack_clear_v(void) 676{ 677 return 0x00000001U; 678} 679static inline u32 fifo_replay_fault_buffer_info_fault_while_buffer_disabled_f(u32 v) 680{ 681 return (v & 0x1U) << 28U; 682} 683static inline u32 fifo_replay_fault_buffer_info_fault_while_buffer_disabled_false_v(void) 684{ 685 return 0x00000000U; 686} 687static inline u32 fifo_replay_fault_buffer_info_fault_while_buffer_disabled_true_v(void) 688{ 689 return 0x00000001U; 690} 691static inline u32 
fifo_replay_fault_buffer_info_fault_while_buffer_disabled_clear_v(void) 692{ 693 return 0x00000001U; 694} 695#endif
diff --git a/include/nvgpu/hw/gp106/hw_flush_gp106.h b/include/nvgpu/hw/gp106/hw_flush_gp106.h
deleted file mode 100644
index c4e1c32..0000000
--- a/include/nvgpu/hw/gp106/hw_flush_gp106.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_flush_gp106_h_ 57#define _hw_flush_gp106_h_ 58 59static inline u32 flush_l2_system_invalidate_r(void) 60{ 61 return 0x00070004U; 62} 63static inline u32 flush_l2_system_invalidate_pending_v(u32 r) 64{ 65 return (r >> 0U) & 0x1U; 66} 67static inline u32 flush_l2_system_invalidate_pending_busy_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 flush_l2_system_invalidate_pending_busy_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) 76{ 77 return (r >> 1U) & 0x1U; 78} 79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 flush_l2_flush_dirty_r(void) 84{ 85 return 0x00070010U; 86} 87static inline u32 flush_l2_flush_dirty_pending_v(u32 r) 88{ 89 return (r >> 0U) & 0x1U; 90} 91static inline u32 flush_l2_flush_dirty_pending_empty_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 flush_l2_flush_dirty_pending_empty_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 flush_l2_flush_dirty_pending_busy_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 flush_l2_flush_dirty_pending_busy_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) 108{ 109 return 
(r >> 1U) & 0x1U; 110} 111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 flush_l2_clean_comptags_r(void) 124{ 125 return 0x0007000cU; 126} 127static inline u32 flush_l2_clean_comptags_pending_v(u32 r) 128{ 129 return (r >> 0U) & 0x1U; 130} 131static inline u32 flush_l2_clean_comptags_pending_empty_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 flush_l2_clean_comptags_pending_empty_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 flush_l2_clean_comptags_pending_busy_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 flush_l2_clean_comptags_pending_busy_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 flush_fb_flush_r(void) 164{ 165 return 0x00070000U; 166} 167static inline u32 flush_fb_flush_pending_v(u32 r) 168{ 169 return (r >> 0U) & 0x1U; 170} 171static inline u32 flush_fb_flush_pending_busy_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 flush_fb_flush_pending_busy_f(void) 176{ 177 return 0x1U; 178} 179static inline u32 flush_fb_flush_outstanding_v(u32 r) 180{ 181 return (r >> 1U) & 0x1U; 182} 183static inline u32 flush_fb_flush_outstanding_true_v(void) 184{ 185 return 0x00000001U; 186} 187#endif
diff --git a/include/nvgpu/hw/gp106/hw_fuse_gp106.h b/include/nvgpu/hw/gp106/hw_fuse_gp106.h
deleted file mode 100644
index bfb19b9..0000000
--- a/include/nvgpu/hw/gp106/hw_fuse_gp106.h
+++ /dev/null
@@ -1,275 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fuse_gp106_h_ 57#define _hw_fuse_gp106_h_ 58 59static inline u32 fuse_status_opt_gpc_r(void) 60{ 61 return 0x00021c1cU; 62} 63static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) 64{ 65 return 0x00021c38U + i*4U; 66} 67static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) 68{ 69 return 0x00021838U + i*4U; 70} 71static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) 72{ 73 return 0x00021944U; 74} 75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) 76{ 77 return (v & 0x3U) << 0U; 78} 79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) 80{ 81 return 0x3U << 0U; 82} 83static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) 84{ 85 return (r >> 0U) & 0x3U; 86} 87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) 88{ 89 return 0x00021948U; 90} 91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) 92{ 93 return (v & 0x1U) << 0U; 94} 95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) 96{ 97 return 0x1U << 0U; 98} 99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 
fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 fuse_status_opt_fbio_r(void) 112{ 113 return 0x00021c14U; 114} 115static inline u32 fuse_status_opt_fbio_data_f(u32 v) 116{ 117 return (v & 0xffffU) << 0U; 118} 119static inline u32 fuse_status_opt_fbio_data_m(void) 120{ 121 return 0xffffU << 0U; 122} 123static inline u32 fuse_status_opt_fbio_data_v(u32 r) 124{ 125 return (r >> 0U) & 0xffffU; 126} 127static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i) 128{ 129 return 0x00021d70U + i*4U; 130} 131static inline u32 fuse_status_opt_fbp_r(void) 132{ 133 return 0x00021d38U; 134} 135static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) 136{ 137 return (r >> (0U + i*1U)) & 0x1U; 138} 139static inline u32 fuse_vin_cal_fuse_rev_r(void) 140{ 141 return 0x0002164cU; 142} 143static inline u32 fuse_vin_cal_fuse_rev_data_v(u32 r) 144{ 145 return (r >> 0U) & 0x3U; 146} 147static inline u32 fuse_vin_cal_gpc0_r(void) 148{ 149 return 0x00021650U; 150} 151static inline u32 fuse_vin_cal_gpc0_icpt_int_data_s(void) 152{ 153 return 12U; 154} 155static inline u32 fuse_vin_cal_gpc0_icpt_int_data_v(u32 r) 156{ 157 return (r >> 16U) & 0xfffU; 158} 159static inline u32 fuse_vin_cal_gpc0_icpt_frac_data_s(void) 160{ 161 return 2U; 162} 163static inline u32 fuse_vin_cal_gpc0_icpt_frac_data_v(u32 r) 164{ 165 return (r >> 14U) & 0x3U; 166} 167static inline u32 fuse_vin_cal_gpc0_slope_int_data_s(void) 168{ 169 return 4U; 170} 171static inline u32 fuse_vin_cal_gpc0_slope_int_data_v(u32 r) 172{ 173 return (r >> 10U) & 0xfU; 174} 175static inline u32 fuse_vin_cal_gpc0_slope_frac_data_s(void) 176{ 177 return 10U; 178} 179static inline u32 fuse_vin_cal_gpc0_slope_frac_data_v(u32 r) 180{ 181 return (r >> 0U) & 0x3ffU; 182} 183static inline u32 fuse_vin_cal_gpc1_delta_r(void) 184{ 185 return 0x00021654U; 186} 187static inline u32 fuse_vin_cal_gpc1_delta_icpt_int_data_s(void) 188{ 189 return 8U; 190} 191static inline u32 
fuse_vin_cal_gpc1_delta_icpt_int_data_v(u32 r) 192{ 193 return (r >> 14U) & 0xffU; 194} 195static inline u32 fuse_vin_cal_gpc1_delta_icpt_frac_data_s(void) 196{ 197 return 2U; 198} 199static inline u32 fuse_vin_cal_gpc1_delta_icpt_frac_data_v(u32 r) 200{ 201 return (r >> 12U) & 0x3U; 202} 203static inline u32 fuse_vin_cal_gpc1_delta_icpt_sign_data_s(void) 204{ 205 return 1U; 206} 207static inline u32 fuse_vin_cal_gpc1_delta_icpt_sign_data_v(u32 r) 208{ 209 return (r >> 22U) & 0x1U; 210} 211static inline u32 fuse_vin_cal_gpc1_delta_slope_int_data_s(void) 212{ 213 return 1U; 214} 215static inline u32 fuse_vin_cal_gpc1_delta_slope_int_data_v(u32 r) 216{ 217 return (r >> 10U) & 0x1U; 218} 219static inline u32 fuse_vin_cal_gpc1_delta_slope_frac_data_s(void) 220{ 221 return 10U; 222} 223static inline u32 fuse_vin_cal_gpc1_delta_slope_frac_data_v(u32 r) 224{ 225 return (r >> 0U) & 0x3ffU; 226} 227static inline u32 fuse_vin_cal_gpc1_delta_slope_sign_data_s(void) 228{ 229 return 1U; 230} 231static inline u32 fuse_vin_cal_gpc1_delta_slope_sign_data_v(u32 r) 232{ 233 return (r >> 11U) & 0x1U; 234} 235static inline u32 fuse_vin_cal_gpc2_delta_r(void) 236{ 237 return 0x00021658U; 238} 239static inline u32 fuse_vin_cal_gpc3_delta_r(void) 240{ 241 return 0x0002165cU; 242} 243static inline u32 fuse_vin_cal_gpc4_delta_r(void) 244{ 245 return 0x00021660U; 246} 247static inline u32 fuse_vin_cal_gpc5_delta_r(void) 248{ 249 return 0x00021664U; 250} 251static inline u32 fuse_vin_cal_shared_delta_r(void) 252{ 253 return 0x00021668U; 254} 255static inline u32 fuse_vin_cal_sram_delta_r(void) 256{ 257 return 0x0002166cU; 258} 259static inline u32 fuse_vin_cal_sram_delta_icpt_int_data_s(void) 260{ 261 return 9U; 262} 263static inline u32 fuse_vin_cal_sram_delta_icpt_int_data_v(u32 r) 264{ 265 return (r >> 13U) & 0x1ffU; 266} 267static inline u32 fuse_vin_cal_sram_delta_icpt_frac_data_s(void) 268{ 269 return 1U; 270} 271static inline u32 fuse_vin_cal_sram_delta_icpt_frac_data_v(u32 r) 272{ 
273 return (r >> 12U) & 0x1U; 274} 275#endif
diff --git a/include/nvgpu/hw/gp106/hw_gc6_gp106.h b/include/nvgpu/hw/gp106/hw_gc6_gp106.h
deleted file mode 100644
index 91e9d7b..0000000
--- a/include/nvgpu/hw/gp106/hw_gc6_gp106.h
+++ /dev/null
@@ -1,62 +0,0 @@ 1/* 2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gc6_gp106_h_ 57#define _hw_gc6_gp106_h_ 58static inline u32 gc6_sci_strap_r(void) 59{ 60 return 0x00010ebb0; 61} 62#endif
diff --git a/include/nvgpu/hw/gp106/hw_gmmu_gp106.h b/include/nvgpu/hw/gp106/hw_gmmu_gp106.h
deleted file mode 100644
index 8369001..0000000
--- a/include/nvgpu/hw/gp106/hw_gmmu_gp106.h
+++ /dev/null
@@ -1,331 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gmmu_gp106_h_ 57#define _hw_gmmu_gp106_h_ 58 59static inline u32 gmmu_new_pde_is_pte_w(void) 60{ 61 return 0U; 62} 63static inline u32 gmmu_new_pde_is_pte_false_f(void) 64{ 65 return 0x0U; 66} 67static inline u32 gmmu_new_pde_aperture_w(void) 68{ 69 return 0U; 70} 71static inline u32 gmmu_new_pde_aperture_invalid_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 gmmu_new_pde_aperture_video_memory_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void) 84{ 85 return 0x6U; 86} 87static inline u32 gmmu_new_pde_address_sys_f(u32 v) 88{ 89 return (v & 0xffffffU) << 8U; 90} 91static inline u32 gmmu_new_pde_address_sys_w(void) 92{ 93 return 0U; 94} 95static inline u32 gmmu_new_pde_vol_w(void) 96{ 97 return 0U; 98} 99static inline u32 gmmu_new_pde_vol_true_f(void) 100{ 101 return 0x8U; 102} 103static inline u32 gmmu_new_pde_vol_false_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 gmmu_new_pde_address_shift_v(void) 108{ 109 return 0x0000000cU; 110} 111static inline u32 gmmu_new_pde__size_v(void) 112{ 113 return 0x00000008U; 114} 115static inline u32 gmmu_new_dual_pde_is_pte_w(void) 116{ 117 return 0U; 118} 
119static inline u32 gmmu_new_dual_pde_is_pte_false_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 gmmu_new_dual_pde_aperture_big_w(void) 124{ 125 return 0U; 126} 127static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void) 128{ 129 return 0x0U; 130} 131static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void) 132{ 133 return 0x2U; 134} 135static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void) 136{ 137 return 0x4U; 138} 139static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void) 140{ 141 return 0x6U; 142} 143static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v) 144{ 145 return (v & 0xfffffffU) << 4U; 146} 147static inline u32 gmmu_new_dual_pde_address_big_sys_w(void) 148{ 149 return 0U; 150} 151static inline u32 gmmu_new_dual_pde_aperture_small_w(void) 152{ 153 return 2U; 154} 155static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void) 164{ 165 return 0x4U; 166} 167static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void) 168{ 169 return 0x6U; 170} 171static inline u32 gmmu_new_dual_pde_vol_small_w(void) 172{ 173 return 2U; 174} 175static inline u32 gmmu_new_dual_pde_vol_small_true_f(void) 176{ 177 return 0x8U; 178} 179static inline u32 gmmu_new_dual_pde_vol_small_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 gmmu_new_dual_pde_vol_big_w(void) 184{ 185 return 0U; 186} 187static inline u32 gmmu_new_dual_pde_vol_big_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 gmmu_new_dual_pde_vol_big_false_f(void) 192{ 193 return 0x0U; 194} 195static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v) 196{ 197 return (v & 0xffffffU) << 8U; 198} 199static inline u32 gmmu_new_dual_pde_address_small_sys_w(void) 200{ 201 return 2U; 202} 203static inline u32 
gmmu_new_dual_pde_address_shift_v(void) 204{ 205 return 0x0000000cU; 206} 207static inline u32 gmmu_new_dual_pde_address_big_shift_v(void) 208{ 209 return 0x00000008U; 210} 211static inline u32 gmmu_new_dual_pde__size_v(void) 212{ 213 return 0x00000010U; 214} 215static inline u32 gmmu_new_pte__size_v(void) 216{ 217 return 0x00000008U; 218} 219static inline u32 gmmu_new_pte_valid_w(void) 220{ 221 return 0U; 222} 223static inline u32 gmmu_new_pte_valid_true_f(void) 224{ 225 return 0x1U; 226} 227static inline u32 gmmu_new_pte_valid_false_f(void) 228{ 229 return 0x0U; 230} 231static inline u32 gmmu_new_pte_privilege_w(void) 232{ 233 return 0U; 234} 235static inline u32 gmmu_new_pte_privilege_true_f(void) 236{ 237 return 0x20U; 238} 239static inline u32 gmmu_new_pte_privilege_false_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gmmu_new_pte_address_sys_f(u32 v) 244{ 245 return (v & 0xffffffU) << 8U; 246} 247static inline u32 gmmu_new_pte_address_sys_w(void) 248{ 249 return 0U; 250} 251static inline u32 gmmu_new_pte_address_vid_f(u32 v) 252{ 253 return (v & 0xffffffU) << 8U; 254} 255static inline u32 gmmu_new_pte_address_vid_w(void) 256{ 257 return 0U; 258} 259static inline u32 gmmu_new_pte_vol_w(void) 260{ 261 return 0U; 262} 263static inline u32 gmmu_new_pte_vol_true_f(void) 264{ 265 return 0x8U; 266} 267static inline u32 gmmu_new_pte_vol_false_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 gmmu_new_pte_aperture_w(void) 272{ 273 return 0U; 274} 275static inline u32 gmmu_new_pte_aperture_video_memory_f(void) 276{ 277 return 0x0U; 278} 279static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void) 280{ 281 return 0x4U; 282} 283static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void) 284{ 285 return 0x6U; 286} 287static inline u32 gmmu_new_pte_read_only_w(void) 288{ 289 return 0U; 290} 291static inline u32 gmmu_new_pte_read_only_true_f(void) 292{ 293 return 0x40U; 294} 295static inline u32 gmmu_new_pte_comptagline_f(u32 v) 296{ 297 return (v & 
0x3ffffU) << 4U; 298} 299static inline u32 gmmu_new_pte_comptagline_w(void) 300{ 301 return 1U; 302} 303static inline u32 gmmu_new_pte_kind_f(u32 v) 304{ 305 return (v & 0xffU) << 24U; 306} 307static inline u32 gmmu_new_pte_kind_w(void) 308{ 309 return 1U; 310} 311static inline u32 gmmu_new_pte_address_shift_v(void) 312{ 313 return 0x0000000cU; 314} 315static inline u32 gmmu_pte_kind_f(u32 v) 316{ 317 return (v & 0xffU) << 4U; 318} 319static inline u32 gmmu_pte_kind_w(void) 320{ 321 return 1U; 322} 323static inline u32 gmmu_pte_kind_invalid_v(void) 324{ 325 return 0x000000ffU; 326} 327static inline u32 gmmu_pte_kind_pitch_v(void) 328{ 329 return 0x00000000U; 330} 331#endif
diff --git a/include/nvgpu/hw/gp106/hw_gr_gp106.h b/include/nvgpu/hw/gp106/hw_gr_gp106.h
deleted file mode 100644
index ac82901..0000000
--- a/include/nvgpu/hw/gp106/hw_gr_gp106.h
+++ /dev/null
@@ -1,4167 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gr_gp106_h_ 57#define _hw_gr_gp106_h_ 58 59static inline u32 gr_intr_r(void) 60{ 61 return 0x00400100U; 62} 63static inline u32 gr_intr_notify_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 gr_intr_notify_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gr_intr_semaphore_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gr_intr_semaphore_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gr_intr_illegal_method_pending_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 gr_intr_illegal_method_reset_f(void) 84{ 85 return 0x10U; 86} 87static inline u32 gr_intr_illegal_notify_pending_f(void) 88{ 89 return 0x40U; 90} 91static inline u32 gr_intr_illegal_notify_reset_f(void) 92{ 93 return 0x40U; 94} 95static inline u32 gr_intr_firmware_method_f(u32 v) 96{ 97 return (v & 0x1U) << 8U; 98} 99static inline u32 gr_intr_firmware_method_pending_f(void) 100{ 101 return 0x100U; 102} 103static inline u32 gr_intr_firmware_method_reset_f(void) 104{ 105 return 0x100U; 106} 107static inline u32 gr_intr_illegal_class_pending_f(void) 108{ 109 return 0x20U; 110} 111static inline u32 gr_intr_illegal_class_reset_f(void) 112{ 113 return 0x20U; 114} 115static inline u32 gr_intr_fecs_error_pending_f(void) 116{ 117 
return 0x80000U; 118} 119static inline u32 gr_intr_fecs_error_reset_f(void) 120{ 121 return 0x80000U; 122} 123static inline u32 gr_intr_class_error_pending_f(void) 124{ 125 return 0x100000U; 126} 127static inline u32 gr_intr_class_error_reset_f(void) 128{ 129 return 0x100000U; 130} 131static inline u32 gr_intr_exception_pending_f(void) 132{ 133 return 0x200000U; 134} 135static inline u32 gr_intr_exception_reset_f(void) 136{ 137 return 0x200000U; 138} 139static inline u32 gr_fecs_intr_r(void) 140{ 141 return 0x00400144U; 142} 143static inline u32 gr_class_error_r(void) 144{ 145 return 0x00400110U; 146} 147static inline u32 gr_class_error_code_v(u32 r) 148{ 149 return (r >> 0U) & 0xffffU; 150} 151static inline u32 gr_intr_nonstall_r(void) 152{ 153 return 0x00400120U; 154} 155static inline u32 gr_intr_nonstall_trap_pending_f(void) 156{ 157 return 0x2U; 158} 159static inline u32 gr_intr_en_r(void) 160{ 161 return 0x0040013cU; 162} 163static inline u32 gr_exception_r(void) 164{ 165 return 0x00400108U; 166} 167static inline u32 gr_exception_fe_m(void) 168{ 169 return 0x1U << 0U; 170} 171static inline u32 gr_exception_gpc_m(void) 172{ 173 return 0x1U << 24U; 174} 175static inline u32 gr_exception_memfmt_m(void) 176{ 177 return 0x1U << 1U; 178} 179static inline u32 gr_exception_ds_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 gr_exception_sked_m(void) 184{ 185 return 0x1U << 8U; 186} 187static inline u32 gr_exception_pd_m(void) 188{ 189 return 0x1U << 2U; 190} 191static inline u32 gr_exception_scc_m(void) 192{ 193 return 0x1U << 3U; 194} 195static inline u32 gr_exception_ssync_m(void) 196{ 197 return 0x1U << 5U; 198} 199static inline u32 gr_exception_mme_m(void) 200{ 201 return 0x1U << 7U; 202} 203static inline u32 gr_exception1_r(void) 204{ 205 return 0x00400118U; 206} 207static inline u32 gr_exception1_gpc_0_pending_f(void) 208{ 209 return 0x1U; 210} 211static inline u32 gr_exception2_r(void) 212{ 213 return 0x0040011cU; 214} 215static inline u32 
gr_exception_en_r(void) 216{ 217 return 0x00400138U; 218} 219static inline u32 gr_exception_en_fe_m(void) 220{ 221 return 0x1U << 0U; 222} 223static inline u32 gr_exception1_en_r(void) 224{ 225 return 0x00400130U; 226} 227static inline u32 gr_exception2_en_r(void) 228{ 229 return 0x00400134U; 230} 231static inline u32 gr_gpfifo_ctl_r(void) 232{ 233 return 0x00400500U; 234} 235static inline u32 gr_gpfifo_ctl_access_f(u32 v) 236{ 237 return (v & 0x1U) << 0U; 238} 239static inline u32 gr_gpfifo_ctl_access_disabled_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gr_gpfifo_ctl_access_enabled_f(void) 244{ 245 return 0x1U; 246} 247static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) 248{ 249 return (v & 0x1U) << 16U; 250} 251static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) 252{ 253 return 0x00000001U; 254} 255static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) 256{ 257 return 0x10000U; 258} 259static inline u32 gr_gpfifo_status_r(void) 260{ 261 return 0x00400504U; 262} 263static inline u32 gr_trapped_addr_r(void) 264{ 265 return 0x00400704U; 266} 267static inline u32 gr_trapped_addr_mthd_v(u32 r) 268{ 269 return (r >> 2U) & 0xfffU; 270} 271static inline u32 gr_trapped_addr_subch_v(u32 r) 272{ 273 return (r >> 16U) & 0x7U; 274} 275static inline u32 gr_trapped_addr_mme_generated_v(u32 r) 276{ 277 return (r >> 20U) & 0x1U; 278} 279static inline u32 gr_trapped_addr_datahigh_v(u32 r) 280{ 281 return (r >> 24U) & 0x1U; 282} 283static inline u32 gr_trapped_addr_priv_v(u32 r) 284{ 285 return (r >> 28U) & 0x1U; 286} 287static inline u32 gr_trapped_addr_status_v(u32 r) 288{ 289 return (r >> 31U) & 0x1U; 290} 291static inline u32 gr_trapped_data_lo_r(void) 292{ 293 return 0x00400708U; 294} 295static inline u32 gr_trapped_data_hi_r(void) 296{ 297 return 0x0040070cU; 298} 299static inline u32 gr_trapped_data_mme_r(void) 300{ 301 return 0x00400710U; 302} 303static inline u32 gr_trapped_data_mme_pc_v(u32 r) 304{ 305 return (r >> 0U) & 0xfffU; 
306} 307static inline u32 gr_status_r(void) 308{ 309 return 0x00400700U; 310} 311static inline u32 gr_status_fe_method_upper_v(u32 r) 312{ 313 return (r >> 1U) & 0x1U; 314} 315static inline u32 gr_status_fe_method_lower_v(u32 r) 316{ 317 return (r >> 2U) & 0x1U; 318} 319static inline u32 gr_status_fe_method_lower_idle_v(void) 320{ 321 return 0x00000000U; 322} 323static inline u32 gr_status_fe_gi_v(u32 r) 324{ 325 return (r >> 21U) & 0x1U; 326} 327static inline u32 gr_status_mask_r(void) 328{ 329 return 0x00400610U; 330} 331static inline u32 gr_status_1_r(void) 332{ 333 return 0x00400604U; 334} 335static inline u32 gr_status_2_r(void) 336{ 337 return 0x00400608U; 338} 339static inline u32 gr_engine_status_r(void) 340{ 341 return 0x0040060cU; 342} 343static inline u32 gr_engine_status_value_busy_f(void) 344{ 345 return 0x1U; 346} 347static inline u32 gr_pri_be0_becs_be_exception_r(void) 348{ 349 return 0x00410204U; 350} 351static inline u32 gr_pri_be0_becs_be_exception_en_r(void) 352{ 353 return 0x00410208U; 354} 355static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) 356{ 357 return 0x00502c90U; 358} 359static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) 360{ 361 return 0x00502c94U; 362} 363static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) 364{ 365 return 0x00504508U; 366} 367static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 368{ 369 return 0x0050450cU; 370} 371static inline u32 gr_activity_0_r(void) 372{ 373 return 0x00400380U; 374} 375static inline u32 gr_activity_1_r(void) 376{ 377 return 0x00400384U; 378} 379static inline u32 gr_activity_2_r(void) 380{ 381 return 0x00400388U; 382} 383static inline u32 gr_activity_4_r(void) 384{ 385 return 0x00400390U; 386} 387static inline u32 gr_activity_4_gpc0_s(void) 388{ 389 return 3U; 390} 391static inline u32 gr_activity_4_gpc0_f(u32 v) 392{ 393 return (v & 0x7U) << 0U; 394} 395static inline u32 gr_activity_4_gpc0_m(void) 396{ 397 return 0x7U << 0U; 398} 399static inline u32 
gr_activity_4_gpc0_v(u32 r) 400{ 401 return (r >> 0U) & 0x7U; 402} 403static inline u32 gr_activity_4_gpc0_empty_v(void) 404{ 405 return 0x00000000U; 406} 407static inline u32 gr_activity_4_gpc0_preempted_v(void) 408{ 409 return 0x00000004U; 410} 411static inline u32 gr_pri_gpc0_gcc_dbg_r(void) 412{ 413 return 0x00501000U; 414} 415static inline u32 gr_pri_gpcs_gcc_dbg_r(void) 416{ 417 return 0x00419000U; 418} 419static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) 420{ 421 return 0x1U << 1U; 422} 423static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) 424{ 425 return 0x005046a4U; 426} 427static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) 428{ 429 return 0x00419ea4U; 430} 431static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) 432{ 433 return 0x1U << 0U; 434} 435static inline u32 gr_pri_sked_activity_r(void) 436{ 437 return 0x00407054U; 438} 439static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) 440{ 441 return 0x00502c80U; 442} 443static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) 444{ 445 return 0x00502c84U; 446} 447static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) 448{ 449 return 0x00502c88U; 450} 451static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) 452{ 453 return 0x00502c8cU; 454} 455static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) 456{ 457 return 0x00504500U; 458} 459static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) 460{ 461 return 0x00504d00U; 462} 463static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) 464{ 465 return 0x00501d00U; 466} 467static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) 468{ 469 return 0x0041ac80U; 470} 471static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) 472{ 473 return 0x0041ac84U; 474} 475static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) 476{ 477 return 0x0041ac88U; 478} 479static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) 480{ 481 return 0x0041ac8cU; 482} 483static inline u32 
gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) 484{ 485 return 0x0041c500U; 486} 487static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) 488{ 489 return 0x0041cd00U; 490} 491static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) 492{ 493 return 0x00419d00U; 494} 495static inline u32 gr_pri_be0_becs_be_activity0_r(void) 496{ 497 return 0x00410200U; 498} 499static inline u32 gr_pri_be1_becs_be_activity0_r(void) 500{ 501 return 0x00410600U; 502} 503static inline u32 gr_pri_bes_becs_be_activity0_r(void) 504{ 505 return 0x00408a00U; 506} 507static inline u32 gr_pri_ds_mpipe_status_r(void) 508{ 509 return 0x00405858U; 510} 511static inline u32 gr_pri_fe_go_idle_info_r(void) 512{ 513 return 0x00404194U; 514} 515static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) 516{ 517 return 0x00504238U; 518} 519static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void) 520{ 521 return 0x005046b8U; 522} 523static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp0_pending_f(void) 524{ 525 return 0x10U; 526} 527static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp1_pending_f(void) 528{ 529 return 0x20U; 530} 531static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp2_pending_f(void) 532{ 533 return 0x40U; 534} 535static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp3_pending_f(void) 536{ 537 return 0x80U; 538} 539static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp0_pending_f(void) 540{ 541 return 0x100U; 542} 543static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp1_pending_f(void) 544{ 545 return 0x200U; 546} 547static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp2_pending_f(void) 548{ 549 return 0x400U; 550} 551static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp3_pending_f(void) 552{ 553 return 0x800U; 554} 555static inline u32 
gr_pri_gpc0_tpc0_sm_shm_ecc_status_r(void) 556{ 557 return 0x005044a0U; 558} 559static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm0_pending_f(void) 560{ 561 return 0x1U; 562} 563static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm1_pending_f(void) 564{ 565 return 0x2U; 566} 567static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm0_pending_f(void) 568{ 569 return 0x10U; 570} 571static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f(void) 572{ 573 return 0x20U; 574} 575static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm0_pending_f(void) 576{ 577 return 0x100U; 578} 579static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f(void) 580{ 581 return 0x200U; 582} 583static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_single_err_count_r(void) 584{ 585 return 0x005046bcU; 586} 587static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r(void) 588{ 589 return 0x005046c0U; 590} 591static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_r(void) 592{ 593 return 0x005044a4U; 594} 595static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_corrected_m(void) 596{ 597 return 0xffU << 0U; 598} 599static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_corrected_v(u32 r) 600{ 601 return (r >> 0U) & 0xffU; 602} 603static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_detected_m(void) 604{ 605 return 0xffU << 8U; 606} 607static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_detected_v(u32 r) 608{ 609 return (r >> 8U) & 0xffU; 610} 611static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_double_detected_m(void) 612{ 613 return 0xffU << 16U; 614} 615static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_double_detected_v(u32 r) 616{ 617 return (r >> 16U) & 0xffU; 618} 619static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void) 620{ 621 return 0x005042c4U; 622} 623static inline u32 
gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void) 624{ 625 return 0x0U; 626} 627static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void) 628{ 629 return 0x1U; 630} 631static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void) 632{ 633 return 0x2U; 634} 635static inline u32 gr_pri_be0_crop_status1_r(void) 636{ 637 return 0x00410134U; 638} 639static inline u32 gr_pri_bes_crop_status1_r(void) 640{ 641 return 0x00408934U; 642} 643static inline u32 gr_pri_be0_zrop_status_r(void) 644{ 645 return 0x00410048U; 646} 647static inline u32 gr_pri_be0_zrop_status2_r(void) 648{ 649 return 0x0041004cU; 650} 651static inline u32 gr_pri_bes_zrop_status_r(void) 652{ 653 return 0x00408848U; 654} 655static inline u32 gr_pri_bes_zrop_status2_r(void) 656{ 657 return 0x0040884cU; 658} 659static inline u32 gr_pipe_bundle_address_r(void) 660{ 661 return 0x00400200U; 662} 663static inline u32 gr_pipe_bundle_address_value_v(u32 r) 664{ 665 return (r >> 0U) & 0xffffU; 666} 667static inline u32 gr_pipe_bundle_data_r(void) 668{ 669 return 0x00400204U; 670} 671static inline u32 gr_pipe_bundle_config_r(void) 672{ 673 return 0x00400208U; 674} 675static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) 676{ 677 return 0x0U; 678} 679static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) 680{ 681 return 0x80000000U; 682} 683static inline u32 gr_fe_hww_esr_r(void) 684{ 685 return 0x00404000U; 686} 687static inline u32 gr_fe_hww_esr_reset_active_f(void) 688{ 689 return 0x40000000U; 690} 691static inline u32 gr_fe_hww_esr_en_enable_f(void) 692{ 693 return 0x80000000U; 694} 695static inline u32 gr_fe_hww_esr_info_r(void) 696{ 697 return 0x004041b0U; 698} 699static inline u32 gr_fe_go_idle_timeout_r(void) 700{ 701 return 0x00404154U; 702} 703static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) 704{ 705 return (v & 0xffffffffU) << 0U; 706} 707static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) 708{ 709 return 0x0U; 710} 711static inline 
u32 gr_fe_go_idle_timeout_count_prod_f(void) 712{ 713 return 0x1800U; 714} 715static inline u32 gr_fe_object_table_r(u32 i) 716{ 717 return 0x00404200U + i*4U; 718} 719static inline u32 gr_fe_object_table_nvclass_v(u32 r) 720{ 721 return (r >> 0U) & 0xffffU; 722} 723static inline u32 gr_fe_tpc_fs_r(void) 724{ 725 return 0x004041c4U; 726} 727static inline u32 gr_pri_mme_shadow_raw_index_r(void) 728{ 729 return 0x00404488U; 730} 731static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) 732{ 733 return 0x80000000U; 734} 735static inline u32 gr_pri_mme_shadow_raw_data_r(void) 736{ 737 return 0x0040448cU; 738} 739static inline u32 gr_mme_hww_esr_r(void) 740{ 741 return 0x00404490U; 742} 743static inline u32 gr_mme_hww_esr_reset_active_f(void) 744{ 745 return 0x40000000U; 746} 747static inline u32 gr_mme_hww_esr_en_enable_f(void) 748{ 749 return 0x80000000U; 750} 751static inline u32 gr_mme_hww_esr_info_r(void) 752{ 753 return 0x00404494U; 754} 755static inline u32 gr_memfmt_hww_esr_r(void) 756{ 757 return 0x00404600U; 758} 759static inline u32 gr_memfmt_hww_esr_reset_active_f(void) 760{ 761 return 0x40000000U; 762} 763static inline u32 gr_memfmt_hww_esr_en_enable_f(void) 764{ 765 return 0x80000000U; 766} 767static inline u32 gr_fecs_cpuctl_r(void) 768{ 769 return 0x00409100U; 770} 771static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) 772{ 773 return (v & 0x1U) << 1U; 774} 775static inline u32 gr_fecs_cpuctl_alias_r(void) 776{ 777 return 0x00409130U; 778} 779static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) 780{ 781 return (v & 0x1U) << 1U; 782} 783static inline u32 gr_fecs_dmactl_r(void) 784{ 785 return 0x0040910cU; 786} 787static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) 788{ 789 return (v & 0x1U) << 0U; 790} 791static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) 792{ 793 return 0x1U << 1U; 794} 795static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) 796{ 797 return 0x1U << 2U; 798} 799static inline u32 gr_fecs_os_r(void) 800{ 801 return 
0x00409080U; 802} 803static inline u32 gr_fecs_idlestate_r(void) 804{ 805 return 0x0040904cU; 806} 807static inline u32 gr_fecs_mailbox0_r(void) 808{ 809 return 0x00409040U; 810} 811static inline u32 gr_fecs_mailbox1_r(void) 812{ 813 return 0x00409044U; 814} 815static inline u32 gr_fecs_irqstat_r(void) 816{ 817 return 0x00409008U; 818} 819static inline u32 gr_fecs_irqmode_r(void) 820{ 821 return 0x0040900cU; 822} 823static inline u32 gr_fecs_irqmask_r(void) 824{ 825 return 0x00409018U; 826} 827static inline u32 gr_fecs_irqdest_r(void) 828{ 829 return 0x0040901cU; 830} 831static inline u32 gr_fecs_curctx_r(void) 832{ 833 return 0x00409050U; 834} 835static inline u32 gr_fecs_nxtctx_r(void) 836{ 837 return 0x00409054U; 838} 839static inline u32 gr_fecs_engctl_r(void) 840{ 841 return 0x004090a4U; 842} 843static inline u32 gr_fecs_debug1_r(void) 844{ 845 return 0x00409090U; 846} 847static inline u32 gr_fecs_debuginfo_r(void) 848{ 849 return 0x00409094U; 850} 851static inline u32 gr_fecs_icd_cmd_r(void) 852{ 853 return 0x00409200U; 854} 855static inline u32 gr_fecs_icd_cmd_opc_s(void) 856{ 857 return 4U; 858} 859static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) 860{ 861 return (v & 0xfU) << 0U; 862} 863static inline u32 gr_fecs_icd_cmd_opc_m(void) 864{ 865 return 0xfU << 0U; 866} 867static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) 868{ 869 return (r >> 0U) & 0xfU; 870} 871static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) 872{ 873 return 0x8U; 874} 875static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) 876{ 877 return 0xeU; 878} 879static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) 880{ 881 return (v & 0x1fU) << 8U; 882} 883static inline u32 gr_fecs_icd_rdata_r(void) 884{ 885 return 0x0040920cU; 886} 887static inline u32 gr_fecs_imemc_r(u32 i) 888{ 889 return 0x00409180U + i*16U; 890} 891static inline u32 gr_fecs_imemc_offs_f(u32 v) 892{ 893 return (v & 0x3fU) << 2U; 894} 895static inline u32 gr_fecs_imemc_blk_f(u32 v) 896{ 897 return (v & 0xffU) << 8U; 898} 899static inline u32 
gr_fecs_imemc_aincw_f(u32 v) 900{ 901 return (v & 0x1U) << 24U; 902} 903static inline u32 gr_fecs_imemd_r(u32 i) 904{ 905 return 0x00409184U + i*16U; 906} 907static inline u32 gr_fecs_imemt_r(u32 i) 908{ 909 return 0x00409188U + i*16U; 910} 911static inline u32 gr_fecs_imemt_tag_f(u32 v) 912{ 913 return (v & 0xffffU) << 0U; 914} 915static inline u32 gr_fecs_dmemc_r(u32 i) 916{ 917 return 0x004091c0U + i*8U; 918} 919static inline u32 gr_fecs_dmemc_offs_s(void) 920{ 921 return 6U; 922} 923static inline u32 gr_fecs_dmemc_offs_f(u32 v) 924{ 925 return (v & 0x3fU) << 2U; 926} 927static inline u32 gr_fecs_dmemc_offs_m(void) 928{ 929 return 0x3fU << 2U; 930} 931static inline u32 gr_fecs_dmemc_offs_v(u32 r) 932{ 933 return (r >> 2U) & 0x3fU; 934} 935static inline u32 gr_fecs_dmemc_blk_f(u32 v) 936{ 937 return (v & 0xffU) << 8U; 938} 939static inline u32 gr_fecs_dmemc_aincw_f(u32 v) 940{ 941 return (v & 0x1U) << 24U; 942} 943static inline u32 gr_fecs_dmemd_r(u32 i) 944{ 945 return 0x004091c4U + i*8U; 946} 947static inline u32 gr_fecs_dmatrfbase_r(void) 948{ 949 return 0x00409110U; 950} 951static inline u32 gr_fecs_dmatrfmoffs_r(void) 952{ 953 return 0x00409114U; 954} 955static inline u32 gr_fecs_dmatrffboffs_r(void) 956{ 957 return 0x0040911cU; 958} 959static inline u32 gr_fecs_dmatrfcmd_r(void) 960{ 961 return 0x00409118U; 962} 963static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) 964{ 965 return (v & 0x1U) << 4U; 966} 967static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) 968{ 969 return (v & 0x1U) << 5U; 970} 971static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) 972{ 973 return (v & 0x7U) << 8U; 974} 975static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) 976{ 977 return (v & 0x7U) << 12U; 978} 979static inline u32 gr_fecs_bootvec_r(void) 980{ 981 return 0x00409104U; 982} 983static inline u32 gr_fecs_bootvec_vec_f(u32 v) 984{ 985 return (v & 0xffffffffU) << 0U; 986} 987static inline u32 gr_fecs_falcon_hwcfg_r(void) 988{ 989 return 0x00409108U; 990} 991static inline u32 
gr_gpcs_gpccs_falcon_hwcfg_r(void) 992{ 993 return 0x0041a108U; 994} 995static inline u32 gr_fecs_falcon_rm_r(void) 996{ 997 return 0x00409084U; 998} 999static inline u32 gr_fecs_current_ctx_r(void) 1000{ 1001 return 0x00409b00U; 1002} 1003static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) 1004{ 1005 return (v & 0xfffffffU) << 0U; 1006} 1007static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) 1008{ 1009 return (r >> 0U) & 0xfffffffU; 1010} 1011static inline u32 gr_fecs_current_ctx_target_s(void) 1012{ 1013 return 2U; 1014} 1015static inline u32 gr_fecs_current_ctx_target_f(u32 v) 1016{ 1017 return (v & 0x3U) << 28U; 1018} 1019static inline u32 gr_fecs_current_ctx_target_m(void) 1020{ 1021 return 0x3U << 28U; 1022} 1023static inline u32 gr_fecs_current_ctx_target_v(u32 r) 1024{ 1025 return (r >> 28U) & 0x3U; 1026} 1027static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) 1028{ 1029 return 0x0U; 1030} 1031static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) 1032{ 1033 return 0x20000000U; 1034} 1035static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) 1036{ 1037 return 0x30000000U; 1038} 1039static inline u32 gr_fecs_current_ctx_valid_s(void) 1040{ 1041 return 1U; 1042} 1043static inline u32 gr_fecs_current_ctx_valid_f(u32 v) 1044{ 1045 return (v & 0x1U) << 31U; 1046} 1047static inline u32 gr_fecs_current_ctx_valid_m(void) 1048{ 1049 return 0x1U << 31U; 1050} 1051static inline u32 gr_fecs_current_ctx_valid_v(u32 r) 1052{ 1053 return (r >> 31U) & 0x1U; 1054} 1055static inline u32 gr_fecs_current_ctx_valid_false_f(void) 1056{ 1057 return 0x0U; 1058} 1059static inline u32 gr_fecs_method_data_r(void) 1060{ 1061 return 0x00409500U; 1062} 1063static inline u32 gr_fecs_method_push_r(void) 1064{ 1065 return 0x00409504U; 1066} 1067static inline u32 gr_fecs_method_push_adr_f(u32 v) 1068{ 1069 return (v & 0xfffU) << 0U; 1070} 1071static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) 1072{ 1073 return 0x00000003U; 1074} 1075static inline u32 
gr_fecs_method_push_adr_bind_pointer_f(void) 1076{ 1077 return 0x3U; 1078} 1079static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) 1080{ 1081 return 0x00000010U; 1082} 1083static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) 1084{ 1085 return 0x00000009U; 1086} 1087static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) 1088{ 1089 return 0x00000015U; 1090} 1091static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) 1092{ 1093 return 0x00000016U; 1094} 1095static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) 1096{ 1097 return 0x00000025U; 1098} 1099static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) 1100{ 1101 return 0x00000030U; 1102} 1103static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) 1104{ 1105 return 0x00000031U; 1106} 1107static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) 1108{ 1109 return 0x00000032U; 1110} 1111static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) 1112{ 1113 return 0x00000038U; 1114} 1115static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) 1116{ 1117 return 0x00000039U; 1118} 1119static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) 1120{ 1121 return 0x21U; 1122} 1123static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) 1124{ 1125 return 0x0000001aU; 1126} 1127static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) 1128{ 1129 return 0x00000004U; 1130} 1131static inline u32 gr_fecs_host_int_status_r(void) 1132{ 1133 return 0x00409c18U; 1134} 1135static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) 1136{ 1137 return (v & 0x1U) << 16U; 1138} 1139static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) 1140{ 1141 return (v & 0x1U) << 17U; 1142} 1143static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) 1144{ 1145 return (v & 0x1U) << 18U; 1146} 1147static inline u32 
gr_fecs_host_int_clear_r(void) 1148{ 1149 return 0x00409c20U; 1150} 1151static inline u32 gr_fecs_host_int_enable_r(void) 1152{ 1153 return 0x00409c24U; 1154} 1155static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) 1156{ 1157 return 0x10000U; 1158} 1159static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) 1160{ 1161 return 0x20000U; 1162} 1163static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) 1164{ 1165 return 0x40000U; 1166} 1167static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) 1168{ 1169 return 0x80000U; 1170} 1171static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) 1172{ 1173 return 0x00409614U; 1174} 1175static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) 1176{ 1177 return 0x0U; 1178} 1179static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) 1180{ 1181 return 0x0U; 1182} 1183static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) 1184{ 1185 return 0x0U; 1186} 1187static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) 1188{ 1189 return 0x10U; 1190} 1191static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) 1192{ 1193 return 0x20U; 1194} 1195static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) 1196{ 1197 return 0x40U; 1198} 1199static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) 1200{ 1201 return 0x0U; 1202} 1203static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) 1204{ 1205 return 0x100U; 1206} 1207static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) 1208{ 1209 return 0x0U; 1210} 1211static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) 1212{ 1213 return 0x200U; 1214} 1215static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) 1216{ 1217 return 1U; 1218} 1219static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) 1220{ 1221 return (v & 0x1U) << 10U; 1222} 
1223static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) 1224{ 1225 return 0x1U << 10U; 1226} 1227static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) 1228{ 1229 return (r >> 10U) & 0x1U; 1230} 1231static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) 1232{ 1233 return 0x0U; 1234} 1235static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) 1236{ 1237 return 0x400U; 1238} 1239static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) 1240{ 1241 return 0x0040960cU; 1242} 1243static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) 1244{ 1245 return 0x00409800U + i*4U; 1246} 1247static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) 1248{ 1249 return 0x00000010U; 1250} 1251static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) 1252{ 1253 return (v & 0xffffffffU) << 0U; 1254} 1255static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) 1256{ 1257 return 0x00000001U; 1258} 1259static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) 1260{ 1261 return 0x00000002U; 1262} 1263static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) 1264{ 1265 return 0x004098c0U + i*4U; 1266} 1267static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) 1268{ 1269 return (v & 0xffffffffU) << 0U; 1270} 1271static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) 1272{ 1273 return 0x00409840U + i*4U; 1274} 1275static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) 1276{ 1277 return (v & 0xffffffffU) << 0U; 1278} 1279static inline u32 gr_fecs_fs_r(void) 1280{ 1281 return 0x00409604U; 1282} 1283static inline u32 gr_fecs_fs_num_available_gpcs_s(void) 1284{ 1285 return 5U; 1286} 1287static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) 1288{ 1289 return (v & 0x1fU) << 0U; 1290} 1291static inline u32 gr_fecs_fs_num_available_gpcs_m(void) 1292{ 1293 return 0x1fU << 0U; 1294} 1295static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) 1296{ 1297 return (r >> 0U) & 0x1fU; 1298} 1299static inline u32 
gr_fecs_fs_num_available_fbps_s(void) 1300{ 1301 return 5U; 1302} 1303static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) 1304{ 1305 return (v & 0x1fU) << 16U; 1306} 1307static inline u32 gr_fecs_fs_num_available_fbps_m(void) 1308{ 1309 return 0x1fU << 16U; 1310} 1311static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) 1312{ 1313 return (r >> 16U) & 0x1fU; 1314} 1315static inline u32 gr_fecs_cfg_r(void) 1316{ 1317 return 0x00409620U; 1318} 1319static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) 1320{ 1321 return (r >> 0U) & 0xffU; 1322} 1323static inline u32 gr_fecs_rc_lanes_r(void) 1324{ 1325 return 0x00409880U; 1326} 1327static inline u32 gr_fecs_rc_lanes_num_chains_s(void) 1328{ 1329 return 6U; 1330} 1331static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) 1332{ 1333 return (v & 0x3fU) << 0U; 1334} 1335static inline u32 gr_fecs_rc_lanes_num_chains_m(void) 1336{ 1337 return 0x3fU << 0U; 1338} 1339static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) 1340{ 1341 return (r >> 0U) & 0x3fU; 1342} 1343static inline u32 gr_fecs_ctxsw_status_1_r(void) 1344{ 1345 return 0x00409400U; 1346} 1347static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) 1348{ 1349 return 1U; 1350} 1351static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) 1352{ 1353 return (v & 0x1U) << 12U; 1354} 1355static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) 1356{ 1357 return 0x1U << 12U; 1358} 1359static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) 1360{ 1361 return (r >> 12U) & 0x1U; 1362} 1363static inline u32 gr_fecs_arb_ctx_adr_r(void) 1364{ 1365 return 0x00409a24U; 1366} 1367static inline u32 gr_fecs_new_ctx_r(void) 1368{ 1369 return 0x00409b04U; 1370} 1371static inline u32 gr_fecs_new_ctx_ptr_s(void) 1372{ 1373 return 28U; 1374} 1375static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) 1376{ 1377 return (v & 0xfffffffU) << 0U; 1378} 1379static inline u32 gr_fecs_new_ctx_ptr_m(void) 1380{ 1381 return 0xfffffffU << 0U; 1382} 1383static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) 
1384{ 1385 return (r >> 0U) & 0xfffffffU; 1386} 1387static inline u32 gr_fecs_new_ctx_target_s(void) 1388{ 1389 return 2U; 1390} 1391static inline u32 gr_fecs_new_ctx_target_f(u32 v) 1392{ 1393 return (v & 0x3U) << 28U; 1394} 1395static inline u32 gr_fecs_new_ctx_target_m(void) 1396{ 1397 return 0x3U << 28U; 1398} 1399static inline u32 gr_fecs_new_ctx_target_v(u32 r) 1400{ 1401 return (r >> 28U) & 0x3U; 1402} 1403static inline u32 gr_fecs_new_ctx_target_vid_mem_f(void) 1404{ 1405 return 0x0U; 1406} 1407static inline u32 gr_fecs_new_ctx_target_sys_mem_ncoh_f(void) 1408{ 1409 return 0x30000000U; 1410} 1411static inline u32 gr_fecs_new_ctx_target_sys_mem_coh_f(void) 1412{ 1413 return 0x20000000U; 1414} 1415static inline u32 gr_fecs_new_ctx_valid_s(void) 1416{ 1417 return 1U; 1418} 1419static inline u32 gr_fecs_new_ctx_valid_f(u32 v) 1420{ 1421 return (v & 0x1U) << 31U; 1422} 1423static inline u32 gr_fecs_new_ctx_valid_m(void) 1424{ 1425 return 0x1U << 31U; 1426} 1427static inline u32 gr_fecs_new_ctx_valid_v(u32 r) 1428{ 1429 return (r >> 31U) & 0x1U; 1430} 1431static inline u32 gr_fecs_arb_ctx_ptr_r(void) 1432{ 1433 return 0x00409a0cU; 1434} 1435static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) 1436{ 1437 return 28U; 1438} 1439static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) 1440{ 1441 return (v & 0xfffffffU) << 0U; 1442} 1443static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) 1444{ 1445 return 0xfffffffU << 0U; 1446} 1447static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) 1448{ 1449 return (r >> 0U) & 0xfffffffU; 1450} 1451static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) 1452{ 1453 return 2U; 1454} 1455static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) 1456{ 1457 return (v & 0x3U) << 28U; 1458} 1459static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) 1460{ 1461 return 0x3U << 28U; 1462} 1463static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) 1464{ 1465 return (r >> 28U) & 0x3U; 1466} 1467static inline u32 gr_fecs_arb_ctx_ptr_target_vid_mem_f(void) 1468{ 1469 
return 0x0U; 1470} 1471static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(void) 1472{ 1473 return 0x30000000U; 1474} 1475static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_coh_f(void) 1476{ 1477 return 0x20000000U; 1478} 1479static inline u32 gr_fecs_arb_ctx_cmd_r(void) 1480{ 1481 return 0x00409a10U; 1482} 1483static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) 1484{ 1485 return 5U; 1486} 1487static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) 1488{ 1489 return (v & 0x1fU) << 0U; 1490} 1491static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) 1492{ 1493 return 0x1fU << 0U; 1494} 1495static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) 1496{ 1497 return (r >> 0U) & 0x1fU; 1498} 1499static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) 1500{ 1501 return 0x00409c00U; 1502} 1503static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) 1504{ 1505 return 0x00502c04U; 1506} 1507static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) 1508{ 1509 return 0x00502400U; 1510} 1511static inline u32 gr_gpc0_gpccs_ctxsw_mailbox__size_1_v(void) 1512{ 1513 return 0x00000010U; 1514} 1515static inline u32 gr_fecs_ctxsw_idlestate_r(void) 1516{ 1517 return 0x00409420U; 1518} 1519static inline u32 gr_fecs_feature_override_ecc_r(void) 1520{ 1521 return 0x00409658U; 1522} 1523static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) 1524{ 1525 return 0x00502420U; 1526} 1527static inline u32 gr_rstr2d_gpc_map0_r(void) 1528{ 1529 return 0x0040780cU; 1530} 1531static inline u32 gr_rstr2d_gpc_map1_r(void) 1532{ 1533 return 0x00407810U; 1534} 1535static inline u32 gr_rstr2d_gpc_map2_r(void) 1536{ 1537 return 0x00407814U; 1538} 1539static inline u32 gr_rstr2d_gpc_map3_r(void) 1540{ 1541 return 0x00407818U; 1542} 1543static inline u32 gr_rstr2d_gpc_map4_r(void) 1544{ 1545 return 0x0040781cU; 1546} 1547static inline u32 gr_rstr2d_gpc_map5_r(void) 1548{ 1549 return 0x00407820U; 1550} 1551static inline u32 gr_rstr2d_map_table_cfg_r(void) 1552{ 1553 return 0x004078bcU; 1554} 1555static inline u32 
gr_rstr2d_map_table_cfg_row_offset_f(u32 v) 1556{ 1557 return (v & 0xffU) << 0U; 1558} 1559static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) 1560{ 1561 return (v & 0xffU) << 8U; 1562} 1563static inline u32 gr_pd_hww_esr_r(void) 1564{ 1565 return 0x00406018U; 1566} 1567static inline u32 gr_pd_hww_esr_reset_active_f(void) 1568{ 1569 return 0x40000000U; 1570} 1571static inline u32 gr_pd_hww_esr_en_enable_f(void) 1572{ 1573 return 0x80000000U; 1574} 1575static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) 1576{ 1577 return 0x00406028U + i*4U; 1578} 1579static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) 1580{ 1581 return 0x00000004U; 1582} 1583static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) 1584{ 1585 return (v & 0xfU) << 0U; 1586} 1587static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) 1588{ 1589 return (v & 0xfU) << 4U; 1590} 1591static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) 1592{ 1593 return (v & 0xfU) << 8U; 1594} 1595static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) 1596{ 1597 return (v & 0xfU) << 12U; 1598} 1599static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) 1600{ 1601 return (v & 0xfU) << 16U; 1602} 1603static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) 1604{ 1605 return (v & 0xfU) << 20U; 1606} 1607static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) 1608{ 1609 return (v & 0xfU) << 24U; 1610} 1611static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) 1612{ 1613 return (v & 0xfU) << 28U; 1614} 1615static inline u32 gr_pd_ab_dist_cfg0_r(void) 1616{ 1617 return 0x004064c0U; 1618} 1619static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) 1620{ 1621 return 0x80000000U; 1622} 1623static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) 1624{ 1625 return 0x0U; 1626} 1627static inline u32 gr_pd_ab_dist_cfg1_r(void) 1628{ 1629 return 0x004064c4U; 1630} 1631static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) 1632{ 1633 return 0xffffU; 1634} 1635static inline u32 
gr_pd_ab_dist_cfg1_max_output_f(u32 v) 1636{ 1637 return (v & 0xffffU) << 16U; 1638} 1639static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) 1640{ 1641 return 0x00000080U; 1642} 1643static inline u32 gr_pd_ab_dist_cfg2_r(void) 1644{ 1645 return 0x004064c8U; 1646} 1647static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) 1648{ 1649 return (v & 0x1fffU) << 0U; 1650} 1651static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) 1652{ 1653 return 0x00000900U; 1654} 1655static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) 1656{ 1657 return (v & 0x1fffU) << 16U; 1658} 1659static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) 1660{ 1661 return 0x00000020U; 1662} 1663static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) 1664{ 1665 return 0x00000900U; 1666} 1667static inline u32 gr_pd_dist_skip_table_r(u32 i) 1668{ 1669 return 0x004064d0U + i*4U; 1670} 1671static inline u32 gr_pd_dist_skip_table__size_1_v(void) 1672{ 1673 return 0x00000008U; 1674} 1675static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) 1676{ 1677 return (v & 0xffU) << 0U; 1678} 1679static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) 1680{ 1681 return (v & 0xffU) << 8U; 1682} 1683static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) 1684{ 1685 return (v & 0xffU) << 16U; 1686} 1687static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) 1688{ 1689 return (v & 0xffU) << 24U; 1690} 1691static inline u32 gr_ds_debug_r(void) 1692{ 1693 return 0x00405800U; 1694} 1695static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) 1696{ 1697 return 0x0U; 1698} 1699static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) 1700{ 1701 return 0x8000000U; 1702} 1703static inline u32 gr_ds_zbc_color_r_r(void) 1704{ 1705 return 0x00405804U; 1706} 1707static inline u32 gr_ds_zbc_color_r_val_f(u32 v) 1708{ 1709 return (v & 0xffffffffU) << 0U; 1710} 1711static inline u32 gr_ds_zbc_color_g_r(void) 1712{ 1713 return 
0x00405808U; 1714} 1715static inline u32 gr_ds_zbc_color_g_val_f(u32 v) 1716{ 1717 return (v & 0xffffffffU) << 0U; 1718} 1719static inline u32 gr_ds_zbc_color_b_r(void) 1720{ 1721 return 0x0040580cU; 1722} 1723static inline u32 gr_ds_zbc_color_b_val_f(u32 v) 1724{ 1725 return (v & 0xffffffffU) << 0U; 1726} 1727static inline u32 gr_ds_zbc_color_a_r(void) 1728{ 1729 return 0x00405810U; 1730} 1731static inline u32 gr_ds_zbc_color_a_val_f(u32 v) 1732{ 1733 return (v & 0xffffffffU) << 0U; 1734} 1735static inline u32 gr_ds_zbc_color_fmt_r(void) 1736{ 1737 return 0x00405814U; 1738} 1739static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) 1740{ 1741 return (v & 0x7fU) << 0U; 1742} 1743static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) 1744{ 1745 return 0x0U; 1746} 1747static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) 1748{ 1749 return 0x00000001U; 1750} 1751static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) 1752{ 1753 return 0x00000002U; 1754} 1755static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) 1756{ 1757 return 0x00000004U; 1758} 1759static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) 1760{ 1761 return 0x00000028U; 1762} 1763static inline u32 gr_ds_zbc_z_r(void) 1764{ 1765 return 0x00405818U; 1766} 1767static inline u32 gr_ds_zbc_z_val_s(void) 1768{ 1769 return 32U; 1770} 1771static inline u32 gr_ds_zbc_z_val_f(u32 v) 1772{ 1773 return (v & 0xffffffffU) << 0U; 1774} 1775static inline u32 gr_ds_zbc_z_val_m(void) 1776{ 1777 return 0xffffffffU << 0U; 1778} 1779static inline u32 gr_ds_zbc_z_val_v(u32 r) 1780{ 1781 return (r >> 0U) & 0xffffffffU; 1782} 1783static inline u32 gr_ds_zbc_z_val__init_v(void) 1784{ 1785 return 0x00000000U; 1786} 1787static inline u32 gr_ds_zbc_z_val__init_f(void) 1788{ 1789 return 0x0U; 1790} 1791static inline u32 gr_ds_zbc_z_fmt_r(void) 1792{ 1793 return 0x0040581cU; 1794} 1795static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) 1796{ 1797 return (v & 0x1U) << 0U; 1798} 1799static inline u32 
gr_ds_zbc_z_fmt_val_invalid_f(void) 1800{ 1801 return 0x0U; 1802} 1803static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) 1804{ 1805 return 0x00000001U; 1806} 1807static inline u32 gr_ds_zbc_tbl_index_r(void) 1808{ 1809 return 0x00405820U; 1810} 1811static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) 1812{ 1813 return (v & 0xfU) << 0U; 1814} 1815static inline u32 gr_ds_zbc_tbl_ld_r(void) 1816{ 1817 return 0x00405824U; 1818} 1819static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) 1820{ 1821 return 0x0U; 1822} 1823static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) 1824{ 1825 return 0x1U; 1826} 1827static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) 1828{ 1829 return 0x0U; 1830} 1831static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) 1832{ 1833 return 0x4U; 1834} 1835static inline u32 gr_ds_tga_constraintlogic_beta_r(void) 1836{ 1837 return 0x00405830U; 1838} 1839static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) 1840{ 1841 return (v & 0x3fffffU) << 0U; 1842} 1843static inline u32 gr_ds_tga_constraintlogic_alpha_r(void) 1844{ 1845 return 0x0040585cU; 1846} 1847static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) 1848{ 1849 return (v & 0xffffU) << 0U; 1850} 1851static inline u32 gr_ds_hww_esr_r(void) 1852{ 1853 return 0x00405840U; 1854} 1855static inline u32 gr_ds_hww_esr_reset_s(void) 1856{ 1857 return 1U; 1858} 1859static inline u32 gr_ds_hww_esr_reset_f(u32 v) 1860{ 1861 return (v & 0x1U) << 30U; 1862} 1863static inline u32 gr_ds_hww_esr_reset_m(void) 1864{ 1865 return 0x1U << 30U; 1866} 1867static inline u32 gr_ds_hww_esr_reset_v(u32 r) 1868{ 1869 return (r >> 30U) & 0x1U; 1870} 1871static inline u32 gr_ds_hww_esr_reset_task_v(void) 1872{ 1873 return 0x00000001U; 1874} 1875static inline u32 gr_ds_hww_esr_reset_task_f(void) 1876{ 1877 return 0x40000000U; 1878} 1879static inline u32 gr_ds_hww_esr_en_enabled_f(void) 1880{ 1881 return 0x80000000U; 1882} 1883static inline u32 gr_ds_hww_esr_2_r(void) 1884{ 1885 return 0x00405848U; 
1886} 1887static inline u32 gr_ds_hww_esr_2_reset_s(void) 1888{ 1889 return 1U; 1890} 1891static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) 1892{ 1893 return (v & 0x1U) << 30U; 1894} 1895static inline u32 gr_ds_hww_esr_2_reset_m(void) 1896{ 1897 return 0x1U << 30U; 1898} 1899static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) 1900{ 1901 return (r >> 30U) & 0x1U; 1902} 1903static inline u32 gr_ds_hww_esr_2_reset_task_v(void) 1904{ 1905 return 0x00000001U; 1906} 1907static inline u32 gr_ds_hww_esr_2_reset_task_f(void) 1908{ 1909 return 0x40000000U; 1910} 1911static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) 1912{ 1913 return 0x80000000U; 1914} 1915static inline u32 gr_ds_hww_report_mask_r(void) 1916{ 1917 return 0x00405844U; 1918} 1919static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) 1920{ 1921 return 0x1U; 1922} 1923static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) 1924{ 1925 return 0x2U; 1926} 1927static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) 1928{ 1929 return 0x4U; 1930} 1931static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) 1932{ 1933 return 0x8U; 1934} 1935static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) 1936{ 1937 return 0x10U; 1938} 1939static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) 1940{ 1941 return 0x20U; 1942} 1943static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) 1944{ 1945 return 0x40U; 1946} 1947static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) 1948{ 1949 return 0x80U; 1950} 1951static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) 1952{ 1953 return 0x100U; 1954} 1955static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) 1956{ 1957 return 0x200U; 1958} 1959static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) 1960{ 1961 return 0x400U; 1962} 1963static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) 1964{ 1965 return 0x800U; 1966} 1967static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) 
1968{ 1969 return 0x1000U; 1970} 1971static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) 1972{ 1973 return 0x2000U; 1974} 1975static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) 1976{ 1977 return 0x4000U; 1978} 1979static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) 1980{ 1981 return 0x8000U; 1982} 1983static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) 1984{ 1985 return 0x10000U; 1986} 1987static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) 1988{ 1989 return 0x20000U; 1990} 1991static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) 1992{ 1993 return 0x40000U; 1994} 1995static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) 1996{ 1997 return 0x80000U; 1998} 1999static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) 2000{ 2001 return 0x100000U; 2002} 2003static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) 2004{ 2005 return 0x200000U; 2006} 2007static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) 2008{ 2009 return 0x400000U; 2010} 2011static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) 2012{ 2013 return 0x800000U; 2014} 2015static inline u32 gr_ds_hww_report_mask_2_r(void) 2016{ 2017 return 0x0040584cU; 2018} 2019static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void) 2020{ 2021 return 0x1U; 2022} 2023static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) 2024{ 2025 return 0x00405870U + i*4U; 2026} 2027static inline u32 gr_scc_bundle_cb_base_r(void) 2028{ 2029 return 0x00408004U; 2030} 2031static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) 2032{ 2033 return (v & 0xffffffffU) << 0U; 2034} 2035static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) 2036{ 2037 return 0x00000008U; 2038} 2039static inline u32 gr_scc_bundle_cb_size_r(void) 2040{ 2041 return 0x00408008U; 2042} 2043static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) 2044{ 2045 return (v & 0x7ffU) << 0U; 2046} 2047static inline u32 
gr_scc_bundle_cb_size_div_256b__prod_v(void) 2048{ 2049 return 0x00000030U; 2050} 2051static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) 2052{ 2053 return 0x00000100U; 2054} 2055static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) 2056{ 2057 return 0x00000000U; 2058} 2059static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) 2060{ 2061 return 0x0U; 2062} 2063static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) 2064{ 2065 return 0x80000000U; 2066} 2067static inline u32 gr_scc_pagepool_base_r(void) 2068{ 2069 return 0x0040800cU; 2070} 2071static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) 2072{ 2073 return (v & 0xffffffffU) << 0U; 2074} 2075static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) 2076{ 2077 return 0x00000008U; 2078} 2079static inline u32 gr_scc_pagepool_r(void) 2080{ 2081 return 0x00408010U; 2082} 2083static inline u32 gr_scc_pagepool_total_pages_f(u32 v) 2084{ 2085 return (v & 0x3ffU) << 0U; 2086} 2087static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) 2088{ 2089 return 0x00000000U; 2090} 2091static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) 2092{ 2093 return 0x00000200U; 2094} 2095static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) 2096{ 2097 return 0x00000100U; 2098} 2099static inline u32 gr_scc_pagepool_max_valid_pages_s(void) 2100{ 2101 return 10U; 2102} 2103static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) 2104{ 2105 return (v & 0x3ffU) << 10U; 2106} 2107static inline u32 gr_scc_pagepool_max_valid_pages_m(void) 2108{ 2109 return 0x3ffU << 10U; 2110} 2111static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) 2112{ 2113 return (r >> 10U) & 0x3ffU; 2114} 2115static inline u32 gr_scc_pagepool_valid_true_f(void) 2116{ 2117 return 0x80000000U; 2118} 2119static inline u32 gr_scc_init_r(void) 2120{ 2121 return 0x0040802cU; 2122} 2123static inline u32 gr_scc_init_ram_trigger_f(void) 2124{ 2125 return 0x1U; 2126} 2127static inline u32 
gr_scc_hww_esr_r(void) 2128{ 2129 return 0x00408030U; 2130} 2131static inline u32 gr_scc_hww_esr_reset_active_f(void) 2132{ 2133 return 0x40000000U; 2134} 2135static inline u32 gr_scc_hww_esr_en_enable_f(void) 2136{ 2137 return 0x80000000U; 2138} 2139static inline u32 gr_sked_hww_esr_r(void) 2140{ 2141 return 0x00407020U; 2142} 2143static inline u32 gr_sked_hww_esr_reset_active_f(void) 2144{ 2145 return 0x40000000U; 2146} 2147static inline u32 gr_cwd_fs_r(void) 2148{ 2149 return 0x00405b00U; 2150} 2151static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) 2152{ 2153 return (v & 0xffU) << 0U; 2154} 2155static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) 2156{ 2157 return (v & 0xffU) << 8U; 2158} 2159static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) 2160{ 2161 return 0x00405b60U + i*4U; 2162} 2163static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) 2164{ 2165 return 4U; 2166} 2167static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) 2168{ 2169 return (v & 0xfU) << 0U; 2170} 2171static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) 2172{ 2173 return 4U; 2174} 2175static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) 2176{ 2177 return (v & 0xfU) << 4U; 2178} 2179static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) 2180{ 2181 return (v & 0xfU) << 8U; 2182} 2183static inline u32 gr_cwd_sm_id_r(u32 i) 2184{ 2185 return 0x00405ba0U + i*4U; 2186} 2187static inline u32 gr_cwd_sm_id__size_1_v(void) 2188{ 2189 return 0x00000010U; 2190} 2191static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) 2192{ 2193 return (v & 0xffU) << 0U; 2194} 2195static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) 2196{ 2197 return (v & 0xffU) << 8U; 2198} 2199static inline u32 gr_gpc0_fs_gpc_r(void) 2200{ 2201 return 0x00502608U; 2202} 2203static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) 2204{ 2205 return (r >> 0U) & 0x1fU; 2206} 2207static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) 2208{ 2209 return (r >> 16U) & 0x1fU; 2210} 2211static inline u32 gr_gpc0_cfg_r(void) 2212{ 2213 return 0x00502620U; 2214} 2215static inline u32 
gr_gpc0_cfg_imem_sz_v(u32 r) 2216{ 2217 return (r >> 0U) & 0xffU; 2218} 2219static inline u32 gr_gpccs_rc_lanes_r(void) 2220{ 2221 return 0x00502880U; 2222} 2223static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) 2224{ 2225 return 6U; 2226} 2227static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) 2228{ 2229 return (v & 0x3fU) << 0U; 2230} 2231static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) 2232{ 2233 return 0x3fU << 0U; 2234} 2235static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) 2236{ 2237 return (r >> 0U) & 0x3fU; 2238} 2239static inline u32 gr_gpccs_rc_lane_size_r(void) 2240{ 2241 return 0x00502910U; 2242} 2243static inline u32 gr_gpccs_rc_lane_size_v_s(void) 2244{ 2245 return 24U; 2246} 2247static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) 2248{ 2249 return (v & 0xffffffU) << 0U; 2250} 2251static inline u32 gr_gpccs_rc_lane_size_v_m(void) 2252{ 2253 return 0xffffffU << 0U; 2254} 2255static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) 2256{ 2257 return (r >> 0U) & 0xffffffU; 2258} 2259static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) 2260{ 2261 return 0x00000000U; 2262} 2263static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) 2264{ 2265 return 0x0U; 2266} 2267static inline u32 gr_gpc0_zcull_fs_r(void) 2268{ 2269 return 0x00500910U; 2270} 2271static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) 2272{ 2273 return (v & 0x1ffU) << 0U; 2274} 2275static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) 2276{ 2277 return (v & 0xfU) << 16U; 2278} 2279static inline u32 gr_gpc0_zcull_ram_addr_r(void) 2280{ 2281 return 0x00500914U; 2282} 2283static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) 2284{ 2285 return (v & 0xfU) << 0U; 2286} 2287static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) 2288{ 2289 return (v & 0xfU) << 8U; 2290} 2291static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) 2292{ 2293 return 0x00500918U; 2294} 2295static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) 2296{ 2297 return (v & 
0xffffffU) << 0U; 2298} 2299static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) 2300{ 2301 return 0x00800000U; 2302} 2303static inline u32 gr_gpc0_zcull_total_ram_size_r(void) 2304{ 2305 return 0x00500920U; 2306} 2307static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) 2308{ 2309 return (v & 0xffffU) << 0U; 2310} 2311static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) 2312{ 2313 return 0x00500a04U + i*32U; 2314} 2315static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) 2316{ 2317 return 0x00000040U; 2318} 2319static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) 2320{ 2321 return 0x00000010U; 2322} 2323static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) 2324{ 2325 return 0x00500c10U + i*4U; 2326} 2327static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) 2328{ 2329 return (v & 0xffU) << 0U; 2330} 2331static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) 2332{ 2333 return 0x00500c30U + i*4U; 2334} 2335static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) 2336{ 2337 return (r >> 0U) & 0xffU; 2338} 2339static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) 2340{ 2341 return 0x00504088U; 2342} 2343static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) 2344{ 2345 return (v & 0xffffU) << 0U; 2346} 2347static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) 2348{ 2349 return 0x00504698U; 2350} 2351static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_f(u32 v) 2352{ 2353 return (v & 0xffffU) << 0U; 2354} 2355static inline u32 gr_gpc0_tpc0_sm_arch_r(void) 2356{ 2357 return 0x0050469cU; 2358} 2359static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) 2360{ 2361 return (r >> 0U) & 0xffU; 2362} 2363static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) 2364{ 2365 return (r >> 8U) & 0xfffU; 2366} 2367static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) 2368{ 2369 return (r >> 20U) & 0xfffU; 2370} 2371static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) 2372{ 2373 return 0x00503018U; 2374} 2375static inline u32 
gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) 2376{ 2377 return 0x1U << 0U; 2378} 2379static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) 2380{ 2381 return 0x1U; 2382} 2383static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) 2384{ 2385 return 0x005030c0U; 2386} 2387static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) 2388{ 2389 return (v & 0x3fffffU) << 0U; 2390} 2391static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) 2392{ 2393 return 0x3fffffU << 0U; 2394} 2395static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) 2396{ 2397 return 0x00000320U; 2398} 2399static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) 2400{ 2401 return 0x00000ba8U; 2402} 2403static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) 2404{ 2405 return 0x00000020U; 2406} 2407static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) 2408{ 2409 return 0x005030f4U; 2410} 2411static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) 2412{ 2413 return 0x005030e4U; 2414} 2415static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) 2416{ 2417 return (v & 0xffffU) << 0U; 2418} 2419static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) 2420{ 2421 return 0xffffU << 0U; 2422} 2423static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) 2424{ 2425 return 0x00000800U; 2426} 2427static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) 2428{ 2429 return 0x00000020U; 2430} 2431static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) 2432{ 2433 return 0x005030f8U; 2434} 2435static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) 2436{ 2437 return 0x005030f0U; 2438} 2439static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) 2440{ 2441 return (v & 0x3fffffU) << 0U; 2442} 2443static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) 2444{ 2445 return 0x00000320U; 2446} 2447static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) 2448{ 2449 return 0x00419b00U; 2450} 2451static 
inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) 2452{ 2453 return (v & 0xffffffffU) << 0U; 2454} 2455static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) 2456{ 2457 return 0x00419b04U; 2458} 2459static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) 2460{ 2461 return 21U; 2462} 2463static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) 2464{ 2465 return (v & 0x1fffffU) << 0U; 2466} 2467static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) 2468{ 2469 return 0x1fffffU << 0U; 2470} 2471static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) 2472{ 2473 return (r >> 0U) & 0x1fffffU; 2474} 2475static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) 2476{ 2477 return 0x80U; 2478} 2479static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) 2480{ 2481 return 1U; 2482} 2483static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) 2484{ 2485 return (v & 0x1U) << 31U; 2486} 2487static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) 2488{ 2489 return 0x1U << 31U; 2490} 2491static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) 2492{ 2493 return (r >> 31U) & 0x1U; 2494} 2495static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) 2496{ 2497 return 0x80000000U; 2498} 2499static inline u32 gr_gpccs_falcon_addr_r(void) 2500{ 2501 return 0x0041a0acU; 2502} 2503static inline u32 gr_gpccs_falcon_addr_lsb_s(void) 2504{ 2505 return 6U; 2506} 2507static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) 2508{ 2509 return (v & 0x3fU) << 0U; 2510} 2511static inline u32 gr_gpccs_falcon_addr_lsb_m(void) 2512{ 2513 return 0x3fU << 0U; 2514} 2515static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) 2516{ 2517 return (r >> 0U) & 0x3fU; 2518} 2519static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) 2520{ 2521 return 0x00000000U; 2522} 2523static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) 2524{ 2525 return 0x0U; 2526} 2527static inline u32 gr_gpccs_falcon_addr_msb_s(void) 2528{ 2529 return 6U; 2530} 2531static 
inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) 2532{ 2533 return (v & 0x3fU) << 6U; 2534} 2535static inline u32 gr_gpccs_falcon_addr_msb_m(void) 2536{ 2537 return 0x3fU << 6U; 2538} 2539static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) 2540{ 2541 return (r >> 6U) & 0x3fU; 2542} 2543static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) 2544{ 2545 return 0x00000000U; 2546} 2547static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) 2548{ 2549 return 0x0U; 2550} 2551static inline u32 gr_gpccs_falcon_addr_ext_s(void) 2552{ 2553 return 12U; 2554} 2555static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) 2556{ 2557 return (v & 0xfffU) << 0U; 2558} 2559static inline u32 gr_gpccs_falcon_addr_ext_m(void) 2560{ 2561 return 0xfffU << 0U; 2562} 2563static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) 2564{ 2565 return (r >> 0U) & 0xfffU; 2566} 2567static inline u32 gr_gpccs_cpuctl_r(void) 2568{ 2569 return 0x0041a100U; 2570} 2571static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) 2572{ 2573 return (v & 0x1U) << 1U; 2574} 2575static inline u32 gr_gpccs_dmactl_r(void) 2576{ 2577 return 0x0041a10cU; 2578} 2579static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) 2580{ 2581 return (v & 0x1U) << 0U; 2582} 2583static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) 2584{ 2585 return 0x1U << 1U; 2586} 2587static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) 2588{ 2589 return 0x1U << 2U; 2590} 2591static inline u32 gr_gpccs_imemc_r(u32 i) 2592{ 2593 return 0x0041a180U + i*16U; 2594} 2595static inline u32 gr_gpccs_imemc_offs_f(u32 v) 2596{ 2597 return (v & 0x3fU) << 2U; 2598} 2599static inline u32 gr_gpccs_imemc_blk_f(u32 v) 2600{ 2601 return (v & 0xffU) << 8U; 2602} 2603static inline u32 gr_gpccs_imemc_aincw_f(u32 v) 2604{ 2605 return (v & 0x1U) << 24U; 2606} 2607static inline u32 gr_gpccs_imemd_r(u32 i) 2608{ 2609 return 0x0041a184U + i*16U; 2610} 2611static inline u32 gr_gpccs_imemt_r(u32 i) 2612{ 2613 return 0x0041a188U + i*16U; 2614} 2615static inline u32 
gr_gpccs_imemt__size_1_v(void) 2616{ 2617 return 0x00000004U; 2618} 2619static inline u32 gr_gpccs_imemt_tag_f(u32 v) 2620{ 2621 return (v & 0xffffU) << 0U; 2622} 2623static inline u32 gr_gpccs_dmemc_r(u32 i) 2624{ 2625 return 0x0041a1c0U + i*8U; 2626} 2627static inline u32 gr_gpccs_dmemc_offs_f(u32 v) 2628{ 2629 return (v & 0x3fU) << 2U; 2630} 2631static inline u32 gr_gpccs_dmemc_blk_f(u32 v) 2632{ 2633 return (v & 0xffU) << 8U; 2634} 2635static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) 2636{ 2637 return (v & 0x1U) << 24U; 2638} 2639static inline u32 gr_gpccs_dmemd_r(u32 i) 2640{ 2641 return 0x0041a1c4U + i*8U; 2642} 2643static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) 2644{ 2645 return 0x0041a800U + i*4U; 2646} 2647static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) 2648{ 2649 return (v & 0xffffffffU) << 0U; 2650} 2651static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) 2652{ 2653 return 0x00418e24U; 2654} 2655static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) 2656{ 2657 return 32U; 2658} 2659static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) 2660{ 2661 return (v & 0xffffffffU) << 0U; 2662} 2663static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) 2664{ 2665 return 0xffffffffU << 0U; 2666} 2667static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) 2668{ 2669 return (r >> 0U) & 0xffffffffU; 2670} 2671static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) 2672{ 2673 return 0x00000000U; 2674} 2675static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) 2676{ 2677 return 0x0U; 2678} 2679static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) 2680{ 2681 return 0x00418e28U; 2682} 2683static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) 2684{ 2685 return 11U; 2686} 2687static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) 2688{ 2689 return (v & 0x7ffU) << 0U; 2690} 2691static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) 2692{ 2693 return 0x7ffU << 0U; 2694} 
2695static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) 2696{ 2697 return (r >> 0U) & 0x7ffU; 2698} 2699static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) 2700{ 2701 return 0x00000030U; 2702} 2703static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) 2704{ 2705 return 0x30U; 2706} 2707static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) 2708{ 2709 return 1U; 2710} 2711static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) 2712{ 2713 return (v & 0x1U) << 31U; 2714} 2715static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) 2716{ 2717 return 0x1U << 31U; 2718} 2719static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) 2720{ 2721 return (r >> 31U) & 0x1U; 2722} 2723static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) 2724{ 2725 return 0x00000000U; 2726} 2727static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) 2728{ 2729 return 0x0U; 2730} 2731static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) 2732{ 2733 return 0x00000001U; 2734} 2735static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) 2736{ 2737 return 0x80000000U; 2738} 2739static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) 2740{ 2741 return 0x005001dcU; 2742} 2743static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) 2744{ 2745 return (v & 0xffffU) << 0U; 2746} 2747static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) 2748{ 2749 return 0x00000de0U; 2750} 2751static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) 2752{ 2753 return 0x00000100U; 2754} 2755static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) 2756{ 2757 return 0x005001d8U; 2758} 2759static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) 2760{ 2761 return (v & 0xffffffffU) << 0U; 2762} 2763static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) 2764{ 2765 return 0x00000008U; 2766} 2767static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) 
2768{ 2769 return 0x004181e4U; 2770} 2771static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) 2772{ 2773 return (v & 0xfffU) << 0U; 2774} 2775static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) 2776{ 2777 return 0x00000100U; 2778} 2779static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) 2780{ 2781 return 0x0041befcU; 2782} 2783static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) 2784{ 2785 return (v & 0xfffU) << 0U; 2786} 2787static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) 2788{ 2789 return 0x00418ea0U + i*4U; 2790} 2791static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) 2792{ 2793 return (v & 0x3fffffU) << 0U; 2794} 2795static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) 2796{ 2797 return 0x3fffffU << 0U; 2798} 2799static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) 2800{ 2801 return 0x00418010U + i*4U; 2802} 2803static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) 2804{ 2805 return (v & 0xffffffffU) << 0U; 2806} 2807static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) 2808{ 2809 return 0x0041804cU + i*4U; 2810} 2811static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) 2812{ 2813 return (v & 0xffffffffU) << 0U; 2814} 2815static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) 2816{ 2817 return 0x00418088U + i*4U; 2818} 2819static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) 2820{ 2821 return (v & 0xffffffffU) << 0U; 2822} 2823static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) 2824{ 2825 return 0x004180c4U + i*4U; 2826} 2827static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) 2828{ 2829 return (v & 0xffffffffU) << 0U; 2830} 2831static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) 2832{ 2833 return 0x00500100U; 2834} 2835static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) 2836{ 2837 return 0x00418110U + i*4U; 2838} 2839static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) 2840{ 2841 return (v & 0xffffffffU) << 0U; 2842} 2843static inline u32 
gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) 2844{ 2845 return 0x0050014cU; 2846} 2847static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) 2848{ 2849 return 0x00418810U; 2850} 2851static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) 2852{ 2853 return (v & 0xfffffffU) << 0U; 2854} 2855static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) 2856{ 2857 return 0x0000000cU; 2858} 2859static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) 2860{ 2861 return 0x80000000U; 2862} 2863static inline u32 gr_crstr_gpc_map0_r(void) 2864{ 2865 return 0x00418b08U; 2866} 2867static inline u32 gr_crstr_gpc_map0_tile0_f(u32 v) 2868{ 2869 return (v & 0x7U) << 0U; 2870} 2871static inline u32 gr_crstr_gpc_map0_tile1_f(u32 v) 2872{ 2873 return (v & 0x7U) << 5U; 2874} 2875static inline u32 gr_crstr_gpc_map0_tile2_f(u32 v) 2876{ 2877 return (v & 0x7U) << 10U; 2878} 2879static inline u32 gr_crstr_gpc_map0_tile3_f(u32 v) 2880{ 2881 return (v & 0x7U) << 15U; 2882} 2883static inline u32 gr_crstr_gpc_map0_tile4_f(u32 v) 2884{ 2885 return (v & 0x7U) << 20U; 2886} 2887static inline u32 gr_crstr_gpc_map0_tile5_f(u32 v) 2888{ 2889 return (v & 0x7U) << 25U; 2890} 2891static inline u32 gr_crstr_gpc_map1_r(void) 2892{ 2893 return 0x00418b0cU; 2894} 2895static inline u32 gr_crstr_gpc_map1_tile6_f(u32 v) 2896{ 2897 return (v & 0x7U) << 0U; 2898} 2899static inline u32 gr_crstr_gpc_map1_tile7_f(u32 v) 2900{ 2901 return (v & 0x7U) << 5U; 2902} 2903static inline u32 gr_crstr_gpc_map1_tile8_f(u32 v) 2904{ 2905 return (v & 0x7U) << 10U; 2906} 2907static inline u32 gr_crstr_gpc_map1_tile9_f(u32 v) 2908{ 2909 return (v & 0x7U) << 15U; 2910} 2911static inline u32 gr_crstr_gpc_map1_tile10_f(u32 v) 2912{ 2913 return (v & 0x7U) << 20U; 2914} 2915static inline u32 gr_crstr_gpc_map1_tile11_f(u32 v) 2916{ 2917 return (v & 0x7U) << 25U; 2918} 2919static inline u32 gr_crstr_gpc_map2_r(void) 2920{ 2921 return 0x00418b10U; 2922} 2923static inline u32 
gr_crstr_gpc_map2_tile12_f(u32 v) 2924{ 2925 return (v & 0x7U) << 0U; 2926} 2927static inline u32 gr_crstr_gpc_map2_tile13_f(u32 v) 2928{ 2929 return (v & 0x7U) << 5U; 2930} 2931static inline u32 gr_crstr_gpc_map2_tile14_f(u32 v) 2932{ 2933 return (v & 0x7U) << 10U; 2934} 2935static inline u32 gr_crstr_gpc_map2_tile15_f(u32 v) 2936{ 2937 return (v & 0x7U) << 15U; 2938} 2939static inline u32 gr_crstr_gpc_map2_tile16_f(u32 v) 2940{ 2941 return (v & 0x7U) << 20U; 2942} 2943static inline u32 gr_crstr_gpc_map2_tile17_f(u32 v) 2944{ 2945 return (v & 0x7U) << 25U; 2946} 2947static inline u32 gr_crstr_gpc_map3_r(void) 2948{ 2949 return 0x00418b14U; 2950} 2951static inline u32 gr_crstr_gpc_map3_tile18_f(u32 v) 2952{ 2953 return (v & 0x7U) << 0U; 2954} 2955static inline u32 gr_crstr_gpc_map3_tile19_f(u32 v) 2956{ 2957 return (v & 0x7U) << 5U; 2958} 2959static inline u32 gr_crstr_gpc_map3_tile20_f(u32 v) 2960{ 2961 return (v & 0x7U) << 10U; 2962} 2963static inline u32 gr_crstr_gpc_map3_tile21_f(u32 v) 2964{ 2965 return (v & 0x7U) << 15U; 2966} 2967static inline u32 gr_crstr_gpc_map3_tile22_f(u32 v) 2968{ 2969 return (v & 0x7U) << 20U; 2970} 2971static inline u32 gr_crstr_gpc_map3_tile23_f(u32 v) 2972{ 2973 return (v & 0x7U) << 25U; 2974} 2975static inline u32 gr_crstr_gpc_map4_r(void) 2976{ 2977 return 0x00418b18U; 2978} 2979static inline u32 gr_crstr_gpc_map4_tile24_f(u32 v) 2980{ 2981 return (v & 0x7U) << 0U; 2982} 2983static inline u32 gr_crstr_gpc_map4_tile25_f(u32 v) 2984{ 2985 return (v & 0x7U) << 5U; 2986} 2987static inline u32 gr_crstr_gpc_map4_tile26_f(u32 v) 2988{ 2989 return (v & 0x7U) << 10U; 2990} 2991static inline u32 gr_crstr_gpc_map4_tile27_f(u32 v) 2992{ 2993 return (v & 0x7U) << 15U; 2994} 2995static inline u32 gr_crstr_gpc_map4_tile28_f(u32 v) 2996{ 2997 return (v & 0x7U) << 20U; 2998} 2999static inline u32 gr_crstr_gpc_map4_tile29_f(u32 v) 3000{ 3001 return (v & 0x7U) << 25U; 3002} 3003static inline u32 gr_crstr_gpc_map5_r(void) 3004{ 3005 return 
0x00418b1cU; 3006} 3007static inline u32 gr_crstr_gpc_map5_tile30_f(u32 v) 3008{ 3009 return (v & 0x7U) << 0U; 3010} 3011static inline u32 gr_crstr_gpc_map5_tile31_f(u32 v) 3012{ 3013 return (v & 0x7U) << 5U; 3014} 3015static inline u32 gr_crstr_gpc_map5_tile32_f(u32 v) 3016{ 3017 return (v & 0x7U) << 10U; 3018} 3019static inline u32 gr_crstr_gpc_map5_tile33_f(u32 v) 3020{ 3021 return (v & 0x7U) << 15U; 3022} 3023static inline u32 gr_crstr_gpc_map5_tile34_f(u32 v) 3024{ 3025 return (v & 0x7U) << 20U; 3026} 3027static inline u32 gr_crstr_gpc_map5_tile35_f(u32 v) 3028{ 3029 return (v & 0x7U) << 25U; 3030} 3031static inline u32 gr_crstr_map_table_cfg_r(void) 3032{ 3033 return 0x00418bb8U; 3034} 3035static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) 3036{ 3037 return (v & 0xffU) << 0U; 3038} 3039static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) 3040{ 3041 return (v & 0xffU) << 8U; 3042} 3043static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_r(void) 3044{ 3045 return 0x00418980U; 3046} 3047static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f(u32 v) 3048{ 3049 return (v & 0x7U) << 0U; 3050} 3051static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_1_f(u32 v) 3052{ 3053 return (v & 0x7U) << 4U; 3054} 3055static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_2_f(u32 v) 3056{ 3057 return (v & 0x7U) << 8U; 3058} 3059static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_3_f(u32 v) 3060{ 3061 return (v & 0x7U) << 12U; 3062} 3063static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_4_f(u32 v) 3064{ 3065 return (v & 0x7U) << 16U; 3066} 3067static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_5_f(u32 v) 3068{ 3069 return (v & 0x7U) << 20U; 3070} 3071static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_6_f(u32 v) 3072{ 3073 return (v & 0x7U) << 24U; 3074} 3075static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_7_f(u32 v) 3076{ 3077 return (v & 0x7U) << 28U; 3078} 3079static inline u32 
gr_gpcs_zcull_sm_in_gpc_number_map1_r(void) 3080{ 3081 return 0x00418984U; 3082} 3083static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f(u32 v) 3084{ 3085 return (v & 0x7U) << 0U; 3086} 3087static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_9_f(u32 v) 3088{ 3089 return (v & 0x7U) << 4U; 3090} 3091static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_10_f(u32 v) 3092{ 3093 return (v & 0x7U) << 8U; 3094} 3095static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_11_f(u32 v) 3096{ 3097 return (v & 0x7U) << 12U; 3098} 3099static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_12_f(u32 v) 3100{ 3101 return (v & 0x7U) << 16U; 3102} 3103static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_13_f(u32 v) 3104{ 3105 return (v & 0x7U) << 20U; 3106} 3107static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_14_f(u32 v) 3108{ 3109 return (v & 0x7U) << 24U; 3110} 3111static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_15_f(u32 v) 3112{ 3113 return (v & 0x7U) << 28U; 3114} 3115static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_r(void) 3116{ 3117 return 0x00418988U; 3118} 3119static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f(u32 v) 3120{ 3121 return (v & 0x7U) << 0U; 3122} 3123static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_17_f(u32 v) 3124{ 3125 return (v & 0x7U) << 4U; 3126} 3127static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_18_f(u32 v) 3128{ 3129 return (v & 0x7U) << 8U; 3130} 3131static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_19_f(u32 v) 3132{ 3133 return (v & 0x7U) << 12U; 3134} 3135static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_20_f(u32 v) 3136{ 3137 return (v & 0x7U) << 16U; 3138} 3139static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_21_f(u32 v) 3140{ 3141 return (v & 0x7U) << 20U; 3142} 3143static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_22_f(u32 v) 3144{ 3145 return (v & 0x7U) << 24U; 3146} 3147static inline u32 
gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_s(void) 3148{ 3149 return 3U; 3150} 3151static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_f(u32 v) 3152{ 3153 return (v & 0x7U) << 28U; 3154} 3155static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_m(void) 3156{ 3157 return 0x7U << 28U; 3158} 3159static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_v(u32 r) 3160{ 3161 return (r >> 28U) & 0x7U; 3162} 3163static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_r(void) 3164{ 3165 return 0x0041898cU; 3166} 3167static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f(u32 v) 3168{ 3169 return (v & 0x7U) << 0U; 3170} 3171static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_25_f(u32 v) 3172{ 3173 return (v & 0x7U) << 4U; 3174} 3175static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_26_f(u32 v) 3176{ 3177 return (v & 0x7U) << 8U; 3178} 3179static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_27_f(u32 v) 3180{ 3181 return (v & 0x7U) << 12U; 3182} 3183static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_28_f(u32 v) 3184{ 3185 return (v & 0x7U) << 16U; 3186} 3187static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_29_f(u32 v) 3188{ 3189 return (v & 0x7U) << 20U; 3190} 3191static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_30_f(u32 v) 3192{ 3193 return (v & 0x7U) << 24U; 3194} 3195static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_31_f(u32 v) 3196{ 3197 return (v & 0x7U) << 28U; 3198} 3199static inline u32 gr_gpcs_gpm_pd_cfg_r(void) 3200{ 3201 return 0x00418c6cU; 3202} 3203static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_disable_f(void) 3204{ 3205 return 0x0U; 3206} 3207static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_enable_f(void) 3208{ 3209 return 0x1U; 3210} 3211static inline u32 gr_gpcs_gcc_pagepool_base_r(void) 3212{ 3213 return 0x00419004U; 3214} 3215static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) 3216{ 3217 return (v & 0xffffffffU) << 0U; 3218} 3219static inline u32 
gr_gpcs_gcc_pagepool_r(void) 3220{ 3221 return 0x00419008U; 3222} 3223static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) 3224{ 3225 return (v & 0x3ffU) << 0U; 3226} 3227static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) 3228{ 3229 return 0x0041980cU; 3230} 3231static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) 3232{ 3233 return 0x10U; 3234} 3235static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) 3236{ 3237 return 0x00419848U; 3238} 3239static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) 3240{ 3241 return (v & 0xfffffffU) << 0U; 3242} 3243static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) 3244{ 3245 return (v & 0x1U) << 28U; 3246} 3247static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) 3248{ 3249 return 0x10000000U; 3250} 3251static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) 3252{ 3253 return 0x00419c00U; 3254} 3255static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) 3256{ 3257 return 0x0U; 3258} 3259static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) 3260{ 3261 return 0x8U; 3262} 3263static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) 3264{ 3265 return 0x00419c2cU; 3266} 3267static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) 3268{ 3269 return (v & 0xfffffffU) << 0U; 3270} 3271static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) 3272{ 3273 return (v & 0x1U) << 28U; 3274} 3275static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) 3276{ 3277 return 0x10000000U; 3278} 3279static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(void) 3280{ 3281 return 0x00419e44U; 3282} 3283static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_error_report_f(void) 3284{ 3285 return 0x2U; 3286} 3287static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_api_stack_error_report_f(void) 3288{ 3289 return 0x4U; 3290} 3291static inline u32 
gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_ret_empty_stack_error_report_f(void) 3292{ 3293 return 0x8U; 3294} 3295static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_wrap_report_f(void) 3296{ 3297 return 0x10U; 3298} 3299static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_pc_report_f(void) 3300{ 3301 return 0x20U; 3302} 3303static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_overflow_report_f(void) 3304{ 3305 return 0x40U; 3306} 3307static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_immc_addr_report_f(void) 3308{ 3309 return 0x80U; 3310} 3311static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_reg_report_f(void) 3312{ 3313 return 0x100U; 3314} 3315static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) 3316{ 3317 return 0x200U; 3318} 3319static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_sph_instr_combo_report_f(void) 3320{ 3321 return 0x400U; 3322} 3323static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) 3324{ 3325 return 0x800U; 3326} 3327static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_report_f(void) 3328{ 3329 return 0x1000U; 3330} 3331static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_reg_report_f(void) 3332{ 3333 return 0x2000U; 3334} 3335static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_addr_report_f(void) 3336{ 3337 return 0x4000U; 3338} 3339static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_addr_report_f(void) 3340{ 3341 return 0x8000U; 3342} 3343static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) 3344{ 3345 return 0x10000U; 3346} 3347static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param2_report_f(void) 3348{ 3349 return 0x20000U; 3350} 3351static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) 3352{ 3353 return 
0x40000U; 3354} 3355static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_mmu_fault_report_f(void) 3356{ 3357 return 0x800000U; 3358} 3359static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_overflow_report_f(void) 3360{ 3361 return 0x400000U; 3362} 3363static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_geometry_sm_error_report_f(void) 3364{ 3365 return 0x80000U; 3366} 3367static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_divergent_report_f(void) 3368{ 3369 return 0x100000U; 3370} 3371static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_report_mask_r(void) 3372{ 3373 return 0x00504644U; 3374} 3375static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r(void) 3376{ 3377 return 0x00419e4cU; 3378} 3379static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_sm_to_sm_fault_report_f(void) 3380{ 3381 return 0x1U; 3382} 3383static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_l1_error_report_f(void) 3384{ 3385 return 0x2U; 3386} 3387static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) 3388{ 3389 return 0x4U; 3390} 3391static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_physical_stack_overflow_error_report_f(void) 3392{ 3393 return 0x8U; 3394} 3395static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_int_report_f(void) 3396{ 3397 return 0x10U; 3398} 3399static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_ecc_sec_error_report_f(void) 3400{ 3401 return 0x20000000U; 3402} 3403static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_ecc_ded_error_report_f(void) 3404{ 3405 return 0x40000000U; 3406} 3407static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_pause_report_f(void) 3408{ 3409 return 0x20U; 3410} 3411static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_single_step_complete_report_f(void) 3412{ 3413 return 0x40U; 3414} 3415static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_report_mask_r(void) 3416{ 3417 return 0x0050464cU; 3418} 
3419static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) 3420{ 3421 return 0x00419d0cU; 3422} 3423static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) 3424{ 3425 return 0x2U; 3426} 3427static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) 3428{ 3429 return 0x1U; 3430} 3431static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 3432{ 3433 return 0x0050450cU; 3434} 3435static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) 3436{ 3437 return (r >> 1U) & 0x1U; 3438} 3439static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) 3440{ 3441 return 0x2U; 3442} 3443static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) 3444{ 3445 return 0x0041ac94U; 3446} 3447static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) 3448{ 3449 return (v & 0xffU) << 16U; 3450} 3451static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) 3452{ 3453 return 0x00502c90U; 3454} 3455static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) 3456{ 3457 return (r >> 2U) & 0x1U; 3458} 3459static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) 3460{ 3461 return (r >> 16U) & 0xffU; 3462} 3463static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) 3464{ 3465 return 0x00000001U; 3466} 3467static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) 3468{ 3469 return 0x00504508U; 3470} 3471static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) 3472{ 3473 return (r >> 0U) & 0x1U; 3474} 3475static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) 3476{ 3477 return 0x00000001U; 3478} 3479static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) 3480{ 3481 return (r >> 1U) & 0x1U; 3482} 3483static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) 3484{ 3485 return 0x00000001U; 3486} 3487static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_r(void) 3488{ 3489 return 0x00504610U; 3490} 3491static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_m(void) 3492{ 3493 return 0x1U 
<< 0U; 3494} 3495static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_v(u32 r) 3496{ 3497 return (r >> 0U) & 0x1U; 3498} 3499static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_v(void) 3500{ 3501 return 0x00000001U; 3502} 3503static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_f(void) 3504{ 3505 return 0x1U; 3506} 3507static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_v(void) 3508{ 3509 return 0x00000000U; 3510} 3511static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_f(void) 3512{ 3513 return 0x0U; 3514} 3515static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f(void) 3516{ 3517 return 0x80000000U; 3518} 3519static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_disable_f(void) 3520{ 3521 return 0x0U; 3522} 3523static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_run_trigger_task_f(void) 3524{ 3525 return 0x40000000U; 3526} 3527static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_m(void) 3528{ 3529 return 0x1U << 1U; 3530} 3531static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_v(u32 r) 3532{ 3533 return (r >> 1U) & 0x1U; 3534} 3535static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_disable_f(void) 3536{ 3537 return 0x0U; 3538} 3539static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_m(void) 3540{ 3541 return 0x1U << 2U; 3542} 3543static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_v(u32 r) 3544{ 3545 return (r >> 2U) & 0x1U; 3546} 3547static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_disable_f(void) 3548{ 3549 return 0x0U; 3550} 3551static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_r(void) 3552{ 3553 return 0x00504614U; 3554} 3555static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_1_r(void) 3556{ 3557 return 0x00504618U; 3558} 3559static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r(void) 3560{ 3561 return 0x00504624U; 3562} 3563static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_1_r(void) 3564{ 3565 return 
0x00504628U; 3566} 3567static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r(void) 3568{ 3569 return 0x00504634U; 3570} 3571static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r(void) 3572{ 3573 return 0x00504638U; 3574} 3575static inline u32 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_r(void) 3576{ 3577 return 0x00419e24U; 3578} 3579static inline u32 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_stop_on_any_warp_disable_v(void) 3580{ 3581 return 0x00000000U; 3582} 3583static inline u32 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_stop_on_any_sm_disable_v(void) 3584{ 3585 return 0x00000000U; 3586} 3587static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_r(void) 3588{ 3589 return 0x0050460cU; 3590} 3591static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_sm_in_trap_mode_v(u32 r) 3592{ 3593 return (r >> 0U) & 0x1U; 3594} 3595static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(u32 r) 3596{ 3597 return (r >> 4U) & 0x1U; 3598} 3599static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v(void) 3600{ 3601 return 0x00000001U; 3602} 3603static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_r(void) 3604{ 3605 return 0x00419e50U; 3606} 3607static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_int_pending_f(void) 3608{ 3609 return 0x10U; 3610} 3611static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_pause_pending_f(void) 3612{ 3613 return 0x20U; 3614} 3615static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_single_step_complete_pending_f(void) 3616{ 3617 return 0x40U; 3618} 3619static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_r(void) 3620{ 3621 return 0x00504650U; 3622} 3623static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f(void) 3624{ 3625 return 0x10U; 3626} 3627static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_ecc_sec_error_pending_f(void) 3628{ 3629 return 0x20000000U; 3630} 3631static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_ecc_ded_error_pending_f(void) 3632{ 3633 return 0x40000000U; 3634} 3635static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f(void) 3636{ 3637 
return 0x20U; 3638} 3639static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(void) 3640{ 3641 return 0x40U; 3642} 3643static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_r(void) 3644{ 3645 return 0x00504224U; 3646} 3647static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_intr_pending_f(void) 3648{ 3649 return 0x1U; 3650} 3651static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_r(void) 3652{ 3653 return 0x00504648U; 3654} 3655static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_v(u32 r) 3656{ 3657 return (r >> 0U) & 0xffffU; 3658} 3659static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v(void) 3660{ 3661 return 0x00000000U; 3662} 3663static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f(void) 3664{ 3665 return 0x0U; 3666} 3667static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) 3668{ 3669 return 0x00504770U; 3670} 3671static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) 3672{ 3673 return 0x00419f70U; 3674} 3675static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) 3676{ 3677 return 0x1U << 4U; 3678} 3679static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) 3680{ 3681 return (v & 0x1U) << 4U; 3682} 3683static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) 3684{ 3685 return 0x0050477cU; 3686} 3687static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) 3688{ 3689 return 0x00419f7cU; 3690} 3691static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) 3692{ 3693 return 0x1U << 0U; 3694} 3695static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) 3696{ 3697 return (v & 0x1U) << 0U; 3698} 3699static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) 3700{ 3701 return 0x0041be08U; 3702} 3703static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) 3704{ 3705 return 0x4U; 3706} 3707static inline u32 gr_ppcs_wwdx_map_gpc_map0_r(void) 3708{ 3709 return 0x0041bf00U; 3710} 3711static inline u32 gr_ppcs_wwdx_map_gpc_map1_r(void) 3712{ 3713 return 0x0041bf04U; 3714} 
3715static inline u32 gr_ppcs_wwdx_map_gpc_map2_r(void) 3716{ 3717 return 0x0041bf08U; 3718} 3719static inline u32 gr_ppcs_wwdx_map_gpc_map3_r(void) 3720{ 3721 return 0x0041bf0cU; 3722} 3723static inline u32 gr_ppcs_wwdx_map_gpc_map4_r(void) 3724{ 3725 return 0x0041bf10U; 3726} 3727static inline u32 gr_ppcs_wwdx_map_gpc_map5_r(void) 3728{ 3729 return 0x0041bf14U; 3730} 3731static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) 3732{ 3733 return 0x0041bfd0U; 3734} 3735static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) 3736{ 3737 return (v & 0xffU) << 0U; 3738} 3739static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) 3740{ 3741 return (v & 0xffU) << 8U; 3742} 3743static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) 3744{ 3745 return (v & 0x1fU) << 16U; 3746} 3747static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) 3748{ 3749 return (v & 0x7U) << 21U; 3750} 3751static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff5_mod_value_f(u32 v) 3752{ 3753 return (v & 0x1fU) << 24U; 3754} 3755static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) 3756{ 3757 return 0x0041bfd4U; 3758} 3759static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) 3760{ 3761 return (v & 0xffffffU) << 0U; 3762} 3763static inline u32 gr_ppcs_wwdx_map_table_cfg2_r(void) 3764{ 3765 return 0x0041bfe4U; 3766} 3767static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff6_mod_value_f(u32 v) 3768{ 3769 return (v & 0x1fU) << 0U; 3770} 3771static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff7_mod_value_f(u32 v) 3772{ 3773 return (v & 0x1fU) << 5U; 3774} 3775static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff8_mod_value_f(u32 v) 3776{ 3777 return (v & 0x1fU) << 10U; 3778} 3779static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff9_mod_value_f(u32 v) 3780{ 3781 return (v & 0x1fU) << 15U; 3782} 3783static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff10_mod_value_f(u32 v) 3784{ 3785 return (v & 0x1fU) << 20U; 3786} 3787static inline u32 
gr_ppcs_wwdx_map_table_cfg2_coeff11_mod_value_f(u32 v) 3788{ 3789 return (v & 0x1fU) << 25U; 3790} 3791static inline u32 gr_bes_zrop_settings_r(void) 3792{ 3793 return 0x00408850U; 3794} 3795static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) 3796{ 3797 return (v & 0xfU) << 0U; 3798} 3799static inline u32 gr_be0_crop_debug3_r(void) 3800{ 3801 return 0x00410108U; 3802} 3803static inline u32 gr_bes_crop_debug3_r(void) 3804{ 3805 return 0x00408908U; 3806} 3807static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) 3808{ 3809 return 0x1U << 31U; 3810} 3811static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void) 3812{ 3813 return 0x1U << 1U; 3814} 3815static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void) 3816{ 3817 return 0x0U; 3818} 3819static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void) 3820{ 3821 return 0x2U; 3822} 3823static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void) 3824{ 3825 return 0x1U << 2U; 3826} 3827static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void) 3828{ 3829 return 0x0U; 3830} 3831static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void) 3832{ 3833 return 0x4U; 3834} 3835static inline u32 gr_bes_crop_debug4_r(void) 3836{ 3837 return 0x0040894cU; 3838} 3839static inline u32 gr_bes_crop_debug4_clamp_fp_blend_m(void) 3840{ 3841 return 0x1U << 18U; 3842} 3843static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_inf_f(void) 3844{ 3845 return 0x0U; 3846} 3847static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_maxval_f(void) 3848{ 3849 return 0x40000U; 3850} 3851static inline u32 gr_bes_crop_settings_r(void) 3852{ 3853 return 0x00408958U; 3854} 3855static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) 3856{ 3857 return (v & 0xfU) << 0U; 3858} 3859static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) 3860{ 3861 return 0x00000020U; 3862} 3863static inline u32 
gr_zcull_save_restore_header_bytes_per_gpc_v(void) 3864{ 3865 return 0x00000020U; 3866} 3867static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) 3868{ 3869 return 0x000000c0U; 3870} 3871static inline u32 gr_zcull_subregion_qty_v(void) 3872{ 3873 return 0x00000010U; 3874} 3875static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel0_r(void) 3876{ 3877 return 0x00504604U; 3878} 3879static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel1_r(void) 3880{ 3881 return 0x00504608U; 3882} 3883static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r(void) 3884{ 3885 return 0x0050465cU; 3886} 3887static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control1_r(void) 3888{ 3889 return 0x00504660U; 3890} 3891static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control2_r(void) 3892{ 3893 return 0x00504664U; 3894} 3895static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control3_r(void) 3896{ 3897 return 0x00504668U; 3898} 3899static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control4_r(void) 3900{ 3901 return 0x0050466cU; 3902} 3903static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control5_r(void) 3904{ 3905 return 0x00504658U; 3906} 3907static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_control_r(void) 3908{ 3909 return 0x00504730U; 3910} 3911static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_control_r(void) 3912{ 3913 return 0x00504734U; 3914} 3915static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_control_r(void) 3916{ 3917 return 0x00504738U; 3918} 3919static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_control_r(void) 3920{ 3921 return 0x0050473cU; 3922} 3923static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_control_r(void) 3924{ 3925 return 0x00504740U; 3926} 3927static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_control_r(void) 3928{ 3929 return 0x00504744U; 3930} 3931static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_control_r(void) 3932{ 3933 return 0x00504748U; 
3934} 3935static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_control_r(void) 3936{ 3937 return 0x0050474cU; 3938} 3939static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status_s1_r(void) 3940{ 3941 return 0x00504678U; 3942} 3943static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status1_r(void) 3944{ 3945 return 0x00504694U; 3946} 3947static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_s0_r(void) 3948{ 3949 return 0x005046f0U; 3950} 3951static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_s1_r(void) 3952{ 3953 return 0x00504700U; 3954} 3955static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_s0_r(void) 3956{ 3957 return 0x005046f4U; 3958} 3959static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_s1_r(void) 3960{ 3961 return 0x00504704U; 3962} 3963static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_s0_r(void) 3964{ 3965 return 0x005046f8U; 3966} 3967static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_s1_r(void) 3968{ 3969 return 0x00504708U; 3970} 3971static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_s0_r(void) 3972{ 3973 return 0x005046fcU; 3974} 3975static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_s1_r(void) 3976{ 3977 return 0x0050470cU; 3978} 3979static inline u32 gr_fe_pwr_mode_r(void) 3980{ 3981 return 0x00404170U; 3982} 3983static inline u32 gr_fe_pwr_mode_mode_auto_f(void) 3984{ 3985 return 0x0U; 3986} 3987static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) 3988{ 3989 return 0x2U; 3990} 3991static inline u32 gr_fe_pwr_mode_req_v(u32 r) 3992{ 3993 return (r >> 4U) & 0x1U; 3994} 3995static inline u32 gr_fe_pwr_mode_req_send_f(void) 3996{ 3997 return 0x10U; 3998} 3999static inline u32 gr_fe_pwr_mode_req_done_v(void) 4000{ 4001 return 0x00000000U; 4002} 4003static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) 4004{ 4005 return 0x00418880U; 4006} 4007static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) 4008{ 4009 return 0x1U << 0U; 4010} 4011static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) 4012{ 
4013 return 0x1U << 11U; 4014} 4015static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) 4016{ 4017 return 0x1U << 1U; 4018} 4019static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) 4020{ 4021 return 0x1U << 2U; 4022} 4023static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) 4024{ 4025 return 0x3U << 3U; 4026} 4027static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) 4028{ 4029 return 0x3U << 5U; 4030} 4031static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) 4032{ 4033 return 0x3U << 28U; 4034} 4035static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) 4036{ 4037 return 0x1U << 30U; 4038} 4039static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) 4040{ 4041 return 0x1U << 31U; 4042} 4043static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) 4044{ 4045 return 0x00418890U; 4046} 4047static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) 4048{ 4049 return 0x00418894U; 4050} 4051static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) 4052{ 4053 return 0x004188b0U; 4054} 4055static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) 4056{ 4057 return (r >> 16U) & 0x1U; 4058} 4059static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) 4060{ 4061 return 0x00000001U; 4062} 4063static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) 4064{ 4065 return 0x004188b4U; 4066} 4067static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) 4068{ 4069 return 0x004188b8U; 4070} 4071static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) 4072{ 4073 return 0x004188acU; 4074} 4075static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_r(void) 4076{ 4077 return 0x00419e10U; 4078} 4079static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_f(u32 v) 4080{ 4081 return (v & 0x1U) << 0U; 4082} 4083static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_on_v(void) 4084{ 4085 return 0x00000001U; 4086} 4087static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_m(void) 4088{ 4089 return 0x1U << 31U; 4090} 4091static inline u32 
gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_v(u32 r) 4092{ 4093 return (r >> 31U) & 0x1U; 4094} 4095static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(void) 4096{ 4097 return 0x80000000U; 4098} 4099static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_disable_f(void) 4100{ 4101 return 0x0U; 4102} 4103static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_m(void) 4104{ 4105 return 0x1U << 30U; 4106} 4107static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_v(u32 r) 4108{ 4109 return (r >> 30U) & 0x1U; 4110} 4111static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_task_f(void) 4112{ 4113 return 0x40000000U; 4114} 4115static inline u32 gr_fe_gfxp_wfi_timeout_r(void) 4116{ 4117 return 0x004041c0U; 4118} 4119static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v) 4120{ 4121 return (v & 0xffffffffU) << 0U; 4122} 4123static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void) 4124{ 4125 return 0x0U; 4126} 4127static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void) 4128{ 4129 return 0x00419c84U; 4130} 4131static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v) 4132{ 4133 return (v & 0x7U) << 8U; 4134} 4135static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void) 4136{ 4137 return 0x7U << 8U; 4138} 4139static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void) 4140{ 4141 return 0x100U; 4142} 4143static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void) 4144{ 4145 return 0x00419f78U; 4146} 4147static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void) 4148{ 4149 return 0x3U << 11U; 4150} 4151static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void) 4152{ 4153 return 0x1000U; 4154} 4155static inline u32 gr_gpcs_tc_debug0_r(void) 4156{ 4157 return 0x00418708U; 4158} 4159static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v) 4160{ 4161 return (v & 0x1ffU) << 0U; 4162} 4163static inline u32 
gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void) 4164{ 4165 return 0x1ffU << 0U; 4166} 4167#endif
diff --git a/include/nvgpu/hw/gp106/hw_ltc_gp106.h b/include/nvgpu/hw/gp106/hw_ltc_gp106.h
deleted file mode 100644
index e4e87aa..0000000
--- a/include/nvgpu/hw/gp106/hw_ltc_gp106.h
+++ /dev/null
@@ -1,559 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ltc_gp106_h_ 57#define _hw_ltc_gp106_h_ 58 59static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) 60{ 61 return 0x0014046cU; 62} 63static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) 64{ 65 return 0x00140518U; 66} 67static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) 68{ 69 return 0x0017e318U; 70} 71static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) 72{ 73 return 0x1U << 15U; 74} 75static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) 76{ 77 return 0x00140494U; 78} 79static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) 80{ 81 return (r >> 0U) & 0xffffU; 82} 83static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) 84{ 85 return (r >> 16U) & 0x3U; 86} 87static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) 92{ 93 return 0x00000001U; 94} 95static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) 96{ 97 return 0x00000002U; 98} 99static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) 100{ 101 return 0x0017e26cU; 102} 103static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) 108{ 109 return 
0x2U; 110} 111static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) 112{ 113 return (r >> 2U) & 0x1U; 114} 115static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) 116{ 117 return 0x00000001U; 118} 119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) 120{ 121 return 0x4U; 122} 123static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) 124{ 125 return 0x0014046cU; 126} 127static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) 128{ 129 return 0x0017e270U; 130} 131static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) 132{ 133 return (v & 0x3ffffU) << 0U; 134} 135static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) 136{ 137 return 0x0017e274U; 138} 139static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) 140{ 141 return (v & 0x3ffffU) << 0U; 142} 143static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) 144{ 145 return 0x0003ffffU; 146} 147static inline u32 ltc_ltcs_ltss_cbc_base_r(void) 148{ 149 return 0x0017e278U; 150} 151static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) 152{ 153 return 0x0000000bU; 154} 155static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) 156{ 157 return (r >> 0U) & 0x3ffffffU; 158} 159static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) 160{ 161 return 0x0017e27cU; 162} 163static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) 164{ 165 return 0x0017e000U; 166} 167static inline u32 ltc_ltcs_ltss_cbc_param_r(void) 168{ 169 return 0x0017e280U; 170} 171static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) 172{ 173 return (r >> 0U) & 0xffffU; 174} 175static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) 176{ 177 return (r >> 24U) & 0xfU; 178} 179static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) 180{ 181 return (r >> 28U) & 0xfU; 182} 183static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) 184{ 185 return 0x0017e3f4U; 186} 187static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) 188{ 
189 return (r >> 0U) & 0xffffU; 190} 191static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) 192{ 193 return 0x0017e2acU; 194} 195static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) 196{ 197 return (v & 0x1fU) << 16U; 198} 199static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) 200{ 201 return 0x0017e338U; 202} 203static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) 204{ 205 return (v & 0xfU) << 0U; 206} 207static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) 208{ 209 return 0x0017e33cU + i*4U; 210} 211static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) 212{ 213 return 0x00000004U; 214} 215static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) 216{ 217 return 0x0017e34cU; 218} 219static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) 220{ 221 return 32U; 222} 223static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) 224{ 225 return (v & 0xffffffffU) << 0U; 226} 227static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) 228{ 229 return 0xffffffffU << 0U; 230} 231static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) 232{ 233 return (r >> 0U) & 0xffffffffU; 234} 235static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) 236{ 237 return 0x0017e2b0U; 238} 239static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) 240{ 241 return 0x10000000U; 242} 243static inline u32 ltc_ltcs_ltss_g_elpg_r(void) 244{ 245 return 0x0017e214U; 246} 247static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) 248{ 249 return (r >> 0U) & 0x1U; 250} 251static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) 252{ 253 return 0x00000001U; 254} 255static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) 256{ 257 return 0x1U; 258} 259static inline u32 ltc_ltc0_ltss_g_elpg_r(void) 260{ 261 return 0x00140214U; 262} 263static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) 264{ 265 return (r >> 0U) & 0x1U; 266} 
267static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) 268{ 269 return 0x00000001U; 270} 271static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) 272{ 273 return 0x1U; 274} 275static inline u32 ltc_ltc1_ltss_g_elpg_r(void) 276{ 277 return 0x00142214U; 278} 279static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) 280{ 281 return (r >> 0U) & 0x1U; 282} 283static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void) 284{ 285 return 0x00000001U; 286} 287static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) 288{ 289 return 0x1U; 290} 291static inline u32 ltc_ltcs_ltss_intr_r(void) 292{ 293 return 0x0017e20cU; 294} 295static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) 296{ 297 return 0x100U; 298} 299static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) 300{ 301 return 0x200U; 302} 303static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) 304{ 305 return 0x1U << 20U; 306} 307static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) 308{ 309 return 0x1U << 30U; 310} 311static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) 312{ 313 return 0x1000000U; 314} 315static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) 316{ 317 return 0x2000000U; 318} 319static inline u32 ltc_ltc0_lts0_intr_r(void) 320{ 321 return 0x0014040cU; 322} 323static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) 324{ 325 return 0x0014051cU; 326} 327static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) 328{ 329 return 0xffU << 0U; 330} 331static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r) 332{ 333 return (r >> 0U) & 0xffU; 334} 335static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) 336{ 337 return 0xffU << 16U; 338} 339static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) 340{ 341 return (r >> 16U) & 0xffU; 342} 343static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) 344{ 345 return 0x0017e2a0U; 346} 347static inline u32 
ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) 348{ 349 return (r >> 0U) & 0x1U; 350} 351static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) 352{ 353 return 0x00000001U; 354} 355static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) 356{ 357 return 0x1U; 358} 359static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) 360{ 361 return (r >> 8U) & 0xfU; 362} 363static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) 364{ 365 return 0x00000003U; 366} 367static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) 368{ 369 return 0x300U; 370} 371static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 372{ 373 return (r >> 28U) & 0x1U; 374} 375static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) 376{ 377 return 0x00000001U; 378} 379static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) 380{ 381 return 0x10000000U; 382} 383static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) 384{ 385 return (r >> 29U) & 0x1U; 386} 387static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) 388{ 389 return 0x00000001U; 390} 391static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) 392{ 393 return 0x20000000U; 394} 395static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) 396{ 397 return (r >> 30U) & 0x1U; 398} 399static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) 400{ 401 return 0x00000001U; 402} 403static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) 404{ 405 return 0x40000000U; 406} 407static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) 408{ 409 return 0x0017e2a4U; 410} 411static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) 412{ 413 return (r >> 0U) & 0x1U; 414} 415static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) 416{ 417 return 0x00000001U; 418} 419static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) 420{ 421 return 0x1U; 422} 423static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) 424{ 425 return (r >> 8U) & 0xfU; 426} 427static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) 428{ 429 return 0x00000003U; 430} 431static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) 432{ 433 return 0x300U; 434} 435static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) 436{ 437 return (r >> 16U) & 0x1U; 438} 439static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) 440{ 441 return 0x00000001U; 442} 443static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) 444{ 445 return 0x10000U; 446} 447static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) 448{ 449 return (r >> 28U) & 0x1U; 450} 451static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) 452{ 453 return 0x00000001U; 454} 455static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) 456{ 457 return 0x10000000U; 458} 459static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) 460{ 461 return (r >> 29U) & 0x1U; 462} 463static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) 464{ 465 return 0x00000001U; 466} 467static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) 468{ 469 return 0x20000000U; 470} 471static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) 472{ 473 return (r >> 30U) & 0x1U; 474} 475static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) 476{ 477 return 0x00000001U; 478} 479static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) 480{ 481 return 0x40000000U; 482} 483static inline u32 
ltc_ltc0_ltss_tstg_cmgmt0_r(void) 484{ 485 return 0x001402a0U; 486} 487static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) 488{ 489 return (r >> 0U) & 0x1U; 490} 491static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) 492{ 493 return 0x00000001U; 494} 495static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) 496{ 497 return 0x1U; 498} 499static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) 500{ 501 return 0x001402a4U; 502} 503static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) 504{ 505 return (r >> 0U) & 0x1U; 506} 507static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) 508{ 509 return 0x00000001U; 510} 511static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) 512{ 513 return 0x1U; 514} 515static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) 516{ 517 return 0x001422a0U; 518} 519static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) 520{ 521 return (r >> 0U) & 0x1U; 522} 523static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) 524{ 525 return 0x00000001U; 526} 527static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) 528{ 529 return 0x1U; 530} 531static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) 532{ 533 return 0x001422a4U; 534} 535static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) 536{ 537 return (r >> 0U) & 0x1U; 538} 539static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) 540{ 541 return 0x00000001U; 542} 543static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) 544{ 545 return 0x1U; 546} 547static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void) 548{ 549 return 0x0014058cU; 550} 551static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r) 552{ 553 return (r >> 0U) & 0xffffU; 554} 555static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r) 556{ 557 return (r >> 16U) & 0x1fU; 558} 559#endif
diff --git a/include/nvgpu/hw/gp106/hw_mc_gp106.h b/include/nvgpu/hw/gp106/hw_mc_gp106.h
deleted file mode 100644
index 349e2d7..0000000
--- a/include/nvgpu/hw/gp106/hw_mc_gp106.h
+++ /dev/null
@@ -1,251 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_mc_gp106_h_ 57#define _hw_mc_gp106_h_ 58 59static inline u32 mc_boot_0_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 mc_boot_0_architecture_v(u32 r) 64{ 65 return (r >> 24U) & 0x1fU; 66} 67static inline u32 mc_boot_0_implementation_v(u32 r) 68{ 69 return (r >> 20U) & 0xfU; 70} 71static inline u32 mc_boot_0_major_revision_v(u32 r) 72{ 73 return (r >> 4U) & 0xfU; 74} 75static inline u32 mc_boot_0_minor_revision_v(u32 r) 76{ 77 return (r >> 0U) & 0xfU; 78} 79static inline u32 mc_intr_r(u32 i) 80{ 81 return 0x00000100U + i*4U; 82} 83static inline u32 mc_intr_pfifo_pending_f(void) 84{ 85 return 0x100U; 86} 87static inline u32 mc_intr_replayable_fault_pending_f(void) 88{ 89 return 0x200U; 90} 91static inline u32 mc_intr_pgraph_pending_f(void) 92{ 93 return 0x1000U; 94} 95static inline u32 mc_intr_pmu_pending_f(void) 96{ 97 return 0x1000000U; 98} 99static inline u32 mc_intr_ltc_pending_f(void) 100{ 101 return 0x2000000U; 102} 103static inline u32 mc_intr_priv_ring_pending_f(void) 104{ 105 return 0x40000000U; 106} 107static inline u32 mc_intr_pbus_pending_f(void) 108{ 109 return 0x10000000U; 110} 111static inline u32 mc_intr_en_r(u32 i) 112{ 113 return 0x00000140U + i*4U; 114} 115static inline u32 mc_intr_en_set_r(u32 i) 116{ 
117 return 0x00000160U + i*4U; 118} 119static inline u32 mc_intr_en_clear_r(u32 i) 120{ 121 return 0x00000180U + i*4U; 122} 123static inline u32 mc_enable_r(void) 124{ 125 return 0x00000200U; 126} 127static inline u32 mc_enable_xbar_enabled_f(void) 128{ 129 return 0x4U; 130} 131static inline u32 mc_enable_l2_enabled_f(void) 132{ 133 return 0x8U; 134} 135static inline u32 mc_enable_pmedia_s(void) 136{ 137 return 1U; 138} 139static inline u32 mc_enable_pmedia_f(u32 v) 140{ 141 return (v & 0x1U) << 4U; 142} 143static inline u32 mc_enable_pmedia_m(void) 144{ 145 return 0x1U << 4U; 146} 147static inline u32 mc_enable_pmedia_v(u32 r) 148{ 149 return (r >> 4U) & 0x1U; 150} 151static inline u32 mc_enable_priv_ring_enabled_f(void) 152{ 153 return 0x20U; 154} 155static inline u32 mc_enable_ce0_m(void) 156{ 157 return 0x1U << 6U; 158} 159static inline u32 mc_enable_pfifo_enabled_f(void) 160{ 161 return 0x100U; 162} 163static inline u32 mc_enable_pgraph_enabled_f(void) 164{ 165 return 0x1000U; 166} 167static inline u32 mc_enable_pwr_v(u32 r) 168{ 169 return (r >> 13U) & 0x1U; 170} 171static inline u32 mc_enable_pwr_disabled_v(void) 172{ 173 return 0x00000000U; 174} 175static inline u32 mc_enable_pwr_enabled_f(void) 176{ 177 return 0x2000U; 178} 179static inline u32 mc_enable_pfb_enabled_f(void) 180{ 181 return 0x100000U; 182} 183static inline u32 mc_enable_ce2_m(void) 184{ 185 return 0x1U << 21U; 186} 187static inline u32 mc_enable_ce2_enabled_f(void) 188{ 189 return 0x200000U; 190} 191static inline u32 mc_enable_blg_enabled_f(void) 192{ 193 return 0x8000000U; 194} 195static inline u32 mc_enable_perfmon_enabled_f(void) 196{ 197 return 0x10000000U; 198} 199static inline u32 mc_enable_hub_enabled_f(void) 200{ 201 return 0x20000000U; 202} 203static inline u32 mc_intr_ltc_r(void) 204{ 205 return 0x000001c0U; 206} 207static inline u32 mc_enable_pb_r(void) 208{ 209 return 0x00000204U; 210} 211static inline u32 mc_enable_pb_0_s(void) 212{ 213 return 1U; 214} 215static inline u32 
mc_enable_pb_0_f(u32 v) 216{ 217 return (v & 0x1U) << 0U; 218} 219static inline u32 mc_enable_pb_0_m(void) 220{ 221 return 0x1U << 0U; 222} 223static inline u32 mc_enable_pb_0_v(u32 r) 224{ 225 return (r >> 0U) & 0x1U; 226} 227static inline u32 mc_enable_pb_0_enabled_v(void) 228{ 229 return 0x00000001U; 230} 231static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) 232{ 233 return (v & 0x1U) << (0U + i*1U); 234} 235static inline u32 mc_elpg_enable_r(void) 236{ 237 return 0x0000020cU; 238} 239static inline u32 mc_elpg_enable_xbar_enabled_f(void) 240{ 241 return 0x4U; 242} 243static inline u32 mc_elpg_enable_pfb_enabled_f(void) 244{ 245 return 0x100000U; 246} 247static inline u32 mc_elpg_enable_hub_enabled_f(void) 248{ 249 return 0x20000000U; 250} 251#endif
diff --git a/include/nvgpu/hw/gp106/hw_pbdma_gp106.h b/include/nvgpu/hw/gp106/hw_pbdma_gp106.h
deleted file mode 100644
index 1005c5a..0000000
--- a/include/nvgpu/hw/gp106/hw_pbdma_gp106.h
+++ /dev/null
@@ -1,535 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pbdma_gp106_h_ 57#define _hw_pbdma_gp106_h_ 58 59static inline u32 pbdma_gp_entry1_r(void) 60{ 61 return 0x10000004U; 62} 63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) 64{ 65 return (r >> 0U) & 0xffU; 66} 67static inline u32 pbdma_gp_entry1_length_f(u32 v) 68{ 69 return (v & 0x1fffffU) << 10U; 70} 71static inline u32 pbdma_gp_entry1_length_v(u32 r) 72{ 73 return (r >> 10U) & 0x1fffffU; 74} 75static inline u32 pbdma_gp_base_r(u32 i) 76{ 77 return 0x00040048U + i*8192U; 78} 79static inline u32 pbdma_gp_base__size_1_v(void) 80{ 81 return 0x00000004U; 82} 83static inline u32 pbdma_gp_base_offset_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 3U; 86} 87static inline u32 pbdma_gp_base_rsvd_s(void) 88{ 89 return 3U; 90} 91static inline u32 pbdma_gp_base_hi_r(u32 i) 92{ 93 return 0x0004004cU + i*8192U; 94} 95static inline u32 pbdma_gp_base_hi_offset_f(u32 v) 96{ 97 return (v & 0xffU) << 0U; 98} 99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) 100{ 101 return (v & 0x1fU) << 16U; 102} 103static inline u32 pbdma_gp_fetch_r(u32 i) 104{ 105 return 0x00040050U + i*8192U; 106} 107static inline u32 pbdma_gp_get_r(u32 i) 108{ 109 return 0x00040014U + i*8192U; 110} 111static inline u32 pbdma_gp_put_r(u32 i) 112{ 113 return 0x00040000U + 
i*8192U; 114} 115static inline u32 pbdma_pb_fetch_r(u32 i) 116{ 117 return 0x00040054U + i*8192U; 118} 119static inline u32 pbdma_pb_fetch_hi_r(u32 i) 120{ 121 return 0x00040058U + i*8192U; 122} 123static inline u32 pbdma_get_r(u32 i) 124{ 125 return 0x00040018U + i*8192U; 126} 127static inline u32 pbdma_get_hi_r(u32 i) 128{ 129 return 0x0004001cU + i*8192U; 130} 131static inline u32 pbdma_put_r(u32 i) 132{ 133 return 0x0004005cU + i*8192U; 134} 135static inline u32 pbdma_put_hi_r(u32 i) 136{ 137 return 0x00040060U + i*8192U; 138} 139static inline u32 pbdma_formats_r(u32 i) 140{ 141 return 0x0004009cU + i*8192U; 142} 143static inline u32 pbdma_formats_gp_fermi0_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 pbdma_formats_pb_fermi1_f(void) 148{ 149 return 0x100U; 150} 151static inline u32 pbdma_formats_mp_fermi0_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 pbdma_pb_header_r(u32 i) 156{ 157 return 0x00040084U + i*8192U; 158} 159static inline u32 pbdma_pb_header_priv_user_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 pbdma_pb_header_method_zero_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 pbdma_pb_header_subchannel_zero_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 pbdma_pb_header_level_main_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 pbdma_pb_header_first_true_f(void) 176{ 177 return 0x400000U; 178} 179static inline u32 pbdma_pb_header_type_inc_f(void) 180{ 181 return 0x20000000U; 182} 183static inline u32 pbdma_pb_header_type_non_inc_f(void) 184{ 185 return 0x60000000U; 186} 187static inline u32 pbdma_hdr_shadow_r(u32 i) 188{ 189 return 0x00040118U + i*8192U; 190} 191static inline u32 pbdma_gp_shadow_0_r(u32 i) 192{ 193 return 0x00040110U + i*8192U; 194} 195static inline u32 pbdma_gp_shadow_1_r(u32 i) 196{ 197 return 0x00040114U + i*8192U; 198} 199static inline u32 pbdma_subdevice_r(u32 i) 200{ 201 return 0x00040094U + i*8192U; 202} 203static inline u32 pbdma_subdevice_id_f(u32 v) 204{ 205 return (v & 
0xfffU) << 0U; 206} 207static inline u32 pbdma_subdevice_status_active_f(void) 208{ 209 return 0x10000000U; 210} 211static inline u32 pbdma_subdevice_channel_dma_enable_f(void) 212{ 213 return 0x20000000U; 214} 215static inline u32 pbdma_method0_r(u32 i) 216{ 217 return 0x000400c0U + i*8192U; 218} 219static inline u32 pbdma_method0_fifo_size_v(void) 220{ 221 return 0x00000004U; 222} 223static inline u32 pbdma_method0_addr_f(u32 v) 224{ 225 return (v & 0xfffU) << 2U; 226} 227static inline u32 pbdma_method0_addr_v(u32 r) 228{ 229 return (r >> 2U) & 0xfffU; 230} 231static inline u32 pbdma_method0_subch_v(u32 r) 232{ 233 return (r >> 16U) & 0x7U; 234} 235static inline u32 pbdma_method0_first_true_f(void) 236{ 237 return 0x400000U; 238} 239static inline u32 pbdma_method0_valid_true_f(void) 240{ 241 return 0x80000000U; 242} 243static inline u32 pbdma_method1_r(u32 i) 244{ 245 return 0x000400c8U + i*8192U; 246} 247static inline u32 pbdma_method2_r(u32 i) 248{ 249 return 0x000400d0U + i*8192U; 250} 251static inline u32 pbdma_method3_r(u32 i) 252{ 253 return 0x000400d8U + i*8192U; 254} 255static inline u32 pbdma_data0_r(u32 i) 256{ 257 return 0x000400c4U + i*8192U; 258} 259static inline u32 pbdma_target_r(u32 i) 260{ 261 return 0x000400acU + i*8192U; 262} 263static inline u32 pbdma_target_engine_sw_f(void) 264{ 265 return 0x1fU; 266} 267static inline u32 pbdma_acquire_r(u32 i) 268{ 269 return 0x00040030U + i*8192U; 270} 271static inline u32 pbdma_acquire_retry_man_2_f(void) 272{ 273 return 0x2U; 274} 275static inline u32 pbdma_acquire_retry_exp_2_f(void) 276{ 277 return 0x100U; 278} 279static inline u32 pbdma_acquire_timeout_exp_max_f(void) 280{ 281 return 0x7800U; 282} 283static inline u32 pbdma_acquire_timeout_man_max_f(void) 284{ 285 return 0x7fff8000U; 286} 287static inline u32 pbdma_acquire_timeout_en_disable_f(void) 288{ 289 return 0x0U; 290} 291static inline u32 pbdma_status_r(u32 i) 292{ 293 return 0x00040100U + i*8192U; 294} 295static inline u32 pbdma_channel_r(u32 
i) 296{ 297 return 0x00040120U + i*8192U; 298} 299static inline u32 pbdma_signature_r(u32 i) 300{ 301 return 0x00040010U + i*8192U; 302} 303static inline u32 pbdma_signature_hw_valid_f(void) 304{ 305 return 0xfaceU; 306} 307static inline u32 pbdma_signature_sw_zero_f(void) 308{ 309 return 0x0U; 310} 311static inline u32 pbdma_userd_r(u32 i) 312{ 313 return 0x00040008U + i*8192U; 314} 315static inline u32 pbdma_userd_target_vid_mem_f(void) 316{ 317 return 0x0U; 318} 319static inline u32 pbdma_userd_target_sys_mem_coh_f(void) 320{ 321 return 0x2U; 322} 323static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) 324{ 325 return 0x3U; 326} 327static inline u32 pbdma_userd_addr_f(u32 v) 328{ 329 return (v & 0x7fffffU) << 9U; 330} 331static inline u32 pbdma_userd_hi_r(u32 i) 332{ 333 return 0x0004000cU + i*8192U; 334} 335static inline u32 pbdma_userd_hi_addr_f(u32 v) 336{ 337 return (v & 0xffU) << 0U; 338} 339static inline u32 pbdma_config_r(u32 i) 340{ 341 return 0x000400f4U + i*8192U; 342} 343static inline u32 pbdma_config_auth_level_privileged_f(void) 344{ 345 return 0x100U; 346} 347static inline u32 pbdma_hce_ctrl_r(u32 i) 348{ 349 return 0x000400e4U + i*8192U; 350} 351static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) 352{ 353 return 0x20U; 354} 355static inline u32 pbdma_intr_0_r(u32 i) 356{ 357 return 0x00040108U + i*8192U; 358} 359static inline u32 pbdma_intr_0_memreq_v(u32 r) 360{ 361 return (r >> 0U) & 0x1U; 362} 363static inline u32 pbdma_intr_0_memreq_pending_f(void) 364{ 365 return 0x1U; 366} 367static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) 368{ 369 return 0x2U; 370} 371static inline u32 pbdma_intr_0_memack_extra_pending_f(void) 372{ 373 return 0x4U; 374} 375static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) 376{ 377 return 0x8U; 378} 379static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) 380{ 381 return 0x10U; 382} 383static inline u32 pbdma_intr_0_memflush_pending_f(void) 384{ 385 return 0x20U; 386} 387static 
inline u32 pbdma_intr_0_memop_pending_f(void) 388{ 389 return 0x40U; 390} 391static inline u32 pbdma_intr_0_lbconnect_pending_f(void) 392{ 393 return 0x80U; 394} 395static inline u32 pbdma_intr_0_lbreq_pending_f(void) 396{ 397 return 0x100U; 398} 399static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) 400{ 401 return 0x200U; 402} 403static inline u32 pbdma_intr_0_lback_extra_pending_f(void) 404{ 405 return 0x400U; 406} 407static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) 408{ 409 return 0x800U; 410} 411static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) 412{ 413 return 0x1000U; 414} 415static inline u32 pbdma_intr_0_gpfifo_pending_f(void) 416{ 417 return 0x2000U; 418} 419static inline u32 pbdma_intr_0_gpptr_pending_f(void) 420{ 421 return 0x4000U; 422} 423static inline u32 pbdma_intr_0_gpentry_pending_f(void) 424{ 425 return 0x8000U; 426} 427static inline u32 pbdma_intr_0_gpcrc_pending_f(void) 428{ 429 return 0x10000U; 430} 431static inline u32 pbdma_intr_0_pbptr_pending_f(void) 432{ 433 return 0x20000U; 434} 435static inline u32 pbdma_intr_0_pbentry_pending_f(void) 436{ 437 return 0x40000U; 438} 439static inline u32 pbdma_intr_0_pbcrc_pending_f(void) 440{ 441 return 0x80000U; 442} 443static inline u32 pbdma_intr_0_xbarconnect_pending_f(void) 444{ 445 return 0x100000U; 446} 447static inline u32 pbdma_intr_0_method_pending_f(void) 448{ 449 return 0x200000U; 450} 451static inline u32 pbdma_intr_0_methodcrc_pending_f(void) 452{ 453 return 0x400000U; 454} 455static inline u32 pbdma_intr_0_device_pending_f(void) 456{ 457 return 0x800000U; 458} 459static inline u32 pbdma_intr_0_semaphore_pending_f(void) 460{ 461 return 0x2000000U; 462} 463static inline u32 pbdma_intr_0_acquire_pending_f(void) 464{ 465 return 0x4000000U; 466} 467static inline u32 pbdma_intr_0_pri_pending_f(void) 468{ 469 return 0x8000000U; 470} 471static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) 472{ 473 return 0x20000000U; 474} 475static inline u32 
pbdma_intr_0_pbseg_pending_f(void) 476{ 477 return 0x40000000U; 478} 479static inline u32 pbdma_intr_0_signature_pending_f(void) 480{ 481 return 0x80000000U; 482} 483static inline u32 pbdma_intr_1_r(u32 i) 484{ 485 return 0x00040148U + i*8192U; 486} 487static inline u32 pbdma_intr_en_0_r(u32 i) 488{ 489 return 0x0004010cU + i*8192U; 490} 491static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) 492{ 493 return 0x100U; 494} 495static inline u32 pbdma_intr_en_1_r(u32 i) 496{ 497 return 0x0004014cU + i*8192U; 498} 499static inline u32 pbdma_intr_stall_r(u32 i) 500{ 501 return 0x0004013cU + i*8192U; 502} 503static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) 504{ 505 return 0x100U; 506} 507static inline u32 pbdma_intr_stall_1_r(u32 i) 508{ 509 return 0x00040140U + i*8192U; 510} 511static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void) 512{ 513 return 0x1U; 514} 515static inline u32 pbdma_udma_nop_r(void) 516{ 517 return 0x00000008U; 518} 519static inline u32 pbdma_runlist_timeslice_r(u32 i) 520{ 521 return 0x000400f8U + i*8192U; 522} 523static inline u32 pbdma_runlist_timeslice_timeout_128_f(void) 524{ 525 return 0x80U; 526} 527static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) 528{ 529 return 0x3000U; 530} 531static inline u32 pbdma_runlist_timeslice_enable_true_f(void) 532{ 533 return 0x10000000U; 534} 535#endif
diff --git a/include/nvgpu/hw/gp106/hw_perf_gp106.h b/include/nvgpu/hw/gp106/hw_perf_gp106.h
deleted file mode 100644
index 334cd20..0000000
--- a/include/nvgpu/hw/gp106/hw_perf_gp106.h
+++ /dev/null
@@ -1,219 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_perf_gp106_h_ 57#define _hw_perf_gp106_h_ 58 59static inline u32 perf_pmmsys_base_v(void) 60{ 61 return 0x001b0000U; 62} 63static inline u32 perf_pmmsys_extent_v(void) 64{ 65 return 0x001b0fffU; 66} 67static inline u32 perf_pmasys_control_r(void) 68{ 69 return 0x001b4000U; 70} 71static inline u32 perf_pmasys_control_membuf_status_v(u32 r) 72{ 73 return (r >> 4U) & 0x1U; 74} 75static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) 76{ 77 return 0x00000001U; 78} 79static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) 84{ 85 return (v & 0x1U) << 5U; 86} 87static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) 88{ 89 return (r >> 5U) & 0x1U; 90} 91static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void) 92{ 93 return 0x00000001U; 94} 95static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) 96{ 97 return 0x20U; 98} 99static inline u32 perf_pmasys_mem_block_r(void) 100{ 101 return 0x001b4070U; 102} 103static inline u32 perf_pmasys_mem_block_base_f(u32 v) 104{ 105 return (v & 0xfffffffU) << 0U; 106} 107static inline u32 perf_pmasys_mem_block_target_f(u32 v) 108{ 109 
return (v & 0x3U) << 28U; 110} 111static inline u32 perf_pmasys_mem_block_target_v(u32 r) 112{ 113 return (r >> 28U) & 0x3U; 114} 115static inline u32 perf_pmasys_mem_block_target_lfb_v(void) 116{ 117 return 0x00000000U; 118} 119static inline u32 perf_pmasys_mem_block_target_lfb_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) 124{ 125 return 0x00000002U; 126} 127static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) 128{ 129 return 0x20000000U; 130} 131static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) 132{ 133 return 0x00000003U; 134} 135static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) 136{ 137 return 0x30000000U; 138} 139static inline u32 perf_pmasys_mem_block_valid_f(u32 v) 140{ 141 return (v & 0x1U) << 31U; 142} 143static inline u32 perf_pmasys_mem_block_valid_v(u32 r) 144{ 145 return (r >> 31U) & 0x1U; 146} 147static inline u32 perf_pmasys_mem_block_valid_true_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 perf_pmasys_mem_block_valid_true_f(void) 152{ 153 return 0x80000000U; 154} 155static inline u32 perf_pmasys_mem_block_valid_false_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 perf_pmasys_mem_block_valid_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 perf_pmasys_outbase_r(void) 164{ 165 return 0x001b4074U; 166} 167static inline u32 perf_pmasys_outbase_ptr_f(u32 v) 168{ 169 return (v & 0x7ffffffU) << 5U; 170} 171static inline u32 perf_pmasys_outbaseupper_r(void) 172{ 173 return 0x001b4078U; 174} 175static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) 176{ 177 return (v & 0xffU) << 0U; 178} 179static inline u32 perf_pmasys_outsize_r(void) 180{ 181 return 0x001b407cU; 182} 183static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) 184{ 185 return (v & 0x7ffffffU) << 5U; 186} 187static inline u32 perf_pmasys_mem_bytes_r(void) 188{ 189 return 0x001b4084U; 190} 191static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) 192{ 
193 return (v & 0xfffffffU) << 4U; 194} 195static inline u32 perf_pmasys_mem_bump_r(void) 196{ 197 return 0x001b4088U; 198} 199static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) 200{ 201 return (v & 0xfffffffU) << 4U; 202} 203static inline u32 perf_pmasys_enginestatus_r(void) 204{ 205 return 0x001b40a4U; 206} 207static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) 208{ 209 return (v & 0x1U) << 4U; 210} 211static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) 216{ 217 return 0x10U; 218} 219#endif
diff --git a/include/nvgpu/hw/gp106/hw_pram_gp106.h b/include/nvgpu/hw/gp106/hw_pram_gp106.h
deleted file mode 100644
index 7e33e71..0000000
--- a/include/nvgpu/hw/gp106/hw_pram_gp106.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pram_gp106_h_ 57#define _hw_pram_gp106_h_ 58 59static inline u32 pram_data032_r(u32 i) 60{ 61 return 0x00700000U + i*4U; 62} 63#endif
diff --git a/include/nvgpu/hw/gp106/hw_pri_ringmaster_gp106.h b/include/nvgpu/hw/gp106/hw_pri_ringmaster_gp106.h
deleted file mode 100644
index efdedc3..0000000
--- a/include/nvgpu/hw/gp106/hw_pri_ringmaster_gp106.h
+++ /dev/null
@@ -1,151 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringmaster_gp106_h_ 57#define _hw_pri_ringmaster_gp106_h_ 58 59static inline u32 pri_ringmaster_command_r(void) 60{ 61 return 0x0012004cU; 62} 63static inline u32 pri_ringmaster_command_cmd_m(void) 64{ 65 return 0x3fU << 0U; 66} 67static inline u32 pri_ringmaster_command_cmd_v(u32 r) 68{ 69 return (r >> 0U) & 0x3fU; 70} 71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 pri_ringmaster_command_data_r(void) 92{ 93 return 0x00120048U; 94} 95static inline u32 pri_ringmaster_start_results_r(void) 96{ 97 return 0x00120050U; 98} 99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) 104{ 105 return 0x00000001U; 106} 107static inline u32 
pri_ringmaster_intr_status0_r(void) 108{ 109 return 0x00120058U; 110} 111static inline u32 pri_ringmaster_intr_status1_r(void) 112{ 113 return 0x0012005cU; 114} 115static inline u32 pri_ringmaster_global_ctl_r(void) 116{ 117 return 0x00120060U; 118} 119static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) 120{ 121 return 0x1U; 122} 123static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 pri_ringmaster_enum_fbp_r(void) 128{ 129 return 0x00120074U; 130} 131static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) 132{ 133 return (r >> 0U) & 0x1fU; 134} 135static inline u32 pri_ringmaster_enum_gpc_r(void) 136{ 137 return 0x00120078U; 138} 139static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) 140{ 141 return (r >> 0U) & 0x1fU; 142} 143static inline u32 pri_ringmaster_enum_ltc_r(void) 144{ 145 return 0x0012006cU; 146} 147static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) 148{ 149 return (r >> 0U) & 0x1fU; 150} 151#endif
diff --git a/include/nvgpu/hw/gp106/hw_pri_ringstation_gpc_gp106.h b/include/nvgpu/hw/gp106/hw_pri_ringstation_gpc_gp106.h
deleted file mode 100644
index 711938d..0000000
--- a/include/nvgpu/hw/gp106/hw_pri_ringstation_gpc_gp106.h
+++ /dev/null
@@ -1,79 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_gpc_gp106_h_ 57#define _hw_pri_ringstation_gpc_gp106_h_ 58 59static inline u32 pri_ringstation_gpc_master_config_r(u32 i) 60{ 61 return 0x00128300U + i*4U; 62} 63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) 64{ 65 return 0x00128120U; 66} 67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) 68{ 69 return 0x00128124U; 70} 71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) 72{ 73 return 0x00128128U; 74} 75static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) 76{ 77 return 0x0012812cU; 78} 79#endif
diff --git a/include/nvgpu/hw/gp106/hw_pri_ringstation_sys_gp106.h b/include/nvgpu/hw/gp106/hw_pri_ringstation_sys_gp106.h
deleted file mode 100644
index a3a1447..0000000
--- a/include/nvgpu/hw/gp106/hw_pri_ringstation_sys_gp106.h
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_sys_gp106_h_ 57#define _hw_pri_ringstation_sys_gp106_h_ 58 59static inline u32 pri_ringstation_sys_master_config_r(u32 i) 60{ 61 return 0x00122300U + i*4U; 62} 63static inline u32 pri_ringstation_sys_decode_config_r(void) 64{ 65 return 0x00122204U; 66} 67static inline u32 pri_ringstation_sys_decode_config_ring_m(void) 68{ 69 return 0x7U << 0U; 70} 71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 pri_ringstation_sys_priv_error_adr_r(void) 76{ 77 return 0x00122120U; 78} 79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) 80{ 81 return 0x00122124U; 82} 83static inline u32 pri_ringstation_sys_priv_error_info_r(void) 84{ 85 return 0x00122128U; 86} 87static inline u32 pri_ringstation_sys_priv_error_code_r(void) 88{ 89 return 0x0012212cU; 90} 91#endif
diff --git a/include/nvgpu/hw/gp106/hw_proj_gp106.h b/include/nvgpu/hw/gp106/hw_proj_gp106.h
deleted file mode 100644
index 866bc7b..0000000
--- a/include/nvgpu/hw/gp106/hw_proj_gp106.h
+++ /dev/null
@@ -1,179 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_proj_gp106_h_ 57#define _hw_proj_gp106_h_ 58 59static inline u32 proj_gpc_base_v(void) 60{ 61 return 0x00500000U; 62} 63static inline u32 proj_gpc_shared_base_v(void) 64{ 65 return 0x00418000U; 66} 67static inline u32 proj_gpc_stride_v(void) 68{ 69 return 0x00008000U; 70} 71static inline u32 proj_gpc_priv_stride_v(void) 72{ 73 return 0x00000800U; 74} 75static inline u32 proj_ltc_stride_v(void) 76{ 77 return 0x00002000U; 78} 79static inline u32 proj_lts_stride_v(void) 80{ 81 return 0x00000200U; 82} 83static inline u32 proj_fbpa_base_v(void) 84{ 85 return 0x00900000U; 86} 87static inline u32 proj_fbpa_shared_base_v(void) 88{ 89 return 0x009a0000U; 90} 91static inline u32 proj_fbpa_stride_v(void) 92{ 93 return 0x00004000U; 94} 95static inline u32 proj_ppc_in_gpc_base_v(void) 96{ 97 return 0x00003000U; 98} 99static inline u32 proj_ppc_in_gpc_shared_base_v(void) 100{ 101 return 0x00003e00U; 102} 103static inline u32 proj_ppc_in_gpc_stride_v(void) 104{ 105 return 0x00000200U; 106} 107static inline u32 proj_rop_base_v(void) 108{ 109 return 0x00410000U; 110} 111static inline u32 proj_rop_shared_base_v(void) 112{ 113 return 0x00408800U; 114} 115static inline u32 proj_rop_stride_v(void) 116{ 117 return 0x00000400U; 118} 119static inline u32 
proj_tpc_in_gpc_base_v(void) 120{ 121 return 0x00004000U; 122} 123static inline u32 proj_tpc_in_gpc_stride_v(void) 124{ 125 return 0x00000800U; 126} 127static inline u32 proj_tpc_in_gpc_shared_base_v(void) 128{ 129 return 0x00001800U; 130} 131static inline u32 proj_host_num_engines_v(void) 132{ 133 return 0x00000009U; 134} 135static inline u32 proj_host_num_pbdma_v(void) 136{ 137 return 0x00000004U; 138} 139static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) 140{ 141 return 0x00000005U; 142} 143static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) 144{ 145 return 0x00000001U; 146} 147static inline u32 proj_scal_litter_num_fbps_v(void) 148{ 149 return 0x00000006U; 150} 151static inline u32 proj_scal_litter_num_fbpas_v(void) 152{ 153 return 0x00000006U; 154} 155static inline u32 proj_scal_litter_num_gpcs_v(void) 156{ 157 return 0x00000006U; 158} 159static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) 160{ 161 return 0x00000003U; 162} 163static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) 164{ 165 return 0x00000002U; 166} 167static inline u32 proj_scal_litter_num_zcull_banks_v(void) 168{ 169 return 0x00000004U; 170} 171static inline u32 proj_scal_max_gpcs_v(void) 172{ 173 return 0x00000020U; 174} 175static inline u32 proj_scal_max_tpc_per_gpc_v(void) 176{ 177 return 0x00000008U; 178} 179#endif
diff --git a/include/nvgpu/hw/gp106/hw_psec_gp106.h b/include/nvgpu/hw/gp106/hw_psec_gp106.h
deleted file mode 100644
index b91c09b..0000000
--- a/include/nvgpu/hw/gp106/hw_psec_gp106.h
+++ /dev/null
@@ -1,615 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_psec_gp106_h_ 57#define _hw_psec_gp106_h_ 58 59static inline u32 psec_falcon_irqsset_r(void) 60{ 61 return 0x00087000U; 62} 63static inline u32 psec_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 psec_falcon_irqsclr_r(void) 68{ 69 return 0x00087004U; 70} 71static inline u32 psec_falcon_irqstat_r(void) 72{ 73 return 0x00087008U; 74} 75static inline u32 psec_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 psec_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 psec_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 psec_falcon_irqmode_r(void) 88{ 89 return 0x0008700cU; 90} 91static inline u32 psec_falcon_irqmset_r(void) 92{ 93 return 0x00087010U; 94} 95static inline u32 psec_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 psec_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 psec_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 psec_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 psec_falcon_irqmset_halt_f(u32 v) 112{ 113 return (v & 0x1U) << 4U; 114} 
115static inline u32 psec_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 psec_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 psec_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 psec_falcon_irqmclr_r(void) 128{ 129 return 0x00087014U; 130} 131static inline u32 psec_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 psec_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 psec_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 psec_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 psec_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 psec_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 psec_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 psec_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 psec_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 psec_falcon_irqmask_r(void) 168{ 169 return 0x00087018U; 170} 171static inline u32 psec_falcon_irqdest_r(void) 172{ 173 return 0x0008701cU; 174} 175static inline u32 psec_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 psec_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 psec_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 psec_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 psec_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 psec_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 
199static inline u32 psec_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 psec_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 psec_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 psec_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 psec_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 psec_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 psec_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 psec_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 psec_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 psec_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 psec_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 psec_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 psec_falcon_curctx_r(void) 248{ 249 return 0x00087050U; 250} 251static inline u32 psec_falcon_nxtctx_r(void) 252{ 253 return 0x00087054U; 254} 255static inline u32 psec_falcon_mailbox0_r(void) 256{ 257 return 0x00087040U; 258} 259static inline u32 psec_falcon_mailbox1_r(void) 260{ 261 return 0x00087044U; 262} 263static inline u32 psec_falcon_itfen_r(void) 264{ 265 return 0x00087048U; 266} 267static inline u32 psec_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 psec_falcon_idlestate_r(void) 272{ 273 return 0x0008704cU; 274} 275static inline u32 psec_falcon_idlestate_falcon_busy_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 psec_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 
1U) & 0x7fffU; 282} 283static inline u32 psec_falcon_os_r(void) 284{ 285 return 0x00087080U; 286} 287static inline u32 psec_falcon_engctl_r(void) 288{ 289 return 0x000870a4U; 290} 291static inline u32 psec_falcon_cpuctl_r(void) 292{ 293 return 0x00087100U; 294} 295static inline u32 psec_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 psec_falcon_cpuctl_halt_intr_f(u32 v) 300{ 301 return (v & 0x1U) << 4U; 302} 303static inline u32 psec_falcon_cpuctl_halt_intr_m(void) 304{ 305 return 0x1U << 4U; 306} 307static inline u32 psec_falcon_cpuctl_halt_intr_v(u32 r) 308{ 309 return (r >> 4U) & 0x1U; 310} 311static inline u32 psec_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 312{ 313 return (v & 0x1U) << 6U; 314} 315static inline u32 psec_falcon_cpuctl_cpuctl_alias_en_m(void) 316{ 317 return 0x1U << 6U; 318} 319static inline u32 psec_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 320{ 321 return (r >> 6U) & 0x1U; 322} 323static inline u32 psec_falcon_cpuctl_alias_r(void) 324{ 325 return 0x00087130U; 326} 327static inline u32 psec_falcon_cpuctl_alias_startcpu_f(u32 v) 328{ 329 return (v & 0x1U) << 1U; 330} 331static inline u32 psec_falcon_imemc_r(u32 i) 332{ 333 return 0x00087180U + i*16U; 334} 335static inline u32 psec_falcon_imemc_offs_f(u32 v) 336{ 337 return (v & 0x3fU) << 2U; 338} 339static inline u32 psec_falcon_imemc_blk_f(u32 v) 340{ 341 return (v & 0xffU) << 8U; 342} 343static inline u32 psec_falcon_imemc_aincw_f(u32 v) 344{ 345 return (v & 0x1U) << 24U; 346} 347static inline u32 psec_falcon_imemd_r(u32 i) 348{ 349 return 0x00087184U + i*16U; 350} 351static inline u32 psec_falcon_imemt_r(u32 i) 352{ 353 return 0x00087188U + i*16U; 354} 355static inline u32 psec_falcon_sctl_r(void) 356{ 357 return 0x00087240U; 358} 359static inline u32 psec_falcon_mmu_phys_sec_r(void) 360{ 361 return 0x00100ce4U; 362} 363static inline u32 psec_falcon_bootvec_r(void) 364{ 365 return 0x00087104U; 366} 367static inline u32 psec_falcon_bootvec_vec_f(u32 v) 
368{ 369 return (v & 0xffffffffU) << 0U; 370} 371static inline u32 psec_falcon_dmactl_r(void) 372{ 373 return 0x0008710cU; 374} 375static inline u32 psec_falcon_dmactl_dmem_scrubbing_m(void) 376{ 377 return 0x1U << 1U; 378} 379static inline u32 psec_falcon_dmactl_imem_scrubbing_m(void) 380{ 381 return 0x1U << 2U; 382} 383static inline u32 psec_falcon_dmactl_require_ctx_f(u32 v) 384{ 385 return (v & 0x1U) << 0U; 386} 387static inline u32 psec_falcon_hwcfg_r(void) 388{ 389 return 0x00087108U; 390} 391static inline u32 psec_falcon_hwcfg_imem_size_v(u32 r) 392{ 393 return (r >> 0U) & 0x1ffU; 394} 395static inline u32 psec_falcon_hwcfg_dmem_size_v(u32 r) 396{ 397 return (r >> 9U) & 0x1ffU; 398} 399static inline u32 psec_falcon_dmatrfbase_r(void) 400{ 401 return 0x00087110U; 402} 403static inline u32 psec_falcon_dmatrfbase1_r(void) 404{ 405 return 0x00087128U; 406} 407static inline u32 psec_falcon_dmatrfmoffs_r(void) 408{ 409 return 0x00087114U; 410} 411static inline u32 psec_falcon_dmatrfcmd_r(void) 412{ 413 return 0x00087118U; 414} 415static inline u32 psec_falcon_dmatrfcmd_imem_f(u32 v) 416{ 417 return (v & 0x1U) << 4U; 418} 419static inline u32 psec_falcon_dmatrfcmd_write_f(u32 v) 420{ 421 return (v & 0x1U) << 5U; 422} 423static inline u32 psec_falcon_dmatrfcmd_size_f(u32 v) 424{ 425 return (v & 0x7U) << 8U; 426} 427static inline u32 psec_falcon_dmatrfcmd_ctxdma_f(u32 v) 428{ 429 return (v & 0x7U) << 12U; 430} 431static inline u32 psec_falcon_dmatrffboffs_r(void) 432{ 433 return 0x0008711cU; 434} 435static inline u32 psec_falcon_exterraddr_r(void) 436{ 437 return 0x00087168U; 438} 439static inline u32 psec_falcon_exterrstat_r(void) 440{ 441 return 0x0008716cU; 442} 443static inline u32 psec_falcon_exterrstat_valid_m(void) 444{ 445 return 0x1U << 31U; 446} 447static inline u32 psec_falcon_exterrstat_valid_v(u32 r) 448{ 449 return (r >> 31U) & 0x1U; 450} 451static inline u32 psec_falcon_exterrstat_valid_true_v(void) 452{ 453 return 0x00000001U; 454} 455static inline 
u32 psec_sec2_falcon_icd_cmd_r(void) 456{ 457 return 0x00087200U; 458} 459static inline u32 psec_sec2_falcon_icd_cmd_opc_s(void) 460{ 461 return 4U; 462} 463static inline u32 psec_sec2_falcon_icd_cmd_opc_f(u32 v) 464{ 465 return (v & 0xfU) << 0U; 466} 467static inline u32 psec_sec2_falcon_icd_cmd_opc_m(void) 468{ 469 return 0xfU << 0U; 470} 471static inline u32 psec_sec2_falcon_icd_cmd_opc_v(u32 r) 472{ 473 return (r >> 0U) & 0xfU; 474} 475static inline u32 psec_sec2_falcon_icd_cmd_opc_rreg_f(void) 476{ 477 return 0x8U; 478} 479static inline u32 psec_sec2_falcon_icd_cmd_opc_rstat_f(void) 480{ 481 return 0xeU; 482} 483static inline u32 psec_sec2_falcon_icd_cmd_idx_f(u32 v) 484{ 485 return (v & 0x1fU) << 8U; 486} 487static inline u32 psec_sec2_falcon_icd_rdata_r(void) 488{ 489 return 0x0008720cU; 490} 491static inline u32 psec_falcon_dmemc_r(u32 i) 492{ 493 return 0x000871c0U + i*8U; 494} 495static inline u32 psec_falcon_dmemc_offs_f(u32 v) 496{ 497 return (v & 0x3fU) << 2U; 498} 499static inline u32 psec_falcon_dmemc_offs_m(void) 500{ 501 return 0x3fU << 2U; 502} 503static inline u32 psec_falcon_dmemc_blk_f(u32 v) 504{ 505 return (v & 0xffU) << 8U; 506} 507static inline u32 psec_falcon_dmemc_blk_m(void) 508{ 509 return 0xffU << 8U; 510} 511static inline u32 psec_falcon_dmemc_aincw_f(u32 v) 512{ 513 return (v & 0x1U) << 24U; 514} 515static inline u32 psec_falcon_dmemc_aincr_f(u32 v) 516{ 517 return (v & 0x1U) << 25U; 518} 519static inline u32 psec_falcon_dmemd_r(u32 i) 520{ 521 return 0x000871c4U + i*8U; 522} 523static inline u32 psec_falcon_debug1_r(void) 524{ 525 return 0x00087090U; 526} 527static inline u32 psec_falcon_debug1_ctxsw_mode_s(void) 528{ 529 return 1U; 530} 531static inline u32 psec_falcon_debug1_ctxsw_mode_f(u32 v) 532{ 533 return (v & 0x1U) << 16U; 534} 535static inline u32 psec_falcon_debug1_ctxsw_mode_m(void) 536{ 537 return 0x1U << 16U; 538} 539static inline u32 psec_falcon_debug1_ctxsw_mode_v(u32 r) 540{ 541 return (r >> 16U) & 0x1U; 542} 
543static inline u32 psec_falcon_debug1_ctxsw_mode_init_f(void) 544{ 545 return 0x0U; 546} 547static inline u32 psec_fbif_transcfg_r(u32 i) 548{ 549 return 0x00087600U + i*4U; 550} 551static inline u32 psec_fbif_transcfg_target_local_fb_f(void) 552{ 553 return 0x0U; 554} 555static inline u32 psec_fbif_transcfg_target_coherent_sysmem_f(void) 556{ 557 return 0x1U; 558} 559static inline u32 psec_fbif_transcfg_target_noncoherent_sysmem_f(void) 560{ 561 return 0x2U; 562} 563static inline u32 psec_fbif_transcfg_mem_type_s(void) 564{ 565 return 1U; 566} 567static inline u32 psec_fbif_transcfg_mem_type_f(u32 v) 568{ 569 return (v & 0x1U) << 2U; 570} 571static inline u32 psec_fbif_transcfg_mem_type_m(void) 572{ 573 return 0x1U << 2U; 574} 575static inline u32 psec_fbif_transcfg_mem_type_v(u32 r) 576{ 577 return (r >> 2U) & 0x1U; 578} 579static inline u32 psec_fbif_transcfg_mem_type_virtual_f(void) 580{ 581 return 0x0U; 582} 583static inline u32 psec_fbif_transcfg_mem_type_physical_f(void) 584{ 585 return 0x4U; 586} 587static inline u32 psec_falcon_engine_r(void) 588{ 589 return 0x000873c0U; 590} 591static inline u32 psec_falcon_engine_reset_true_f(void) 592{ 593 return 0x1U; 594} 595static inline u32 psec_falcon_engine_reset_false_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 psec_fbif_ctl_r(void) 600{ 601 return 0x00087624U; 602} 603static inline u32 psec_fbif_ctl_allow_phys_no_ctx_init_f(void) 604{ 605 return 0x0U; 606} 607static inline u32 psec_fbif_ctl_allow_phys_no_ctx_disallow_f(void) 608{ 609 return 0x0U; 610} 611static inline u32 psec_fbif_ctl_allow_phys_no_ctx_allow_f(void) 612{ 613 return 0x80U; 614} 615#endif
diff --git a/include/nvgpu/hw/gp106/hw_pwr_gp106.h b/include/nvgpu/hw/gp106/hw_pwr_gp106.h
deleted file mode 100644
index 2e75fa6..0000000
--- a/include/nvgpu/hw/gp106/hw_pwr_gp106.h
+++ /dev/null
@@ -1,895 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pwr_gp106_h_ 57#define _hw_pwr_gp106_h_ 58 59static inline u32 pwr_falcon_irqsset_r(void) 60{ 61 return 0x0010a000U; 62} 63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pwr_falcon_irqsclr_r(void) 68{ 69 return 0x0010a004U; 70} 71static inline u32 pwr_falcon_irqstat_r(void) 72{ 73 return 0x0010a008U; 74} 75static inline u32 pwr_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pwr_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pwr_falcon_irqmode_r(void) 88{ 89 return 0x0010a00cU; 90} 91static inline u32 pwr_falcon_irqmset_r(void) 92{ 93 return 0x0010a010U; 94} 95static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 pwr_falcon_irqmset_halt_f(u32 v) 112{ 113 return (v & 0x1U) << 4U; 114} 115static inline u32 
pwr_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 pwr_falcon_irqmclr_r(void) 128{ 129 return 0x0010a014U; 130} 131static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 pwr_falcon_irqmask_r(void) 168{ 169 return 0x0010a018U; 170} 171static inline u32 pwr_falcon_irqdest_r(void) 172{ 173 return 0x0010a01cU; 174} 175static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 
pwr_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 pwr_falcon_curctx_r(void) 248{ 249 return 0x0010a050U; 250} 251static inline u32 pwr_falcon_nxtctx_r(void) 252{ 253 return 0x0010a054U; 254} 255static inline u32 pwr_falcon_mailbox0_r(void) 256{ 257 return 0x0010a040U; 258} 259static inline u32 pwr_falcon_mailbox1_r(void) 260{ 261 return 0x0010a044U; 262} 263static inline u32 pwr_falcon_itfen_r(void) 264{ 265 return 0x0010a048U; 266} 267static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 pwr_falcon_idlestate_r(void) 272{ 273 return 0x0010a04cU; 274} 275static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 
pwr_falcon_os_r(void) 284{ 285 return 0x0010a080U; 286} 287static inline u32 pwr_falcon_engctl_r(void) 288{ 289 return 0x0010a0a4U; 290} 291static inline u32 pwr_falcon_cpuctl_r(void) 292{ 293 return 0x0010a100U; 294} 295static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) 300{ 301 return (v & 0x1U) << 4U; 302} 303static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) 304{ 305 return 0x1U << 4U; 306} 307static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) 308{ 309 return (r >> 4U) & 0x1U; 310} 311static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 312{ 313 return (v & 0x1U) << 6U; 314} 315static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void) 316{ 317 return 0x1U << 6U; 318} 319static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 320{ 321 return (r >> 6U) & 0x1U; 322} 323static inline u32 pwr_falcon_cpuctl_alias_r(void) 324{ 325 return 0x0010a130U; 326} 327static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) 328{ 329 return (v & 0x1U) << 1U; 330} 331static inline u32 pwr_pmu_scpctl_stat_r(void) 332{ 333 return 0x0010ac08U; 334} 335static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) 336{ 337 return (v & 0x1U) << 20U; 338} 339static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) 340{ 341 return 0x1U << 20U; 342} 343static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) 344{ 345 return (r >> 20U) & 0x1U; 346} 347static inline u32 pwr_falcon_imemc_r(u32 i) 348{ 349 return 0x0010a180U + i*16U; 350} 351static inline u32 pwr_falcon_imemc_offs_f(u32 v) 352{ 353 return (v & 0x3fU) << 2U; 354} 355static inline u32 pwr_falcon_imemc_blk_f(u32 v) 356{ 357 return (v & 0xffU) << 8U; 358} 359static inline u32 pwr_falcon_imemc_aincw_f(u32 v) 360{ 361 return (v & 0x1U) << 24U; 362} 363static inline u32 pwr_falcon_imemd_r(u32 i) 364{ 365 return 0x0010a184U + i*16U; 366} 367static inline u32 pwr_falcon_imemt_r(u32 i) 368{ 369 return 0x0010a188U + 
i*16U; 370} 371static inline u32 pwr_falcon_sctl_r(void) 372{ 373 return 0x0010a240U; 374} 375static inline u32 pwr_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 pwr_falcon_bootvec_r(void) 380{ 381 return 0x0010a104U; 382} 383static inline u32 pwr_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 pwr_falcon_dmactl_r(void) 388{ 389 return 0x0010a10cU; 390} 391static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 pwr_falcon_dmactl_require_ctx_f(u32 v) 400{ 401 return (v & 0x1U) << 0U; 402} 403static inline u32 pwr_falcon_hwcfg_r(void) 404{ 405 return 0x0010a108U; 406} 407static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) 408{ 409 return (r >> 0U) & 0x1ffU; 410} 411static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) 412{ 413 return (r >> 9U) & 0x1ffU; 414} 415static inline u32 pwr_falcon_dmatrfbase_r(void) 416{ 417 return 0x0010a110U; 418} 419static inline u32 pwr_falcon_dmatrfbase1_r(void) 420{ 421 return 0x0010a128U; 422} 423static inline u32 pwr_falcon_dmatrfmoffs_r(void) 424{ 425 return 0x0010a114U; 426} 427static inline u32 pwr_falcon_dmatrfcmd_r(void) 428{ 429 return 0x0010a118U; 430} 431static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) 432{ 433 return (v & 0x1U) << 4U; 434} 435static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) 436{ 437 return (v & 0x1U) << 5U; 438} 439static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) 440{ 441 return (v & 0x7U) << 8U; 442} 443static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) 444{ 445 return (v & 0x7U) << 12U; 446} 447static inline u32 pwr_falcon_dmatrffboffs_r(void) 448{ 449 return 0x0010a11cU; 450} 451static inline u32 pwr_falcon_exterraddr_r(void) 452{ 453 return 0x0010a168U; 454} 455static inline u32 pwr_falcon_exterrstat_r(void) 456{ 457 return 0x0010a16cU; 458} 459static 
inline u32 pwr_falcon_exterrstat_valid_m(void) 460{ 461 return 0x1U << 31U; 462} 463static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) 464{ 465 return (r >> 31U) & 0x1U; 466} 467static inline u32 pwr_falcon_exterrstat_valid_true_v(void) 468{ 469 return 0x00000001U; 470} 471static inline u32 pwr_pmu_falcon_icd_cmd_r(void) 472{ 473 return 0x0010a200U; 474} 475static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) 476{ 477 return 4U; 478} 479static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) 480{ 481 return (v & 0xfU) << 0U; 482} 483static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) 484{ 485 return 0xfU << 0U; 486} 487static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) 488{ 489 return (r >> 0U) & 0xfU; 490} 491static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) 492{ 493 return 0x8U; 494} 495static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) 496{ 497 return 0xeU; 498} 499static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) 500{ 501 return (v & 0x1fU) << 8U; 502} 503static inline u32 pwr_pmu_falcon_icd_rdata_r(void) 504{ 505 return 0x0010a20cU; 506} 507static inline u32 pwr_falcon_dmemc_r(u32 i) 508{ 509 return 0x0010a1c0U + i*8U; 510} 511static inline u32 pwr_falcon_dmemc_offs_f(u32 v) 512{ 513 return (v & 0x3fU) << 2U; 514} 515static inline u32 pwr_falcon_dmemc_offs_m(void) 516{ 517 return 0x3fU << 2U; 518} 519static inline u32 pwr_falcon_dmemc_blk_f(u32 v) 520{ 521 return (v & 0xffU) << 8U; 522} 523static inline u32 pwr_falcon_dmemc_blk_m(void) 524{ 525 return 0xffU << 8U; 526} 527static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) 528{ 529 return (v & 0x1U) << 24U; 530} 531static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) 532{ 533 return (v & 0x1U) << 25U; 534} 535static inline u32 pwr_falcon_dmemd_r(u32 i) 536{ 537 return 0x0010a1c4U + i*8U; 538} 539static inline u32 pwr_pmu_new_instblk_r(void) 540{ 541 return 0x0010a480U; 542} 543static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) 544{ 545 return (v & 0xfffffffU) << 0U; 546} 547static inline 
u32 pwr_pmu_new_instblk_target_fb_f(void) 548{ 549 return 0x0U; 550} 551static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) 552{ 553 return 0x20000000U; 554} 555static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) 556{ 557 return 0x30000000U; 558} 559static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) 560{ 561 return (v & 0x1U) << 30U; 562} 563static inline u32 pwr_pmu_mutex_id_r(void) 564{ 565 return 0x0010a488U; 566} 567static inline u32 pwr_pmu_mutex_id_value_v(u32 r) 568{ 569 return (r >> 0U) & 0xffU; 570} 571static inline u32 pwr_pmu_mutex_id_value_init_v(void) 572{ 573 return 0x00000000U; 574} 575static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) 576{ 577 return 0x000000ffU; 578} 579static inline u32 pwr_pmu_mutex_id_release_r(void) 580{ 581 return 0x0010a48cU; 582} 583static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) 584{ 585 return (v & 0xffU) << 0U; 586} 587static inline u32 pwr_pmu_mutex_id_release_value_m(void) 588{ 589 return 0xffU << 0U; 590} 591static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) 592{ 593 return 0x00000000U; 594} 595static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 pwr_pmu_mutex_r(u32 i) 600{ 601 return 0x0010a580U + i*4U; 602} 603static inline u32 pwr_pmu_mutex__size_1_v(void) 604{ 605 return 0x00000010U; 606} 607static inline u32 pwr_pmu_mutex_value_f(u32 v) 608{ 609 return (v & 0xffU) << 0U; 610} 611static inline u32 pwr_pmu_mutex_value_v(u32 r) 612{ 613 return (r >> 0U) & 0xffU; 614} 615static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) 616{ 617 return 0x0U; 618} 619static inline u32 pwr_pmu_queue_head_r(u32 i) 620{ 621 return 0x0010a4a0U + i*4U; 622} 623static inline u32 pwr_pmu_queue_head__size_1_v(void) 624{ 625 return 0x00000004U; 626} 627static inline u32 pwr_pmu_queue_head_address_f(u32 v) 628{ 629 return (v & 0xffffffffU) << 0U; 630} 631static inline u32 pwr_pmu_queue_head_address_v(u32 r) 632{ 633 return (r >> 
0U) & 0xffffffffU; 634} 635static inline u32 pwr_pmu_queue_tail_r(u32 i) 636{ 637 return 0x0010a4b0U + i*4U; 638} 639static inline u32 pwr_pmu_queue_tail__size_1_v(void) 640{ 641 return 0x00000004U; 642} 643static inline u32 pwr_pmu_queue_tail_address_f(u32 v) 644{ 645 return (v & 0xffffffffU) << 0U; 646} 647static inline u32 pwr_pmu_queue_tail_address_v(u32 r) 648{ 649 return (r >> 0U) & 0xffffffffU; 650} 651static inline u32 pwr_pmu_msgq_head_r(void) 652{ 653 return 0x0010a4c8U; 654} 655static inline u32 pwr_pmu_msgq_head_val_f(u32 v) 656{ 657 return (v & 0xffffffffU) << 0U; 658} 659static inline u32 pwr_pmu_msgq_head_val_v(u32 r) 660{ 661 return (r >> 0U) & 0xffffffffU; 662} 663static inline u32 pwr_pmu_msgq_tail_r(void) 664{ 665 return 0x0010a4ccU; 666} 667static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) 668{ 669 return (v & 0xffffffffU) << 0U; 670} 671static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) 672{ 673 return (r >> 0U) & 0xffffffffU; 674} 675static inline u32 pwr_pmu_idle_mask_r(u32 i) 676{ 677 return 0x0010a504U + i*16U; 678} 679static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) 680{ 681 return 0x1U; 682} 683static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) 684{ 685 return 0x200000U; 686} 687static inline u32 pwr_pmu_idle_count_r(u32 i) 688{ 689 return 0x0010a508U + i*16U; 690} 691static inline u32 pwr_pmu_idle_count_value_f(u32 v) 692{ 693 return (v & 0x7fffffffU) << 0U; 694} 695static inline u32 pwr_pmu_idle_count_value_v(u32 r) 696{ 697 return (r >> 0U) & 0x7fffffffU; 698} 699static inline u32 pwr_pmu_idle_count_reset_f(u32 v) 700{ 701 return (v & 0x1U) << 31U; 702} 703static inline u32 pwr_pmu_idle_ctrl_r(u32 i) 704{ 705 return 0x0010a50cU + i*16U; 706} 707static inline u32 pwr_pmu_idle_ctrl_value_m(void) 708{ 709 return 0x3U << 0U; 710} 711static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) 712{ 713 return 0x2U; 714} 715static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) 716{ 717 return 0x3U; 718} 719static inline u32 
pwr_pmu_idle_ctrl_filter_m(void) 720{ 721 return 0x1U << 2U; 722} 723static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) 724{ 725 return 0x0U; 726} 727static inline u32 pwr_pmu_idle_threshold_r(u32 i) 728{ 729 return 0x0010a8a0U + i*4U; 730} 731static inline u32 pwr_pmu_idle_threshold_value_f(u32 v) 732{ 733 return (v & 0x7fffffffU) << 0U; 734} 735static inline u32 pwr_pmu_idle_intr_r(void) 736{ 737 return 0x0010a9e8U; 738} 739static inline u32 pwr_pmu_idle_intr_en_f(u32 v) 740{ 741 return (v & 0x1U) << 0U; 742} 743static inline u32 pwr_pmu_idle_intr_en_disabled_v(void) 744{ 745 return 0x00000000U; 746} 747static inline u32 pwr_pmu_idle_intr_en_enabled_v(void) 748{ 749 return 0x00000001U; 750} 751static inline u32 pwr_pmu_idle_intr_status_r(void) 752{ 753 return 0x0010a9ecU; 754} 755static inline u32 pwr_pmu_idle_intr_status_intr_f(u32 v) 756{ 757 return (v & 0x1U) << 0U; 758} 759static inline u32 pwr_pmu_idle_intr_status_intr_m(void) 760{ 761 return U32(0x1U) << 0U; 762} 763static inline u32 pwr_pmu_idle_intr_status_intr_v(u32 r) 764{ 765 return (r >> 0U) & 0x1U; 766} 767static inline u32 pwr_pmu_idle_intr_status_intr_pending_v(void) 768{ 769 return 0x00000001U; 770} 771static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void) 772{ 773 return 0x00000001U; 774} 775static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) 776{ 777 return 0x0010a9f0U + i*8U; 778} 779static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) 780{ 781 return 0x0010a9f4U + i*8U; 782} 783static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) 784{ 785 return 0x0010aa30U + i*8U; 786} 787static inline u32 pwr_pmu_debug_r(u32 i) 788{ 789 return 0x0010a5c0U + i*4U; 790} 791static inline u32 pwr_pmu_debug__size_1_v(void) 792{ 793 return 0x00000004U; 794} 795static inline u32 pwr_pmu_mailbox_r(u32 i) 796{ 797 return 0x0010a450U + i*4U; 798} 799static inline u32 pwr_pmu_mailbox__size_1_v(void) 800{ 801 return 0x0000000cU; 802} 803static inline u32 pwr_pmu_bar0_addr_r(void) 804{ 805 return 0x0010a7a0U; 
806} 807static inline u32 pwr_pmu_bar0_data_r(void) 808{ 809 return 0x0010a7a4U; 810} 811static inline u32 pwr_pmu_bar0_ctl_r(void) 812{ 813 return 0x0010a7acU; 814} 815static inline u32 pwr_pmu_bar0_timeout_r(void) 816{ 817 return 0x0010a7a8U; 818} 819static inline u32 pwr_pmu_bar0_fecs_error_r(void) 820{ 821 return 0x0010a988U; 822} 823static inline u32 pwr_pmu_bar0_error_status_r(void) 824{ 825 return 0x0010a7b0U; 826} 827static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) 828{ 829 return 0x0010a6c0U + i*4U; 830} 831static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) 832{ 833 return 0x0010a6e8U + i*4U; 834} 835static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) 836{ 837 return 0x0010a710U + i*4U; 838} 839static inline u32 pwr_pmu_pg_intren_r(u32 i) 840{ 841 return 0x0010a760U + i*4U; 842} 843static inline u32 pwr_fbif_transcfg_r(u32 i) 844{ 845 return 0x0010ae00U + i*4U; 846} 847static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) 848{ 849 return 0x0U; 850} 851static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) 852{ 853 return 0x1U; 854} 855static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) 856{ 857 return 0x2U; 858} 859static inline u32 pwr_fbif_transcfg_mem_type_s(void) 860{ 861 return 1U; 862} 863static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) 864{ 865 return (v & 0x1U) << 2U; 866} 867static inline u32 pwr_fbif_transcfg_mem_type_m(void) 868{ 869 return 0x1U << 2U; 870} 871static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) 872{ 873 return (r >> 2U) & 0x1U; 874} 875static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) 876{ 877 return 0x0U; 878} 879static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) 880{ 881 return 0x4U; 882} 883static inline u32 pwr_falcon_engine_r(void) 884{ 885 return 0x0010a3c0U; 886} 887static inline u32 pwr_falcon_engine_reset_true_f(void) 888{ 889 return 0x1U; 890} 891static inline u32 pwr_falcon_engine_reset_false_f(void) 892{ 893 return 0x0U; 894} 895#endif
diff --git a/include/nvgpu/hw/gp106/hw_ram_gp106.h b/include/nvgpu/hw/gp106/hw_ram_gp106.h
deleted file mode 100644
index 1de8aa2..0000000
--- a/include/nvgpu/hw/gp106/hw_ram_gp106.h
+++ /dev/null
@@ -1,507 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ram_gp106_h_ 57#define _hw_ram_gp106_h_ 58 59static inline u32 ram_in_ramfc_s(void) 60{ 61 return 4096U; 62} 63static inline u32 ram_in_ramfc_w(void) 64{ 65 return 0U; 66} 67static inline u32 ram_in_page_dir_base_target_f(u32 v) 68{ 69 return (v & 0x3U) << 0U; 70} 71static inline u32 ram_in_page_dir_base_target_w(void) 72{ 73 return 128U; 74} 75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 ram_in_page_dir_base_vol_w(void) 88{ 89 return 128U; 90} 91static inline u32 ram_in_page_dir_base_vol_true_f(void) 92{ 93 return 0x4U; 94} 95static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v) 96{ 97 return (v & 0x1U) << 4U; 98} 99static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void) 100{ 101 return 0x1U << 4U; 102} 103static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void) 104{ 105 return 128U; 106} 107static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void) 108{ 109 return 0x10U; 110} 111static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v) 112{ 113 
return (v & 0x1U) << 5U; 114} 115static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void) 116{ 117 return 0x1U << 5U; 118} 119static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void) 120{ 121 return 128U; 122} 123static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void) 124{ 125 return 0x20U; 126} 127static inline u32 ram_in_use_ver2_pt_format_f(u32 v) 128{ 129 return (v & 0x1U) << 10U; 130} 131static inline u32 ram_in_use_ver2_pt_format_m(void) 132{ 133 return 0x1U << 10U; 134} 135static inline u32 ram_in_use_ver2_pt_format_w(void) 136{ 137 return 128U; 138} 139static inline u32 ram_in_use_ver2_pt_format_true_f(void) 140{ 141 return 0x400U; 142} 143static inline u32 ram_in_use_ver2_pt_format_false_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 ram_in_big_page_size_f(u32 v) 148{ 149 return (v & 0x1U) << 11U; 150} 151static inline u32 ram_in_big_page_size_m(void) 152{ 153 return 0x1U << 11U; 154} 155static inline u32 ram_in_big_page_size_w(void) 156{ 157 return 128U; 158} 159static inline u32 ram_in_big_page_size_128kb_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 ram_in_big_page_size_64kb_f(void) 164{ 165 return 0x800U; 166} 167static inline u32 ram_in_page_dir_base_lo_f(u32 v) 168{ 169 return (v & 0xfffffU) << 12U; 170} 171static inline u32 ram_in_page_dir_base_lo_w(void) 172{ 173 return 128U; 174} 175static inline u32 ram_in_page_dir_base_hi_f(u32 v) 176{ 177 return (v & 0xffffffffU) << 0U; 178} 179static inline u32 ram_in_page_dir_base_hi_w(void) 180{ 181 return 129U; 182} 183static inline u32 ram_in_adr_limit_lo_f(u32 v) 184{ 185 return (v & 0xfffffU) << 12U; 186} 187static inline u32 ram_in_adr_limit_lo_w(void) 188{ 189 return 130U; 190} 191static inline u32 ram_in_adr_limit_hi_f(u32 v) 192{ 193 return (v & 0xffffffffU) << 0U; 194} 195static inline u32 ram_in_adr_limit_hi_w(void) 196{ 197 return 131U; 198} 199static inline u32 ram_in_engine_cs_w(void) 200{ 201 return 132U; 202} 203static inline u32 
ram_in_engine_cs_wfi_v(void) 204{ 205 return 0x00000000U; 206} 207static inline u32 ram_in_engine_cs_wfi_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 ram_in_engine_cs_fg_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 ram_in_engine_cs_fg_f(void) 216{ 217 return 0x8U; 218} 219static inline u32 ram_in_gr_cs_w(void) 220{ 221 return 132U; 222} 223static inline u32 ram_in_gr_cs_wfi_f(void) 224{ 225 return 0x0U; 226} 227static inline u32 ram_in_gr_wfi_target_w(void) 228{ 229 return 132U; 230} 231static inline u32 ram_in_gr_wfi_mode_w(void) 232{ 233 return 132U; 234} 235static inline u32 ram_in_gr_wfi_mode_physical_v(void) 236{ 237 return 0x00000000U; 238} 239static inline u32 ram_in_gr_wfi_mode_physical_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 ram_in_gr_wfi_mode_virtual_v(void) 244{ 245 return 0x00000001U; 246} 247static inline u32 ram_in_gr_wfi_mode_virtual_f(void) 248{ 249 return 0x4U; 250} 251static inline u32 ram_in_gr_wfi_ptr_lo_f(u32 v) 252{ 253 return (v & 0xfffffU) << 12U; 254} 255static inline u32 ram_in_gr_wfi_ptr_lo_w(void) 256{ 257 return 132U; 258} 259static inline u32 ram_in_gr_wfi_ptr_hi_f(u32 v) 260{ 261 return (v & 0xffU) << 0U; 262} 263static inline u32 ram_in_gr_wfi_ptr_hi_w(void) 264{ 265 return 133U; 266} 267static inline u32 ram_in_base_shift_v(void) 268{ 269 return 0x0000000cU; 270} 271static inline u32 ram_in_alloc_size_v(void) 272{ 273 return 0x00001000U; 274} 275static inline u32 ram_fc_size_val_v(void) 276{ 277 return 0x00000200U; 278} 279static inline u32 ram_fc_gp_put_w(void) 280{ 281 return 0U; 282} 283static inline u32 ram_fc_userd_w(void) 284{ 285 return 2U; 286} 287static inline u32 ram_fc_userd_hi_w(void) 288{ 289 return 3U; 290} 291static inline u32 ram_fc_signature_w(void) 292{ 293 return 4U; 294} 295static inline u32 ram_fc_gp_get_w(void) 296{ 297 return 5U; 298} 299static inline u32 ram_fc_pb_get_w(void) 300{ 301 return 6U; 302} 303static inline u32 ram_fc_pb_get_hi_w(void) 304{ 305 return 7U; 
306} 307static inline u32 ram_fc_pb_top_level_get_w(void) 308{ 309 return 8U; 310} 311static inline u32 ram_fc_pb_top_level_get_hi_w(void) 312{ 313 return 9U; 314} 315static inline u32 ram_fc_acquire_w(void) 316{ 317 return 12U; 318} 319static inline u32 ram_fc_semaphorea_w(void) 320{ 321 return 14U; 322} 323static inline u32 ram_fc_semaphoreb_w(void) 324{ 325 return 15U; 326} 327static inline u32 ram_fc_semaphorec_w(void) 328{ 329 return 16U; 330} 331static inline u32 ram_fc_semaphored_w(void) 332{ 333 return 17U; 334} 335static inline u32 ram_fc_gp_base_w(void) 336{ 337 return 18U; 338} 339static inline u32 ram_fc_gp_base_hi_w(void) 340{ 341 return 19U; 342} 343static inline u32 ram_fc_gp_fetch_w(void) 344{ 345 return 20U; 346} 347static inline u32 ram_fc_pb_fetch_w(void) 348{ 349 return 21U; 350} 351static inline u32 ram_fc_pb_fetch_hi_w(void) 352{ 353 return 22U; 354} 355static inline u32 ram_fc_pb_put_w(void) 356{ 357 return 23U; 358} 359static inline u32 ram_fc_pb_put_hi_w(void) 360{ 361 return 24U; 362} 363static inline u32 ram_fc_pb_header_w(void) 364{ 365 return 33U; 366} 367static inline u32 ram_fc_pb_count_w(void) 368{ 369 return 34U; 370} 371static inline u32 ram_fc_subdevice_w(void) 372{ 373 return 37U; 374} 375static inline u32 ram_fc_formats_w(void) 376{ 377 return 39U; 378} 379static inline u32 ram_fc_target_w(void) 380{ 381 return 43U; 382} 383static inline u32 ram_fc_hce_ctrl_w(void) 384{ 385 return 57U; 386} 387static inline u32 ram_fc_chid_w(void) 388{ 389 return 58U; 390} 391static inline u32 ram_fc_chid_id_f(u32 v) 392{ 393 return (v & 0xfffU) << 0U; 394} 395static inline u32 ram_fc_chid_id_w(void) 396{ 397 return 0U; 398} 399static inline u32 ram_fc_config_w(void) 400{ 401 return 61U; 402} 403static inline u32 ram_fc_runlist_timeslice_w(void) 404{ 405 return 62U; 406} 407static inline u32 ram_userd_base_shift_v(void) 408{ 409 return 0x00000009U; 410} 411static inline u32 ram_userd_chan_size_v(void) 412{ 413 return 0x00000200U; 414} 415static 
inline u32 ram_userd_put_w(void) 416{ 417 return 16U; 418} 419static inline u32 ram_userd_get_w(void) 420{ 421 return 17U; 422} 423static inline u32 ram_userd_ref_w(void) 424{ 425 return 18U; 426} 427static inline u32 ram_userd_put_hi_w(void) 428{ 429 return 19U; 430} 431static inline u32 ram_userd_ref_threshold_w(void) 432{ 433 return 20U; 434} 435static inline u32 ram_userd_top_level_get_w(void) 436{ 437 return 22U; 438} 439static inline u32 ram_userd_top_level_get_hi_w(void) 440{ 441 return 23U; 442} 443static inline u32 ram_userd_get_hi_w(void) 444{ 445 return 24U; 446} 447static inline u32 ram_userd_gp_get_w(void) 448{ 449 return 34U; 450} 451static inline u32 ram_userd_gp_put_w(void) 452{ 453 return 35U; 454} 455static inline u32 ram_userd_gp_top_level_get_w(void) 456{ 457 return 22U; 458} 459static inline u32 ram_userd_gp_top_level_get_hi_w(void) 460{ 461 return 23U; 462} 463static inline u32 ram_rl_entry_size_v(void) 464{ 465 return 0x00000008U; 466} 467static inline u32 ram_rl_entry_chid_f(u32 v) 468{ 469 return (v & 0xfffU) << 0U; 470} 471static inline u32 ram_rl_entry_id_f(u32 v) 472{ 473 return (v & 0xfffU) << 0U; 474} 475static inline u32 ram_rl_entry_type_f(u32 v) 476{ 477 return (v & 0x1U) << 13U; 478} 479static inline u32 ram_rl_entry_type_chid_f(void) 480{ 481 return 0x0U; 482} 483static inline u32 ram_rl_entry_type_tsg_f(void) 484{ 485 return 0x2000U; 486} 487static inline u32 ram_rl_entry_timeslice_scale_f(u32 v) 488{ 489 return (v & 0xfU) << 14U; 490} 491static inline u32 ram_rl_entry_timeslice_scale_3_f(void) 492{ 493 return 0xc000U; 494} 495static inline u32 ram_rl_entry_timeslice_timeout_f(u32 v) 496{ 497 return (v & 0xffU) << 18U; 498} 499static inline u32 ram_rl_entry_timeslice_timeout_128_f(void) 500{ 501 return 0x2000000U; 502} 503static inline u32 ram_rl_entry_tsg_length_f(u32 v) 504{ 505 return (v & 0x3fU) << 26U; 506} 507#endif
diff --git a/include/nvgpu/hw/gp106/hw_therm_gp106.h b/include/nvgpu/hw/gp106/hw_therm_gp106.h
deleted file mode 100644
index ee58032..0000000
--- a/include/nvgpu/hw/gp106/hw_therm_gp106.h
+++ /dev/null
@@ -1,183 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_therm_gp106_h_ 57#define _hw_therm_gp106_h_ 58 59static inline u32 therm_temp_sensor_tsense_r(void) 60{ 61 return 0x00020460U; 62} 63static inline u32 therm_temp_sensor_tsense_fixed_point_f(u32 v) 64{ 65 return (v & 0x3fffU) << 3U; 66} 67static inline u32 therm_temp_sensor_tsense_fixed_point_m(void) 68{ 69 return 0x3fffU << 3U; 70} 71static inline u32 therm_temp_sensor_tsense_fixed_point_v(u32 r) 72{ 73 return (r >> 3U) & 0x3fffU; 74} 75static inline u32 therm_temp_sensor_tsense_fixed_point_min_v(void) 76{ 77 return 0x00003b00U; 78} 79static inline u32 therm_temp_sensor_tsense_fixed_point_max_v(void) 80{ 81 return 0x000010e0U; 82} 83static inline u32 therm_temp_sensor_tsense_state_f(u32 v) 84{ 85 return (v & 0x3U) << 29U; 86} 87static inline u32 therm_temp_sensor_tsense_state_m(void) 88{ 89 return 0x3U << 29U; 90} 91static inline u32 therm_temp_sensor_tsense_state_v(u32 r) 92{ 93 return (r >> 29U) & 0x3U; 94} 95static inline u32 therm_temp_sensor_tsense_state_valid_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 therm_temp_sensor_tsense_state_shadow_v(void) 100{ 101 return 0x00000002U; 102} 103static inline u32 therm_gate_ctrl_r(u32 i) 104{ 105 return 0x00020200U + i*4U; 106} 107static inline u32 
therm_gate_ctrl_eng_clk_m(void) 108{ 109 return 0x3U << 0U; 110} 111static inline u32 therm_gate_ctrl_eng_clk_run_f(void) 112{ 113 return 0x0U; 114} 115static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) 116{ 117 return 0x1U; 118} 119static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) 120{ 121 return 0x2U; 122} 123static inline u32 therm_gate_ctrl_blk_clk_m(void) 124{ 125 return 0x3U << 2U; 126} 127static inline u32 therm_gate_ctrl_blk_clk_run_f(void) 128{ 129 return 0x0U; 130} 131static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) 132{ 133 return 0x4U; 134} 135static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) 136{ 137 return (v & 0x1fU) << 8U; 138} 139static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) 140{ 141 return 0x1fU << 8U; 142} 143static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) 144{ 145 return (v & 0x7U) << 13U; 146} 147static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) 148{ 149 return 0x7U << 13U; 150} 151static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) 152{ 153 return (v & 0xfU) << 16U; 154} 155static inline u32 therm_gate_ctrl_eng_delay_before_m(void) 156{ 157 return 0xfU << 16U; 158} 159static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) 160{ 161 return (v & 0xfU) << 20U; 162} 163static inline u32 therm_gate_ctrl_eng_delay_after_m(void) 164{ 165 return 0xfU << 20U; 166} 167static inline u32 therm_fecs_idle_filter_r(void) 168{ 169 return 0x00020288U; 170} 171static inline u32 therm_fecs_idle_filter_value_m(void) 172{ 173 return 0xffffffffU << 0U; 174} 175static inline u32 therm_hubmmu_idle_filter_r(void) 176{ 177 return 0x0002028cU; 178} 179static inline u32 therm_hubmmu_idle_filter_value_m(void) 180{ 181 return 0xffffffffU << 0U; 182} 183#endif
diff --git a/include/nvgpu/hw/gp106/hw_timer_gp106.h b/include/nvgpu/hw/gp106/hw_timer_gp106.h
deleted file mode 100644
index 7fd722f..0000000
--- a/include/nvgpu/hw/gp106/hw_timer_gp106.h
+++ /dev/null
@@ -1,115 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_timer_gp106_h_ 57#define _hw_timer_gp106_h_ 58 59static inline u32 timer_pri_timeout_r(void) 60{ 61 return 0x00009080U; 62} 63static inline u32 timer_pri_timeout_period_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 timer_pri_timeout_period_m(void) 68{ 69 return 0xffffffU << 0U; 70} 71static inline u32 timer_pri_timeout_period_v(u32 r) 72{ 73 return (r >> 0U) & 0xffffffU; 74} 75static inline u32 timer_pri_timeout_en_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 timer_pri_timeout_en_m(void) 80{ 81 return 0x1U << 31U; 82} 83static inline u32 timer_pri_timeout_en_v(u32 r) 84{ 85 return (r >> 31U) & 0x1U; 86} 87static inline u32 timer_pri_timeout_en_en_enabled_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 timer_pri_timeout_en_en_disabled_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 timer_pri_timeout_save_0_r(void) 96{ 97 return 0x00009084U; 98} 99static inline u32 timer_pri_timeout_save_1_r(void) 100{ 101 return 0x00009088U; 102} 103static inline u32 timer_pri_timeout_fecs_errcode_r(void) 104{ 105 return 0x0000908cU; 106} 107static inline u32 timer_time_0_r(void) 108{ 109 return 0x00009400U; 110} 111static inline u32 timer_time_1_r(void) 112{ 113 return 0x00009410U; 114} 
115#endif
diff --git a/include/nvgpu/hw/gp106/hw_top_gp106.h b/include/nvgpu/hw/gp106/hw_top_gp106.h
deleted file mode 100644
index 749f66e..0000000
--- a/include/nvgpu/hw/gp106/hw_top_gp106.h
+++ /dev/null
@@ -1,255 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_top_gp106_h_ 57#define _hw_top_gp106_h_ 58 59static inline u32 top_num_gpcs_r(void) 60{ 61 return 0x00022430U; 62} 63static inline u32 top_num_gpcs_value_v(u32 r) 64{ 65 return (r >> 0U) & 0x1fU; 66} 67static inline u32 top_tpc_per_gpc_r(void) 68{ 69 return 0x00022434U; 70} 71static inline u32 top_tpc_per_gpc_value_v(u32 r) 72{ 73 return (r >> 0U) & 0x1fU; 74} 75static inline u32 top_num_fbps_r(void) 76{ 77 return 0x00022438U; 78} 79static inline u32 top_num_fbps_value_v(u32 r) 80{ 81 return (r >> 0U) & 0x1fU; 82} 83static inline u32 top_num_fbpas_r(void) 84{ 85 return 0x0002243cU; 86} 87static inline u32 top_num_fbpas_value_v(u32 r) 88{ 89 return (r >> 0U) & 0x1fU; 90} 91static inline u32 top_ltc_per_fbp_r(void) 92{ 93 return 0x00022450U; 94} 95static inline u32 top_ltc_per_fbp_value_v(u32 r) 96{ 97 return (r >> 0U) & 0x1fU; 98} 99static inline u32 top_slices_per_ltc_r(void) 100{ 101 return 0x0002245cU; 102} 103static inline u32 top_slices_per_ltc_value_v(u32 r) 104{ 105 return (r >> 0U) & 0x1fU; 106} 107static inline u32 top_num_ltcs_r(void) 108{ 109 return 0x00022454U; 110} 111static inline u32 top_device_info_r(u32 i) 112{ 113 return 0x00022700U + i*4U; 114} 115static inline u32 top_device_info__size_1_v(void) 116{ 117 return 
0x00000040U; 118} 119static inline u32 top_device_info_chain_v(u32 r) 120{ 121 return (r >> 31U) & 0x1U; 122} 123static inline u32 top_device_info_chain_enable_v(void) 124{ 125 return 0x00000001U; 126} 127static inline u32 top_device_info_engine_enum_v(u32 r) 128{ 129 return (r >> 26U) & 0xfU; 130} 131static inline u32 top_device_info_runlist_enum_v(u32 r) 132{ 133 return (r >> 21U) & 0xfU; 134} 135static inline u32 top_device_info_intr_enum_v(u32 r) 136{ 137 return (r >> 15U) & 0x1fU; 138} 139static inline u32 top_device_info_reset_enum_v(u32 r) 140{ 141 return (r >> 9U) & 0x1fU; 142} 143static inline u32 top_device_info_type_enum_v(u32 r) 144{ 145 return (r >> 2U) & 0x1fffffffU; 146} 147static inline u32 top_device_info_type_enum_graphics_v(void) 148{ 149 return 0x00000000U; 150} 151static inline u32 top_device_info_type_enum_graphics_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 top_device_info_type_enum_copy0_v(void) 156{ 157 return 0x00000001U; 158} 159static inline u32 top_device_info_type_enum_copy0_f(void) 160{ 161 return 0x4U; 162} 163static inline u32 top_device_info_type_enum_copy2_v(void) 164{ 165 return 0x00000003U; 166} 167static inline u32 top_device_info_type_enum_copy2_f(void) 168{ 169 return 0xcU; 170} 171static inline u32 top_device_info_type_enum_lce_v(void) 172{ 173 return 0x00000013U; 174} 175static inline u32 top_device_info_type_enum_lce_f(void) 176{ 177 return 0x4cU; 178} 179static inline u32 top_device_info_engine_v(u32 r) 180{ 181 return (r >> 5U) & 0x1U; 182} 183static inline u32 top_device_info_runlist_v(u32 r) 184{ 185 return (r >> 4U) & 0x1U; 186} 187static inline u32 top_device_info_intr_v(u32 r) 188{ 189 return (r >> 3U) & 0x1U; 190} 191static inline u32 top_device_info_reset_v(u32 r) 192{ 193 return (r >> 2U) & 0x1U; 194} 195static inline u32 top_device_info_entry_v(u32 r) 196{ 197 return (r >> 0U) & 0x3U; 198} 199static inline u32 top_device_info_entry_not_valid_v(void) 200{ 201 return 0x00000000U; 202} 203static inline 
u32 top_device_info_entry_enum_v(void) 204{ 205 return 0x00000002U; 206} 207static inline u32 top_device_info_entry_engine_type_v(void) 208{ 209 return 0x00000003U; 210} 211static inline u32 top_device_info_entry_data_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 top_device_info_data_type_v(u32 r) 216{ 217 return (r >> 30U) & 0x1U; 218} 219static inline u32 top_device_info_data_type_enum2_v(void) 220{ 221 return 0x00000000U; 222} 223static inline u32 top_device_info_data_inst_id_v(u32 r) 224{ 225 return (r >> 26U) & 0xfU; 226} 227static inline u32 top_device_info_data_pri_base_v(u32 r) 228{ 229 return (r >> 12U) & 0xfffU; 230} 231static inline u32 top_device_info_data_pri_base_align_v(void) 232{ 233 return 0x0000000cU; 234} 235static inline u32 top_device_info_data_fault_id_enum_v(u32 r) 236{ 237 return (r >> 3U) & 0x1fU; 238} 239static inline u32 top_device_info_data_fault_id_v(u32 r) 240{ 241 return (r >> 2U) & 0x1U; 242} 243static inline u32 top_device_info_data_fault_id_valid_v(void) 244{ 245 return 0x00000001U; 246} 247static inline u32 top_scratch1_r(void) 248{ 249 return 0x0002240cU; 250} 251static inline u32 top_scratch1_devinit_completed_v(u32 r) 252{ 253 return (r >> 1U) & 0x1U; 254} 255#endif
diff --git a/include/nvgpu/hw/gp106/hw_trim_gp106.h b/include/nvgpu/hw/gp106/hw_trim_gp106.h
deleted file mode 100644
index cebb6d4..0000000
--- a/include/nvgpu/hw/gp106/hw_trim_gp106.h
+++ /dev/null
@@ -1,195 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_trim_gp106_h_ 57#define _hw_trim_gp106_h_ 58 59static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_r(void) 60{ 61 return 0x00132924U; 62} 63static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_s(void) 64{ 65 return 16U; 66} 67static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_f(u32 v) 68{ 69 return (v & 0xffffU) << 0U; 70} 71static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_m(void) 72{ 73 return 0xffffU << 0U; 74} 75static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_v(u32 r) 76{ 77 return (r >> 0U) & 0xffffU; 78} 79static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_s(void) 80{ 81 return 1U; 82} 83static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_f(u32 v) 84{ 85 return (v & 0x1U) << 16U; 86} 87static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_m(void) 88{ 89 return 0x1U << 16U; 90} 91static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_v(u32 r) 92{ 93 return (r >> 16U) & 0x1U; 94} 95static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_deasserted_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_asserted_f(void) 100{ 101 return 0x10000U; 102} 103static inline u32 
trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_s(void) 104{ 105 return 1U; 106} 107static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_f(u32 v) 108{ 109 return (v & 0x1U) << 20U; 110} 111static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_m(void) 112{ 113 return 0x1U << 20U; 114} 115static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_v(u32 r) 116{ 117 return (r >> 20U) & 0x1U; 118} 119static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_asserted_f(void) 124{ 125 return 0x100000U; 126} 127static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_s(void) 128{ 129 return 1U; 130} 131static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_f(u32 v) 132{ 133 return (v & 0x1U) << 24U; 134} 135static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_m(void) 136{ 137 return 0x1U << 24U; 138} 139static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_v(u32 r) 140{ 141 return (r >> 24U) & 0x1U; 142} 143static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_deasserted_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_asserted_f(void) 148{ 149 return 0x1000000U; 150} 151static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_source_gpc2clk_f(void) 152{ 153 return 0x70000000U; 154} 155static inline u32 trim_gpc_bcast_clk_cntr_ncgpcclk_cnt_r(void) 156{ 157 return 0x00132928U; 158} 159static inline u32 trim_fbpa_bcast_clk_cntr_ncltcclk_cfg_r(void) 160{ 161 return 0x00132128U; 162} 163static inline u32 trim_fbpa_bcast_clk_cntr_ncltcclk_cfg_source_dramdiv4_rec_clk1_f(void) 164{ 165 return 0x30000000U; 166} 167static inline u32 trim_fbpa_bcast_clk_cntr_ncltcclk_cnt_r(void) 168{ 169 return 0x0013212cU; 170} 171static inline u32 trim_sys_clk_cntr_ncltcpll_cfg_r(void) 172{ 173 return 0x001373c0U; 174} 175static inline u32 
trim_sys_clk_cntr_ncltcpll_cfg_source_xbar2clk_f(void) 176{ 177 return 0x20000000U; 178} 179static inline u32 trim_sys_clk_cntr_ncltcpll_cnt_r(void) 180{ 181 return 0x001373c4U; 182} 183static inline u32 trim_sys_clk_cntr_ncsyspll_cfg_r(void) 184{ 185 return 0x001373b0U; 186} 187static inline u32 trim_sys_clk_cntr_ncsyspll_cfg_source_sys2clk_f(void) 188{ 189 return 0x0U; 190} 191static inline u32 trim_sys_clk_cntr_ncsyspll_cnt_r(void) 192{ 193 return 0x001373b4U; 194} 195#endif
diff --git a/include/nvgpu/hw/gp106/hw_xp_gp106.h b/include/nvgpu/hw/gp106/hw_xp_gp106.h
deleted file mode 100644
index f6c843c..0000000
--- a/include/nvgpu/hw/gp106/hw_xp_gp106.h
+++ /dev/null
@@ -1,143 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_xp_gp106_h_ 57#define _hw_xp_gp106_h_ 58 59static inline u32 xp_dl_mgr_r(u32 i) 60{ 61 return 0x0008b8c0U + i*4U; 62} 63static inline u32 xp_dl_mgr_safe_timing_f(u32 v) 64{ 65 return (v & 0x1U) << 2U; 66} 67static inline u32 xp_pl_link_config_r(u32 i) 68{ 69 return 0x0008c040U + i*4U; 70} 71static inline u32 xp_pl_link_config_ltssm_status_f(u32 v) 72{ 73 return (v & 0x1U) << 4U; 74} 75static inline u32 xp_pl_link_config_ltssm_status_idle_v(void) 76{ 77 return 0x00000000U; 78} 79static inline u32 xp_pl_link_config_ltssm_directive_f(u32 v) 80{ 81 return (v & 0xfU) << 0U; 82} 83static inline u32 xp_pl_link_config_ltssm_directive_m(void) 84{ 85 return 0xfU << 0U; 86} 87static inline u32 xp_pl_link_config_ltssm_directive_normal_operations_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 xp_pl_link_config_ltssm_directive_change_speed_v(void) 92{ 93 return 0x00000001U; 94} 95static inline u32 xp_pl_link_config_max_link_rate_f(u32 v) 96{ 97 return (v & 0x3U) << 18U; 98} 99static inline u32 xp_pl_link_config_max_link_rate_m(void) 100{ 101 return 0x3U << 18U; 102} 103static inline u32 xp_pl_link_config_max_link_rate_2500_mtps_v(void) 104{ 105 return 0x00000002U; 106} 107static inline u32 
xp_pl_link_config_max_link_rate_5000_mtps_v(void) 108{ 109 return 0x00000001U; 110} 111static inline u32 xp_pl_link_config_max_link_rate_8000_mtps_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 xp_pl_link_config_target_tx_width_f(u32 v) 116{ 117 return (v & 0x7U) << 20U; 118} 119static inline u32 xp_pl_link_config_target_tx_width_m(void) 120{ 121 return 0x7U << 20U; 122} 123static inline u32 xp_pl_link_config_target_tx_width_x1_v(void) 124{ 125 return 0x00000007U; 126} 127static inline u32 xp_pl_link_config_target_tx_width_x2_v(void) 128{ 129 return 0x00000006U; 130} 131static inline u32 xp_pl_link_config_target_tx_width_x4_v(void) 132{ 133 return 0x00000005U; 134} 135static inline u32 xp_pl_link_config_target_tx_width_x8_v(void) 136{ 137 return 0x00000004U; 138} 139static inline u32 xp_pl_link_config_target_tx_width_x16_v(void) 140{ 141 return 0x00000000U; 142} 143#endif
diff --git a/include/nvgpu/hw/gp106/hw_xve_gp106.h b/include/nvgpu/hw/gp106/hw_xve_gp106.h
deleted file mode 100644
index e61d13f..0000000
--- a/include/nvgpu/hw/gp106/hw_xve_gp106.h
+++ /dev/null
@@ -1,207 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_xve_gp106_h_ 57#define _hw_xve_gp106_h_ 58 59static inline u32 xve_rom_ctrl_r(void) 60{ 61 return 0x00000050U; 62} 63static inline u32 xve_rom_ctrl_rom_shadow_f(u32 v) 64{ 65 return (v & 0x1U) << 0U; 66} 67static inline u32 xve_rom_ctrl_rom_shadow_disabled_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 xve_rom_ctrl_rom_shadow_enabled_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 xve_link_control_status_r(void) 76{ 77 return 0x00000088U; 78} 79static inline u32 xve_link_control_status_link_speed_m(void) 80{ 81 return 0xfU << 16U; 82} 83static inline u32 xve_link_control_status_link_speed_v(u32 r) 84{ 85 return (r >> 16U) & 0xfU; 86} 87static inline u32 xve_link_control_status_link_speed_link_speed_2p5_v(void) 88{ 89 return 0x00000001U; 90} 91static inline u32 xve_link_control_status_link_speed_link_speed_5p0_v(void) 92{ 93 return 0x00000002U; 94} 95static inline u32 xve_link_control_status_link_speed_link_speed_8p0_v(void) 96{ 97 return 0x00000003U; 98} 99static inline u32 xve_link_control_status_link_width_m(void) 100{ 101 return 0x3fU << 20U; 102} 103static inline u32 xve_link_control_status_link_width_v(u32 r) 104{ 105 return (r >> 20U) & 0x3fU; 106} 107static inline u32 xve_link_control_status_link_width_x1_v(void) 
108{ 109 return 0x00000001U; 110} 111static inline u32 xve_link_control_status_link_width_x2_v(void) 112{ 113 return 0x00000002U; 114} 115static inline u32 xve_link_control_status_link_width_x4_v(void) 116{ 117 return 0x00000004U; 118} 119static inline u32 xve_link_control_status_link_width_x8_v(void) 120{ 121 return 0x00000008U; 122} 123static inline u32 xve_link_control_status_link_width_x16_v(void) 124{ 125 return 0x00000010U; 126} 127static inline u32 xve_priv_xv_r(void) 128{ 129 return 0x00000150U; 130} 131static inline u32 xve_priv_xv_cya_l0s_enable_f(u32 v) 132{ 133 return (v & 0x1U) << 7U; 134} 135static inline u32 xve_priv_xv_cya_l0s_enable_m(void) 136{ 137 return 0x1U << 7U; 138} 139static inline u32 xve_priv_xv_cya_l0s_enable_v(u32 r) 140{ 141 return (r >> 7U) & 0x1U; 142} 143static inline u32 xve_priv_xv_cya_l1_enable_f(u32 v) 144{ 145 return (v & 0x1U) << 8U; 146} 147static inline u32 xve_priv_xv_cya_l1_enable_m(void) 148{ 149 return 0x1U << 8U; 150} 151static inline u32 xve_priv_xv_cya_l1_enable_v(u32 r) 152{ 153 return (r >> 8U) & 0x1U; 154} 155static inline u32 xve_cya_2_r(void) 156{ 157 return 0x00000704U; 158} 159static inline u32 xve_reset_r(void) 160{ 161 return 0x00000718U; 162} 163static inline u32 xve_reset_reset_m(void) 164{ 165 return 0x1U << 0U; 166} 167static inline u32 xve_reset_gpu_on_sw_reset_m(void) 168{ 169 return 0x1U << 1U; 170} 171static inline u32 xve_reset_counter_en_m(void) 172{ 173 return 0x1U << 2U; 174} 175static inline u32 xve_reset_counter_val_f(u32 v) 176{ 177 return (v & 0x7ffU) << 4U; 178} 179static inline u32 xve_reset_counter_val_m(void) 180{ 181 return 0x7ffU << 4U; 182} 183static inline u32 xve_reset_counter_val_v(u32 r) 184{ 185 return (r >> 4U) & 0x7ffU; 186} 187static inline u32 xve_reset_clock_on_sw_reset_m(void) 188{ 189 return 0x1U << 15U; 190} 191static inline u32 xve_reset_clock_counter_en_m(void) 192{ 193 return 0x1U << 16U; 194} 195static inline u32 xve_reset_clock_counter_val_f(u32 v) 196{ 197 return (v & 
0x7ffU) << 17U; 198} 199static inline u32 xve_reset_clock_counter_val_m(void) 200{ 201 return 0x7ffU << 17U; 202} 203static inline u32 xve_reset_clock_counter_val_v(u32 r) 204{ 205 return (r >> 17U) & 0x7ffU; 206} 207#endif
diff --git a/include/nvgpu/hw/gp10b/hw_bus_gp10b.h b/include/nvgpu/hw/gp10b/hw_bus_gp10b.h
deleted file mode 100644
index b06ea66..0000000
--- a/include/nvgpu/hw/gp10b/hw_bus_gp10b.h
+++ /dev/null
@@ -1,223 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_bus_gp10b_h_ 57#define _hw_bus_gp10b_h_ 58 59static inline u32 bus_bar0_window_r(void) 60{ 61 return 0x00001700U; 62} 63static inline u32 bus_bar0_window_base_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 bus_bar0_window_target_vid_mem_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) 72{ 73 return 0x2000000U; 74} 75static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) 76{ 77 return 0x3000000U; 78} 79static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) 80{ 81 return 0x00000010U; 82} 83static inline u32 bus_bar1_block_r(void) 84{ 85 return 0x00001704U; 86} 87static inline u32 bus_bar1_block_ptr_f(u32 v) 88{ 89 return (v & 0xfffffffU) << 0U; 90} 91static inline u32 bus_bar1_block_target_vid_mem_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) 96{ 97 return 0x20000000U; 98} 99static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) 100{ 101 return 0x30000000U; 102} 103static inline u32 bus_bar1_block_mode_virtual_f(void) 104{ 105 return 0x80000000U; 106} 107static inline u32 bus_bar2_block_r(void) 108{ 109 return 0x00001714U; 110} 111static inline u32 bus_bar2_block_ptr_f(u32 v) 
112{ 113 return (v & 0xfffffffU) << 0U; 114} 115static inline u32 bus_bar2_block_target_vid_mem_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void) 124{ 125 return 0x30000000U; 126} 127static inline u32 bus_bar2_block_mode_virtual_f(void) 128{ 129 return 0x80000000U; 130} 131static inline u32 bus_bar1_block_ptr_shift_v(void) 132{ 133 return 0x0000000cU; 134} 135static inline u32 bus_bar2_block_ptr_shift_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 bus_bind_status_r(void) 140{ 141 return 0x00001710U; 142} 143static inline u32 bus_bind_status_bar1_pending_v(u32 r) 144{ 145 return (r >> 0U) & 0x1U; 146} 147static inline u32 bus_bind_status_bar1_pending_empty_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 bus_bind_status_bar1_pending_busy_f(void) 152{ 153 return 0x1U; 154} 155static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) 156{ 157 return (r >> 1U) & 0x1U; 158} 159static inline u32 bus_bind_status_bar1_outstanding_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 bus_bind_status_bar1_outstanding_true_f(void) 164{ 165 return 0x2U; 166} 167static inline u32 bus_bind_status_bar2_pending_v(u32 r) 168{ 169 return (r >> 2U) & 0x1U; 170} 171static inline u32 bus_bind_status_bar2_pending_empty_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 bus_bind_status_bar2_pending_busy_f(void) 176{ 177 return 0x4U; 178} 179static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) 180{ 181 return (r >> 3U) & 0x1U; 182} 183static inline u32 bus_bind_status_bar2_outstanding_false_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 bus_bind_status_bar2_outstanding_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 bus_intr_0_r(void) 192{ 193 return 0x00001100U; 194} 195static inline u32 bus_intr_0_pri_squash_m(void) 196{ 197 return 0x1U << 1U; 198} 199static inline u32 
bus_intr_0_pri_fecserr_m(void) 200{ 201 return 0x1U << 2U; 202} 203static inline u32 bus_intr_0_pri_timeout_m(void) 204{ 205 return 0x1U << 3U; 206} 207static inline u32 bus_intr_en_0_r(void) 208{ 209 return 0x00001140U; 210} 211static inline u32 bus_intr_en_0_pri_squash_m(void) 212{ 213 return 0x1U << 1U; 214} 215static inline u32 bus_intr_en_0_pri_fecserr_m(void) 216{ 217 return 0x1U << 2U; 218} 219static inline u32 bus_intr_en_0_pri_timeout_m(void) 220{ 221 return 0x1U << 3U; 222} 223#endif
diff --git a/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h b/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h
deleted file mode 100644
index 00879c1..0000000
--- a/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h
+++ /dev/null
@@ -1,163 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ccsr_gp10b_h_ 57#define _hw_ccsr_gp10b_h_ 58 59static inline u32 ccsr_channel_inst_r(u32 i) 60{ 61 return 0x00800000U + i*8U; 62} 63static inline u32 ccsr_channel_inst__size_1_v(void) 64{ 65 return 0x00000200U; 66} 67static inline u32 ccsr_channel_inst_ptr_f(u32 v) 68{ 69 return (v & 0xfffffffU) << 0U; 70} 71static inline u32 ccsr_channel_inst_target_vid_mem_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) 76{ 77 return 0x20000000U; 78} 79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) 80{ 81 return 0x30000000U; 82} 83static inline u32 ccsr_channel_inst_bind_false_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 ccsr_channel_inst_bind_true_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 ccsr_channel_r(u32 i) 92{ 93 return 0x00800004U + i*8U; 94} 95static inline u32 ccsr_channel__size_1_v(void) 96{ 97 return 0x00000200U; 98} 99static inline u32 ccsr_channel_enable_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 ccsr_channel_enable_set_f(u32 v) 104{ 105 return (v & 0x1U) << 10U; 106} 107static inline u32 ccsr_channel_enable_set_true_f(void) 108{ 109 return 0x400U; 110} 111static inline u32 ccsr_channel_enable_clr_true_f(void) 112{ 113 
return 0x800U; 114} 115static inline u32 ccsr_channel_status_v(u32 r) 116{ 117 return (r >> 24U) & 0xfU; 118} 119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) 120{ 121 return 0x00000002U; 122} 123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) 124{ 125 return 0x00000004U; 126} 127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) 128{ 129 return 0x0000000aU; 130} 131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) 132{ 133 return 0x0000000bU; 134} 135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) 140{ 141 return 0x0000000dU; 142} 143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) 144{ 145 return 0x0000000eU; 146} 147static inline u32 ccsr_channel_next_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 ccsr_channel_next_true_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 ccsr_channel_force_ctx_reload_true_f(void) 156{ 157 return 0x100U; 158} 159static inline u32 ccsr_channel_busy_v(u32 r) 160{ 161 return (r >> 28U) & 0x1U; 162} 163#endif
diff --git a/include/nvgpu/hw/gp10b/hw_ce_gp10b.h b/include/nvgpu/hw/gp10b/hw_ce_gp10b.h
deleted file mode 100644
index c293771..0000000
--- a/include/nvgpu/hw/gp10b/hw_ce_gp10b.h
+++ /dev/null
@@ -1,87 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ce_gp10b_h_ 57#define _hw_ce_gp10b_h_ 58 59static inline u32 ce_intr_status_r(u32 i) 60{ 61 return 0x00104410U + i*128U; 62} 63static inline u32 ce_intr_status_blockpipe_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 ce_intr_status_blockpipe_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 ce_intr_status_nonblockpipe_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 ce_intr_status_nonblockpipe_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 ce_intr_status_launcherr_pending_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 ce_intr_status_launcherr_reset_f(void) 84{ 85 return 0x4U; 86} 87#endif
diff --git a/include/nvgpu/hw/gp10b/hw_ctxsw_prog_gp10b.h b/include/nvgpu/hw/gp10b/hw_ctxsw_prog_gp10b.h
deleted file mode 100644
index d83320f..0000000
--- a/include/nvgpu/hw/gp10b/hw_ctxsw_prog_gp10b.h
+++ /dev/null
@@ -1,491 +0,0 @@ 1/* 2 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ctxsw_prog_gp10b_h_ 57#define _hw_ctxsw_prog_gp10b_h_ 58 59static inline u32 ctxsw_prog_fecs_header_v(void) 60{ 61 return 0x00000100U; 62} 63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ctxsw_prog_main_image_patch_count_o(void) 68{ 69 return 0x00000010U; 70} 71static inline u32 ctxsw_prog_main_image_context_id_o(void) 72{ 73 return 0x000000f0U; 74} 75static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) 76{ 77 return 0x00000014U; 78} 79static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) 80{ 81 return 0x00000018U; 82} 83static inline u32 ctxsw_prog_main_image_zcull_o(void) 84{ 85 return 0x0000001cU; 86} 87static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) 88{ 89 return 0x00000001U; 90} 91static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) 92{ 93 return 0x00000002U; 94} 95static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) 96{ 97 return 0x00000020U; 98} 99static inline u32 ctxsw_prog_main_image_pm_o(void) 100{ 101 return 0x00000028U; 102} 103static inline u32 ctxsw_prog_main_image_pm_mode_m(void) 104{ 105 return 0x7U << 0U; 106} 107static inline u32 ctxsw_prog_main_image_pm_mode_ctxsw_f(void) 108{ 109 return 
0x1U; 110} 111static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) 112{ 113 return 0x0U; 114} 115static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) 116{ 117 return 0x7U << 3U; 118} 119static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) 120{ 121 return 0x8U; 122} 123static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) 128{ 129 return 0x0000002cU; 130} 131static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) 132{ 133 return 0x000000f4U; 134} 135static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) 136{ 137 return 0x000000d0U; 138} 139static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) 140{ 141 return 0x000000d4U; 142} 143static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) 144{ 145 return 0x000000d8U; 146} 147static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void) 148{ 149 return 0x000000dcU; 150} 151static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) 152{ 153 return 0x000000f8U; 154} 155static inline u32 ctxsw_prog_main_image_magic_value_o(void) 156{ 157 return 0x000000fcU; 158} 159static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) 160{ 161 return 0x600dc0deU; 162} 163static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) 164{ 165 return 0x0000000cU; 166} 167static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) 168{ 169 return (r >> 0U) & 0xffffU; 170} 171static inline u32 ctxsw_prog_local_image_ppc_info_o(void) 172{ 173 return 0x000000f4U; 174} 175static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) 176{ 177 return (r >> 0U) & 0xffffU; 178} 179static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) 180{ 181 return (r >> 16U) & 0xffffU; 182} 183static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) 184{ 185 return 0x000000f8U; 186} 187static inline u32 ctxsw_prog_local_magic_value_o(void) 188{ 189 
return 0x000000fcU; 190} 191static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) 192{ 193 return 0xad0becabU; 194} 195static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) 196{ 197 return 0x000000ecU; 198} 199static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) 200{ 201 return (r >> 0U) & 0xffffU; 202} 203static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) 204{ 205 return (r >> 16U) & 0xffU; 206} 207static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) 208{ 209 return 0x00000100U; 210} 211static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) 212{ 213 return 0x00000004U; 214} 215static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) 216{ 217 return 0x00000000U; 218} 219static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) 220{ 221 return 0x00000002U; 222} 223static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) 224{ 225 return 0x000000a0U; 226} 227static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) 228{ 229 return 2U; 230} 231static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) 232{ 233 return (v & 0x3U) << 0U; 234} 235static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) 236{ 237 return 0x3U << 0U; 238} 239static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) 240{ 241 return (r >> 0U) & 0x3U; 242} 243static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) 244{ 245 return 0x0U; 246} 247static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) 248{ 249 return 0x2U; 250} 251static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) 252{ 253 return 0x000000a4U; 254} 255static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) 256{ 257 return 0x000000a8U; 258} 259static inline u32 ctxsw_prog_main_image_misc_options_o(void) 260{ 261 
return 0x0000003cU; 262} 263static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) 264{ 265 return 0x1U << 3U; 266} 267static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 ctxsw_prog_main_image_pmu_options_o(void) 272{ 273 return 0x00000070U; 274} 275static inline u32 ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(u32 v) 276{ 277 return (v & 0x1U) << 0U; 278} 279static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) 280{ 281 return 0x00000080U; 282} 283static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) 284{ 285 return (v & 0x3U) << 0U; 286} 287static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) 288{ 289 return 0x1U; 290} 291static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) 292{ 293 return 0x00000068U; 294} 295static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) 296{ 297 return 0x00000084U; 298} 299static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) 300{ 301 return (v & 0x3U) << 0U; 302} 303static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) 304{ 305 return 0x1U; 306} 307static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) 308{ 309 return 0x2U; 310} 311static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_control_o(void) 312{ 313 return 0x000000acU; 314} 315static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_control_num_records_f(u32 v) 316{ 317 return (v & 0xffffU) << 0U; 318} 319static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(void) 320{ 321 return 0x000000b0U; 322} 323static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_v_m(void) 324{ 325 return 0xfffffffU << 0U; 326} 327static inline u32 
ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_m(void) 328{ 329 return 0x3U << 28U; 330} 331static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f(void) 332{ 333 return 0x0U; 334} 335static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_coherent_f(void) 336{ 337 return 0x20000000U; 338} 339static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(void) 340{ 341 return 0x30000000U; 342} 343static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(void) 344{ 345 return 0x000000b4U; 346} 347static inline u32 ctxsw_prog_main_image_context_timestamp_buffer_ptr_v_f(u32 v) 348{ 349 return (v & 0xffffffffU) << 0U; 350} 351static inline u32 ctxsw_prog_record_timestamp_record_size_in_bytes_v(void) 352{ 353 return 0x00000080U; 354} 355static inline u32 ctxsw_prog_record_timestamp_record_size_in_words_v(void) 356{ 357 return 0x00000020U; 358} 359static inline u32 ctxsw_prog_record_timestamp_magic_value_lo_o(void) 360{ 361 return 0x00000000U; 362} 363static inline u32 ctxsw_prog_record_timestamp_magic_value_lo_v_value_v(void) 364{ 365 return 0x00000000U; 366} 367static inline u32 ctxsw_prog_record_timestamp_magic_value_hi_o(void) 368{ 369 return 0x00000004U; 370} 371static inline u32 ctxsw_prog_record_timestamp_magic_value_hi_v_value_v(void) 372{ 373 return 0x600dbeefU; 374} 375static inline u32 ctxsw_prog_record_timestamp_context_id_o(void) 376{ 377 return 0x00000008U; 378} 379static inline u32 ctxsw_prog_record_timestamp_context_ptr_o(void) 380{ 381 return 0x0000000cU; 382} 383static inline u32 ctxsw_prog_record_timestamp_timestamp_lo_o(void) 384{ 385 return 0x00000018U; 386} 387static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_o(void) 388{ 389 return 0x0000001cU; 390} 391static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_v_f(u32 v) 392{ 393 return (v & 0xffffffU) << 0U; 394} 395static inline u32 
ctxsw_prog_record_timestamp_timestamp_hi_v_v(u32 r) 396{ 397 return (r >> 0U) & 0xffffffU; 398} 399static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_f(u32 v) 400{ 401 return (v & 0xffU) << 24U; 402} 403static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_m(void) 404{ 405 return 0xffU << 24U; 406} 407static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_v(u32 r) 408{ 409 return (r >> 24U) & 0xffU; 410} 411static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_ctxsw_req_by_host_v(void) 412{ 413 return 0x00000001U; 414} 415static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_ctxsw_req_by_host_f(void) 416{ 417 return 0x1000000U; 418} 419static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_v(void) 420{ 421 return 0x00000002U; 422} 423static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_f(void) 424{ 425 return 0x2000000U; 426} 427static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_wfi_v(void) 428{ 429 return 0x0000000aU; 430} 431static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_wfi_f(void) 432{ 433 return 0xa000000U; 434} 435static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_gfxp_v(void) 436{ 437 return 0x0000000bU; 438} 439static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_gfxp_f(void) 440{ 441 return 0xb000000U; 442} 443static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_ctap_v(void) 444{ 445 return 0x0000000cU; 446} 447static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_ctap_f(void) 448{ 449 return 0xc000000U; 450} 451static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_cilp_v(void) 452{ 453 return 0x0000000dU; 454} 455static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_fe_ack_cilp_f(void) 456{ 457 return 0xd000000U; 458} 459static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_save_end_v(void) 460{ 461 return 0x00000003U; 462} 463static 
inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_save_end_f(void) 464{ 465 return 0x3000000U; 466} 467static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_restore_start_v(void) 468{ 469 return 0x00000004U; 470} 471static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_restore_start_f(void) 472{ 473 return 0x4000000U; 474} 475static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_context_start_v(void) 476{ 477 return 0x00000005U; 478} 479static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_context_start_f(void) 480{ 481 return 0x5000000U; 482} 483static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_v(void) 484{ 485 return 0x000000ffU; 486} 487static inline u32 ctxsw_prog_record_timestamp_timestamp_hi_tag_invalid_timestamp_f(void) 488{ 489 return 0xff000000U; 490} 491#endif
diff --git a/include/nvgpu/hw/gp10b/hw_falcon_gp10b.h b/include/nvgpu/hw/gp10b/hw_falcon_gp10b.h
deleted file mode 100644
index 6dc401d..0000000
--- a/include/nvgpu/hw/gp10b/hw_falcon_gp10b.h
+++ /dev/null
@@ -1,603 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_falcon_gp10b_h_ 57#define _hw_falcon_gp10b_h_ 58 59static inline u32 falcon_falcon_irqsset_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 falcon_falcon_irqsclr_r(void) 68{ 69 return 0x00000004U; 70} 71static inline u32 falcon_falcon_irqstat_r(void) 72{ 73 return 0x00000008U; 74} 75static inline u32 falcon_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 falcon_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 falcon_falcon_irqmode_r(void) 88{ 89 return 0x0000000cU; 90} 91static inline u32 falcon_falcon_irqmset_r(void) 92{ 93 return 0x00000010U; 94} 95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 falcon_falcon_irqmset_halt_f(u32 v) 112{ 113 
return (v & 0x1U) << 4U; 114} 115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 falcon_falcon_irqmclr_r(void) 128{ 129 return 0x00000014U; 130} 131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 falcon_falcon_irqmask_r(void) 168{ 169 return 0x00000018U; 170} 171static inline u32 falcon_falcon_irqdest_r(void) 172{ 173 return 0x0000001cU; 174} 175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 
falcon_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 falcon_falcon_curctx_r(void) 248{ 249 return 0x00000050U; 250} 251static inline u32 falcon_falcon_nxtctx_r(void) 252{ 253 return 0x00000054U; 254} 255static inline u32 falcon_falcon_mailbox0_r(void) 256{ 257 return 0x00000040U; 258} 259static inline u32 falcon_falcon_mailbox1_r(void) 260{ 261 return 0x00000044U; 262} 263static inline u32 falcon_falcon_itfen_r(void) 264{ 265 return 0x00000048U; 266} 267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 falcon_falcon_idlestate_r(void) 272{ 273 return 0x0000004cU; 274} 275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) 
276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 falcon_falcon_os_r(void) 284{ 285 return 0x00000080U; 286} 287static inline u32 falcon_falcon_engctl_r(void) 288{ 289 return 0x000000a4U; 290} 291static inline u32 falcon_falcon_cpuctl_r(void) 292{ 293 return 0x00000100U; 294} 295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) 300{ 301 return (v & 0x1U) << 2U; 302} 303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) 304{ 305 return (v & 0x1U) << 3U; 306} 307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) 308{ 309 return (v & 0x1U) << 4U; 310} 311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) 312{ 313 return 0x1U << 4U; 314} 315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) 316{ 317 return (r >> 4U) & 0x1U; 318} 319static inline u32 falcon_falcon_cpuctl_stopped_m(void) 320{ 321 return 0x1U << 5U; 322} 323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 324{ 325 return (v & 0x1U) << 6U; 326} 327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void) 328{ 329 return 0x1U << 6U; 330} 331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 332{ 333 return (r >> 6U) & 0x1U; 334} 335static inline u32 falcon_falcon_cpuctl_alias_r(void) 336{ 337 return 0x00000130U; 338} 339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) 340{ 341 return (v & 0x1U) << 1U; 342} 343static inline u32 falcon_falcon_imemc_r(u32 i) 344{ 345 return 0x00000180U + i*16U; 346} 347static inline u32 falcon_falcon_imemc_offs_f(u32 v) 348{ 349 return (v & 0x3fU) << 2U; 350} 351static inline u32 falcon_falcon_imemc_blk_f(u32 v) 352{ 353 return (v & 0xffU) << 8U; 354} 355static inline u32 falcon_falcon_imemc_aincw_f(u32 v) 356{ 357 return (v & 0x1U) << 24U; 358} 359static inline u32 
falcon_falcon_imemc_secure_f(u32 v) 360{ 361 return (v & 0x1U) << 28U; 362} 363static inline u32 falcon_falcon_imemd_r(u32 i) 364{ 365 return 0x00000184U + i*16U; 366} 367static inline u32 falcon_falcon_imemt_r(u32 i) 368{ 369 return 0x00000188U + i*16U; 370} 371static inline u32 falcon_falcon_sctl_r(void) 372{ 373 return 0x00000240U; 374} 375static inline u32 falcon_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 falcon_falcon_bootvec_r(void) 380{ 381 return 0x00000104U; 382} 383static inline u32 falcon_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 falcon_falcon_dmactl_r(void) 388{ 389 return 0x0000010cU; 390} 391static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) 400{ 401 return (v & 0x1U) << 0U; 402} 403static inline u32 falcon_falcon_hwcfg_r(void) 404{ 405 return 0x00000108U; 406} 407static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) 408{ 409 return (r >> 0U) & 0x1ffU; 410} 411static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) 412{ 413 return (r >> 9U) & 0x1ffU; 414} 415static inline u32 falcon_falcon_dmatrfbase_r(void) 416{ 417 return 0x00000110U; 418} 419static inline u32 falcon_falcon_dmatrfbase1_r(void) 420{ 421 return 0x00000128U; 422} 423static inline u32 falcon_falcon_dmatrfmoffs_r(void) 424{ 425 return 0x00000114U; 426} 427static inline u32 falcon_falcon_imctl_debug_r(void) 428{ 429 return 0x0000015cU; 430} 431static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) 432{ 433 return (v & 0xffffffU) << 0U; 434} 435static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) 436{ 437 return (v & 0x7U) << 24U; 438} 439static inline u32 falcon_falcon_imstat_r(void) 440{ 441 return 0x00000144U; 442} 443static inline u32 falcon_falcon_traceidx_r(void) 444{ 445 
return 0x00000148U; 446} 447static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) 448{ 449 return (r >> 16U) & 0xffU; 450} 451static inline u32 falcon_falcon_traceidx_idx_f(u32 v) 452{ 453 return (v & 0xffU) << 0U; 454} 455static inline u32 falcon_falcon_tracepc_r(void) 456{ 457 return 0x0000014cU; 458} 459static inline u32 falcon_falcon_tracepc_pc_v(u32 r) 460{ 461 return (r >> 0U) & 0xffffffU; 462} 463static inline u32 falcon_falcon_dmatrfcmd_r(void) 464{ 465 return 0x00000118U; 466} 467static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) 468{ 469 return (v & 0x1U) << 4U; 470} 471static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) 472{ 473 return (v & 0x1U) << 5U; 474} 475static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) 476{ 477 return (v & 0x7U) << 8U; 478} 479static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) 480{ 481 return (v & 0x7U) << 12U; 482} 483static inline u32 falcon_falcon_dmatrffboffs_r(void) 484{ 485 return 0x0000011cU; 486} 487static inline u32 falcon_falcon_exterraddr_r(void) 488{ 489 return 0x00000168U; 490} 491static inline u32 falcon_falcon_exterrstat_r(void) 492{ 493 return 0x0000016cU; 494} 495static inline u32 falcon_falcon_exterrstat_valid_m(void) 496{ 497 return 0x1U << 31U; 498} 499static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) 500{ 501 return (r >> 31U) & 0x1U; 502} 503static inline u32 falcon_falcon_exterrstat_valid_true_v(void) 504{ 505 return 0x00000001U; 506} 507static inline u32 falcon_falcon_icd_cmd_r(void) 508{ 509 return 0x00000200U; 510} 511static inline u32 falcon_falcon_icd_cmd_opc_s(void) 512{ 513 return 4U; 514} 515static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 516{ 517 return (v & 0xfU) << 0U; 518} 519static inline u32 falcon_falcon_icd_cmd_opc_m(void) 520{ 521 return 0xfU << 0U; 522} 523static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) 524{ 525 return (r >> 0U) & 0xfU; 526} 527static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) 528{ 529 return 0x8U; 530} 531static inline 
u32 falcon_falcon_icd_cmd_opc_rstat_f(void) 532{ 533 return 0xeU; 534} 535static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) 536{ 537 return (v & 0x1fU) << 8U; 538} 539static inline u32 falcon_falcon_icd_rdata_r(void) 540{ 541 return 0x0000020cU; 542} 543static inline u32 falcon_falcon_dmemc_r(u32 i) 544{ 545 return 0x000001c0U + i*8U; 546} 547static inline u32 falcon_falcon_dmemc_offs_f(u32 v) 548{ 549 return (v & 0x3fU) << 2U; 550} 551static inline u32 falcon_falcon_dmemc_offs_m(void) 552{ 553 return 0x3fU << 2U; 554} 555static inline u32 falcon_falcon_dmemc_blk_f(u32 v) 556{ 557 return (v & 0xffU) << 8U; 558} 559static inline u32 falcon_falcon_dmemc_blk_m(void) 560{ 561 return 0xffU << 8U; 562} 563static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) 564{ 565 return (v & 0x1U) << 24U; 566} 567static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) 568{ 569 return (v & 0x1U) << 25U; 570} 571static inline u32 falcon_falcon_dmemd_r(u32 i) 572{ 573 return 0x000001c4U + i*8U; 574} 575static inline u32 falcon_falcon_debug1_r(void) 576{ 577 return 0x00000090U; 578} 579static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) 580{ 581 return 1U; 582} 583static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) 584{ 585 return (v & 0x1U) << 16U; 586} 587static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) 588{ 589 return 0x1U << 16U; 590} 591static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) 592{ 593 return (r >> 16U) & 0x1U; 594} 595static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 falcon_falcon_debuginfo_r(void) 600{ 601 return 0x00000094U; 602} 603#endif
diff --git a/include/nvgpu/hw/gp10b/hw_fb_gp10b.h b/include/nvgpu/hw/gp10b/hw_fb_gp10b.h
deleted file mode 100644
index c1ef471..0000000
--- a/include/nvgpu/hw/gp10b/hw_fb_gp10b.h
+++ /dev/null
@@ -1,463 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fb_gp10b_h_ 57#define _hw_fb_gp10b_h_ 58 59static inline u32 fb_fbhub_num_active_ltcs_r(void) 60{ 61 return 0x00100800U; 62} 63static inline u32 fb_mmu_ctrl_r(void) 64{ 65 return 0x00100c80U; 66} 67static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) 68{ 69 return (r >> 15U) & 0x1U; 70} 71static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) 76{ 77 return (r >> 16U) & 0xffU; 78} 79static inline u32 fb_priv_mmu_phy_secure_r(void) 80{ 81 return 0x00100ce4U; 82} 83static inline u32 fb_mmu_invalidate_pdb_r(void) 84{ 85 return 0x00100cb8U; 86} 87static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) 92{ 93 return 0x2U; 94} 95static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) 96{ 97 return (v & 0xfffffffU) << 4U; 98} 99static inline u32 fb_mmu_invalidate_r(void) 100{ 101 return 0x00100cbcU; 102} 103static inline u32 fb_mmu_invalidate_all_va_true_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) 108{ 109 return 0x2U; 110} 111static inline u32 fb_mmu_invalidate_hubtlb_only_s(void) 112{ 113 return 1U; 114} 
115static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v) 116{ 117 return (v & 0x1U) << 2U; 118} 119static inline u32 fb_mmu_invalidate_hubtlb_only_m(void) 120{ 121 return 0x1U << 2U; 122} 123static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r) 124{ 125 return (r >> 2U) & 0x1U; 126} 127static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void) 128{ 129 return 0x4U; 130} 131static inline u32 fb_mmu_invalidate_replay_s(void) 132{ 133 return 3U; 134} 135static inline u32 fb_mmu_invalidate_replay_f(u32 v) 136{ 137 return (v & 0x7U) << 3U; 138} 139static inline u32 fb_mmu_invalidate_replay_m(void) 140{ 141 return 0x7U << 3U; 142} 143static inline u32 fb_mmu_invalidate_replay_v(u32 r) 144{ 145 return (r >> 3U) & 0x7U; 146} 147static inline u32 fb_mmu_invalidate_replay_none_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 fb_mmu_invalidate_replay_start_f(void) 152{ 153 return 0x8U; 154} 155static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void) 156{ 157 return 0x10U; 158} 159static inline u32 fb_mmu_invalidate_replay_cancel_targeted_f(void) 160{ 161 return 0x18U; 162} 163static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void) 164{ 165 return 0x20U; 166} 167static inline u32 fb_mmu_invalidate_replay_cancel_f(void) 168{ 169 return 0x20U; 170} 171static inline u32 fb_mmu_invalidate_sys_membar_s(void) 172{ 173 return 1U; 174} 175static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v) 176{ 177 return (v & 0x1U) << 6U; 178} 179static inline u32 fb_mmu_invalidate_sys_membar_m(void) 180{ 181 return 0x1U << 6U; 182} 183static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r) 184{ 185 return (r >> 6U) & 0x1U; 186} 187static inline u32 fb_mmu_invalidate_sys_membar_true_f(void) 188{ 189 return 0x40U; 190} 191static inline u32 fb_mmu_invalidate_ack_s(void) 192{ 193 return 2U; 194} 195static inline u32 fb_mmu_invalidate_ack_f(u32 v) 196{ 197 return (v & 0x3U) << 7U; 198} 199static inline u32 fb_mmu_invalidate_ack_m(void) 200{ 201 return 0x3U << 7U; 202} 
203static inline u32 fb_mmu_invalidate_ack_v(u32 r) 204{ 205 return (r >> 7U) & 0x3U; 206} 207static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void) 212{ 213 return 0x100U; 214} 215static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void) 216{ 217 return 0x80U; 218} 219static inline u32 fb_mmu_invalidate_cancel_client_id_s(void) 220{ 221 return 6U; 222} 223static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v) 224{ 225 return (v & 0x3fU) << 9U; 226} 227static inline u32 fb_mmu_invalidate_cancel_client_id_m(void) 228{ 229 return 0x3fU << 9U; 230} 231static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r) 232{ 233 return (r >> 9U) & 0x3fU; 234} 235static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void) 236{ 237 return 5U; 238} 239static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v) 240{ 241 return (v & 0x1fU) << 15U; 242} 243static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void) 244{ 245 return 0x1fU << 15U; 246} 247static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r) 248{ 249 return (r >> 15U) & 0x1fU; 250} 251static inline u32 fb_mmu_invalidate_cancel_client_type_s(void) 252{ 253 return 1U; 254} 255static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v) 256{ 257 return (v & 0x1U) << 20U; 258} 259static inline u32 fb_mmu_invalidate_cancel_client_type_m(void) 260{ 261 return 0x1U << 20U; 262} 263static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r) 264{ 265 return (r >> 20U) & 0x1U; 266} 267static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void) 272{ 273 return 0x100000U; 274} 275static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void) 276{ 277 return 3U; 278} 279static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v) 280{ 281 return (v & 0x7U) << 24U; 282} 283static inline u32 
fb_mmu_invalidate_cancel_cache_level_m(void) 284{ 285 return 0x7U << 24U; 286} 287static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r) 288{ 289 return (r >> 24U) & 0x7U; 290} 291static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void) 292{ 293 return 0x0U; 294} 295static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void) 296{ 297 return 0x1000000U; 298} 299static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void) 300{ 301 return 0x2000000U; 302} 303static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void) 304{ 305 return 0x3000000U; 306} 307static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void) 308{ 309 return 0x4000000U; 310} 311static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void) 312{ 313 return 0x5000000U; 314} 315static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void) 316{ 317 return 0x6000000U; 318} 319static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void) 320{ 321 return 0x7000000U; 322} 323static inline u32 fb_mmu_invalidate_trigger_s(void) 324{ 325 return 1U; 326} 327static inline u32 fb_mmu_invalidate_trigger_f(u32 v) 328{ 329 return (v & 0x1U) << 31U; 330} 331static inline u32 fb_mmu_invalidate_trigger_m(void) 332{ 333 return 0x1U << 31U; 334} 335static inline u32 fb_mmu_invalidate_trigger_v(u32 r) 336{ 337 return (r >> 31U) & 0x1U; 338} 339static inline u32 fb_mmu_invalidate_trigger_true_f(void) 340{ 341 return 0x80000000U; 342} 343static inline u32 fb_mmu_debug_wr_r(void) 344{ 345 return 0x00100cc8U; 346} 347static inline u32 fb_mmu_debug_wr_aperture_s(void) 348{ 349 return 2U; 350} 351static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) 352{ 353 return (v & 0x3U) << 0U; 354} 355static inline u32 fb_mmu_debug_wr_aperture_m(void) 356{ 357 return 0x3U << 0U; 358} 359static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) 360{ 361 return (r >> 0U) & 0x3U; 362} 363static inline u32 
fb_mmu_debug_wr_aperture_vid_mem_f(void) 364{ 365 return 0x0U; 366} 367static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) 368{ 369 return 0x2U; 370} 371static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) 372{ 373 return 0x3U; 374} 375static inline u32 fb_mmu_debug_wr_vol_false_f(void) 376{ 377 return 0x0U; 378} 379static inline u32 fb_mmu_debug_wr_vol_true_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 fb_mmu_debug_wr_vol_true_f(void) 384{ 385 return 0x4U; 386} 387static inline u32 fb_mmu_debug_wr_addr_f(u32 v) 388{ 389 return (v & 0xfffffffU) << 4U; 390} 391static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) 392{ 393 return 0x0000000cU; 394} 395static inline u32 fb_mmu_debug_rd_r(void) 396{ 397 return 0x00100cccU; 398} 399static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) 400{ 401 return 0x0U; 402} 403static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) 404{ 405 return 0x2U; 406} 407static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) 408{ 409 return 0x3U; 410} 411static inline u32 fb_mmu_debug_rd_vol_false_f(void) 412{ 413 return 0x0U; 414} 415static inline u32 fb_mmu_debug_rd_addr_f(u32 v) 416{ 417 return (v & 0xfffffffU) << 4U; 418} 419static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) 420{ 421 return 0x0000000cU; 422} 423static inline u32 fb_mmu_debug_ctrl_r(void) 424{ 425 return 0x00100cc4U; 426} 427static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) 428{ 429 return (r >> 16U) & 0x1U; 430} 431static inline u32 fb_mmu_debug_ctrl_debug_m(void) 432{ 433 return 0x1U << 16U; 434} 435static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) 436{ 437 return 0x00000001U; 438} 439static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) 440{ 441 return 0x00000000U; 442} 443static inline u32 fb_mmu_vpr_info_r(void) 444{ 445 return 0x00100cd0U; 446} 447static inline u32 fb_mmu_vpr_info_fetch_v(u32 r) 448{ 449 return (r >> 2U) & 0x1U; 450} 451static inline u32 
fb_mmu_vpr_info_fetch_false_v(void) 452{ 453 return 0x00000000U; 454} 455static inline u32 fb_mmu_vpr_info_fetch_true_v(void) 456{ 457 return 0x00000001U; 458} 459static inline u32 fb_niso_flush_sysmem_addr_r(void) 460{ 461 return 0x00100c10U; 462} 463#endif
diff --git a/include/nvgpu/hw/gp10b/hw_fifo_gp10b.h b/include/nvgpu/hw/gp10b/hw_fifo_gp10b.h
deleted file mode 100644
index 7170162..0000000
--- a/include/nvgpu/hw/gp10b/hw_fifo_gp10b.h
+++ /dev/null
@@ -1,699 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fifo_gp10b_h_ 57#define _hw_fifo_gp10b_h_ 58 59static inline u32 fifo_bar1_base_r(void) 60{ 61 return 0x00002254U; 62} 63static inline u32 fifo_bar1_base_ptr_f(u32 v) 64{ 65 return (v & 0xfffffffU) << 0U; 66} 67static inline u32 fifo_bar1_base_ptr_align_shift_v(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 fifo_bar1_base_valid_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fifo_bar1_base_valid_true_f(void) 76{ 77 return 0x10000000U; 78} 79static inline u32 fifo_runlist_base_r(void) 80{ 81 return 0x00002270U; 82} 83static inline u32 fifo_runlist_base_ptr_f(u32 v) 84{ 85 return (v & 0xfffffffU) << 0U; 86} 87static inline u32 fifo_runlist_base_target_vid_mem_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) 92{ 93 return 0x20000000U; 94} 95static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) 96{ 97 return 0x30000000U; 98} 99static inline u32 fifo_runlist_r(void) 100{ 101 return 0x00002274U; 102} 103static inline u32 fifo_runlist_engine_f(u32 v) 104{ 105 return (v & 0xfU) << 20U; 106} 107static inline u32 fifo_eng_runlist_base_r(u32 i) 108{ 109 return 0x00002280U + i*8U; 110} 111static inline u32 fifo_eng_runlist_base__size_1_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 fifo_eng_runlist_r(u32 i) 116{ 117 return 0x00002284U + i*8U; 118} 119static inline u32 fifo_eng_runlist__size_1_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 fifo_eng_runlist_length_f(u32 v) 124{ 125 return (v & 0xffffU) << 0U; 126} 127static inline u32 fifo_eng_runlist_length_max_v(void) 128{ 129 return 0x0000ffffU; 130} 131static inline u32 fifo_eng_runlist_pending_true_f(void) 132{ 133 return 0x100000U; 134} 135static inline u32 fifo_pb_timeslice_r(u32 i) 136{ 137 return 0x00002350U + i*4U; 138} 139static inline u32 fifo_pb_timeslice_timeout_16_f(void) 140{ 141 return 0x10U; 142} 143static inline u32 fifo_pb_timeslice_timescale_0_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 fifo_pb_timeslice_enable_true_f(void) 148{ 149 return 0x10000000U; 150} 151static inline u32 fifo_pbdma_map_r(u32 i) 152{ 153 return 0x00002390U + i*4U; 154} 155static inline u32 fifo_intr_0_r(void) 156{ 157 return 0x00002100U; 158} 159static inline u32 fifo_intr_0_bind_error_pending_f(void) 160{ 161 return 0x1U; 162} 163static inline u32 fifo_intr_0_bind_error_reset_f(void) 164{ 165 return 0x1U; 166} 167static inline u32 fifo_intr_0_sched_error_pending_f(void) 168{ 169 return 0x100U; 170} 171static inline u32 fifo_intr_0_sched_error_reset_f(void) 172{ 173 return 0x100U; 174} 175static inline u32 fifo_intr_0_chsw_error_pending_f(void) 176{ 177 return 0x10000U; 178} 179static inline u32 fifo_intr_0_chsw_error_reset_f(void) 180{ 181 return 0x10000U; 182} 183static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) 184{ 185 return 0x800000U; 186} 187static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) 188{ 189 return 0x800000U; 190} 191static inline u32 fifo_intr_0_lb_error_pending_f(void) 192{ 193 return 0x1000000U; 194} 195static inline u32 fifo_intr_0_lb_error_reset_f(void) 196{ 197 return 0x1000000U; 198} 199static inline u32 fifo_intr_0_replayable_fault_error_pending_f(void) 200{ 201 return 0x2000000U; 202} 
203static inline u32 fifo_intr_0_dropped_mmu_fault_pending_f(void) 204{ 205 return 0x8000000U; 206} 207static inline u32 fifo_intr_0_dropped_mmu_fault_reset_f(void) 208{ 209 return 0x8000000U; 210} 211static inline u32 fifo_intr_0_mmu_fault_pending_f(void) 212{ 213 return 0x10000000U; 214} 215static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) 216{ 217 return 0x20000000U; 218} 219static inline u32 fifo_intr_0_runlist_event_pending_f(void) 220{ 221 return 0x40000000U; 222} 223static inline u32 fifo_intr_0_channel_intr_pending_f(void) 224{ 225 return 0x80000000U; 226} 227static inline u32 fifo_intr_en_0_r(void) 228{ 229 return 0x00002140U; 230} 231static inline u32 fifo_intr_en_0_sched_error_f(u32 v) 232{ 233 return (v & 0x1U) << 8U; 234} 235static inline u32 fifo_intr_en_0_sched_error_m(void) 236{ 237 return 0x1U << 8U; 238} 239static inline u32 fifo_intr_en_0_mmu_fault_f(u32 v) 240{ 241 return (v & 0x1U) << 28U; 242} 243static inline u32 fifo_intr_en_0_mmu_fault_m(void) 244{ 245 return 0x1U << 28U; 246} 247static inline u32 fifo_intr_en_1_r(void) 248{ 249 return 0x00002528U; 250} 251static inline u32 fifo_intr_bind_error_r(void) 252{ 253 return 0x0000252cU; 254} 255static inline u32 fifo_intr_sched_error_r(void) 256{ 257 return 0x0000254cU; 258} 259static inline u32 fifo_intr_sched_error_code_f(u32 v) 260{ 261 return (v & 0xffU) << 0U; 262} 263static inline u32 fifo_intr_sched_error_code_ctxsw_timeout_v(void) 264{ 265 return 0x0000000aU; 266} 267static inline u32 fifo_intr_chsw_error_r(void) 268{ 269 return 0x0000256cU; 270} 271static inline u32 fifo_intr_mmu_fault_id_r(void) 272{ 273 return 0x0000259cU; 274} 275static inline u32 fifo_intr_mmu_fault_eng_id_graphics_v(void) 276{ 277 return 0x00000000U; 278} 279static inline u32 fifo_intr_mmu_fault_eng_id_graphics_f(void) 280{ 281 return 0x0U; 282} 283static inline u32 fifo_intr_mmu_fault_inst_r(u32 i) 284{ 285 return 0x00002800U + i*16U; 286} 287static inline u32 fifo_intr_mmu_fault_inst_ptr_v(u32 r) 288{ 289 
return (r >> 0U) & 0xfffffffU; 290} 291static inline u32 fifo_intr_mmu_fault_inst_ptr_align_shift_v(void) 292{ 293 return 0x0000000cU; 294} 295static inline u32 fifo_intr_mmu_fault_lo_r(u32 i) 296{ 297 return 0x00002804U + i*16U; 298} 299static inline u32 fifo_intr_mmu_fault_hi_r(u32 i) 300{ 301 return 0x00002808U + i*16U; 302} 303static inline u32 fifo_intr_mmu_fault_info_r(u32 i) 304{ 305 return 0x0000280cU + i*16U; 306} 307static inline u32 fifo_intr_mmu_fault_info_type_v(u32 r) 308{ 309 return (r >> 0U) & 0x1fU; 310} 311static inline u32 fifo_intr_mmu_fault_info_access_type_v(u32 r) 312{ 313 return (r >> 16U) & 0x7U; 314} 315static inline u32 fifo_intr_mmu_fault_info_client_type_v(u32 r) 316{ 317 return (r >> 20U) & 0x1U; 318} 319static inline u32 fifo_intr_mmu_fault_info_client_type_gpc_v(void) 320{ 321 return 0x00000000U; 322} 323static inline u32 fifo_intr_mmu_fault_info_client_type_hub_v(void) 324{ 325 return 0x00000001U; 326} 327static inline u32 fifo_intr_mmu_fault_info_client_v(u32 r) 328{ 329 return (r >> 8U) & 0x7fU; 330} 331static inline u32 fifo_intr_pbdma_id_r(void) 332{ 333 return 0x000025a0U; 334} 335static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) 336{ 337 return (v & 0x1U) << (0U + i*1U); 338} 339static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) 340{ 341 return (r >> (0U + i*1U)) & 0x1U; 342} 343static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) 344{ 345 return 0x00000001U; 346} 347static inline u32 fifo_intr_runlist_r(void) 348{ 349 return 0x00002a00U; 350} 351static inline u32 fifo_fb_timeout_r(void) 352{ 353 return 0x00002a04U; 354} 355static inline u32 fifo_fb_timeout_period_m(void) 356{ 357 return 0x3fffffffU << 0U; 358} 359static inline u32 fifo_fb_timeout_period_max_f(void) 360{ 361 return 0x3fffffffU; 362} 363static inline u32 fifo_error_sched_disable_r(void) 364{ 365 return 0x0000262cU; 366} 367static inline u32 fifo_sched_disable_r(void) 368{ 369 return 0x00002630U; 370} 371static inline u32 
fifo_sched_disable_runlist_f(u32 v, u32 i) 372{ 373 return (v & 0x1U) << (0U + i*1U); 374} 375static inline u32 fifo_sched_disable_runlist_m(u32 i) 376{ 377 return 0x1U << (0U + i*1U); 378} 379static inline u32 fifo_sched_disable_true_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 fifo_preempt_r(void) 384{ 385 return 0x00002634U; 386} 387static inline u32 fifo_preempt_pending_true_f(void) 388{ 389 return 0x100000U; 390} 391static inline u32 fifo_preempt_type_channel_f(void) 392{ 393 return 0x0U; 394} 395static inline u32 fifo_preempt_type_tsg_f(void) 396{ 397 return 0x1000000U; 398} 399static inline u32 fifo_preempt_chid_f(u32 v) 400{ 401 return (v & 0xfffU) << 0U; 402} 403static inline u32 fifo_preempt_id_f(u32 v) 404{ 405 return (v & 0xfffU) << 0U; 406} 407static inline u32 fifo_trigger_mmu_fault_r(u32 i) 408{ 409 return 0x00002a30U + i*4U; 410} 411static inline u32 fifo_trigger_mmu_fault_id_f(u32 v) 412{ 413 return (v & 0x1fU) << 0U; 414} 415static inline u32 fifo_trigger_mmu_fault_enable_f(u32 v) 416{ 417 return (v & 0x1U) << 8U; 418} 419static inline u32 fifo_engine_status_r(u32 i) 420{ 421 return 0x00002640U + i*8U; 422} 423static inline u32 fifo_engine_status__size_1_v(void) 424{ 425 return 0x00000002U; 426} 427static inline u32 fifo_engine_status_id_v(u32 r) 428{ 429 return (r >> 0U) & 0xfffU; 430} 431static inline u32 fifo_engine_status_id_type_v(u32 r) 432{ 433 return (r >> 12U) & 0x1U; 434} 435static inline u32 fifo_engine_status_id_type_chid_v(void) 436{ 437 return 0x00000000U; 438} 439static inline u32 fifo_engine_status_id_type_tsgid_v(void) 440{ 441 return 0x00000001U; 442} 443static inline u32 fifo_engine_status_ctx_status_v(u32 r) 444{ 445 return (r >> 13U) & 0x7U; 446} 447static inline u32 fifo_engine_status_ctx_status_invalid_v(void) 448{ 449 return 0x00000000U; 450} 451static inline u32 fifo_engine_status_ctx_status_valid_v(void) 452{ 453 return 0x00000001U; 454} 455static inline u32 
fifo_engine_status_ctx_status_ctxsw_load_v(void) 456{ 457 return 0x00000005U; 458} 459static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) 460{ 461 return 0x00000006U; 462} 463static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) 464{ 465 return 0x00000007U; 466} 467static inline u32 fifo_engine_status_next_id_v(u32 r) 468{ 469 return (r >> 16U) & 0xfffU; 470} 471static inline u32 fifo_engine_status_next_id_type_v(u32 r) 472{ 473 return (r >> 28U) & 0x1U; 474} 475static inline u32 fifo_engine_status_next_id_type_chid_v(void) 476{ 477 return 0x00000000U; 478} 479static inline u32 fifo_engine_status_faulted_v(u32 r) 480{ 481 return (r >> 30U) & 0x1U; 482} 483static inline u32 fifo_engine_status_faulted_true_v(void) 484{ 485 return 0x00000001U; 486} 487static inline u32 fifo_engine_status_engine_v(u32 r) 488{ 489 return (r >> 31U) & 0x1U; 490} 491static inline u32 fifo_engine_status_engine_idle_v(void) 492{ 493 return 0x00000000U; 494} 495static inline u32 fifo_engine_status_engine_busy_v(void) 496{ 497 return 0x00000001U; 498} 499static inline u32 fifo_engine_status_ctxsw_v(u32 r) 500{ 501 return (r >> 15U) & 0x1U; 502} 503static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) 504{ 505 return 0x00000001U; 506} 507static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) 508{ 509 return 0x8000U; 510} 511static inline u32 fifo_pbdma_status_r(u32 i) 512{ 513 return 0x00003080U + i*4U; 514} 515static inline u32 fifo_pbdma_status__size_1_v(void) 516{ 517 return 0x00000001U; 518} 519static inline u32 fifo_pbdma_status_id_v(u32 r) 520{ 521 return (r >> 0U) & 0xfffU; 522} 523static inline u32 fifo_pbdma_status_id_type_v(u32 r) 524{ 525 return (r >> 12U) & 0x1U; 526} 527static inline u32 fifo_pbdma_status_id_type_chid_v(void) 528{ 529 return 0x00000000U; 530} 531static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) 532{ 533 return 0x00000001U; 534} 535static inline u32 fifo_pbdma_status_chan_status_v(u32 r) 536{ 537 return (r >> 
13U) & 0x7U; 538} 539static inline u32 fifo_pbdma_status_chan_status_valid_v(void) 540{ 541 return 0x00000001U; 542} 543static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) 544{ 545 return 0x00000005U; 546} 547static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) 548{ 549 return 0x00000006U; 550} 551static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) 552{ 553 return 0x00000007U; 554} 555static inline u32 fifo_pbdma_status_next_id_v(u32 r) 556{ 557 return (r >> 16U) & 0xfffU; 558} 559static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) 560{ 561 return (r >> 28U) & 0x1U; 562} 563static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) 564{ 565 return 0x00000000U; 566} 567static inline u32 fifo_pbdma_status_chsw_v(u32 r) 568{ 569 return (r >> 15U) & 0x1U; 570} 571static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) 572{ 573 return 0x00000001U; 574} 575static inline u32 fifo_replay_fault_buffer_lo_r(void) 576{ 577 return 0x00002a70U; 578} 579static inline u32 fifo_replay_fault_buffer_lo_enable_v(u32 r) 580{ 581 return (r >> 0U) & 0x1U; 582} 583static inline u32 fifo_replay_fault_buffer_lo_enable_true_v(void) 584{ 585 return 0x00000001U; 586} 587static inline u32 fifo_replay_fault_buffer_lo_enable_false_v(void) 588{ 589 return 0x00000000U; 590} 591static inline u32 fifo_replay_fault_buffer_lo_base_f(u32 v) 592{ 593 return (v & 0xfffffU) << 12U; 594} 595static inline u32 fifo_replay_fault_buffer_lo_base_reset_v(void) 596{ 597 return 0x00000000U; 598} 599static inline u32 fifo_replay_fault_buffer_hi_r(void) 600{ 601 return 0x00002a74U; 602} 603static inline u32 fifo_replay_fault_buffer_hi_base_f(u32 v) 604{ 605 return (v & 0xffU) << 0U; 606} 607static inline u32 fifo_replay_fault_buffer_hi_base_reset_v(void) 608{ 609 return 0x00000000U; 610} 611static inline u32 fifo_replay_fault_buffer_size_r(void) 612{ 613 return 0x00002a78U; 614} 615static inline u32 fifo_replay_fault_buffer_size_hw_f(u32 v) 616{ 617 return (v 
& 0x1ffU) << 0U; 618} 619static inline u32 fifo_replay_fault_buffer_size_hw_entries_v(void) 620{ 621 return 0x000000c0U; 622} 623static inline u32 fifo_replay_fault_buffer_get_r(void) 624{ 625 return 0x00002a7cU; 626} 627static inline u32 fifo_replay_fault_buffer_get_offset_hw_f(u32 v) 628{ 629 return (v & 0x1ffU) << 0U; 630} 631static inline u32 fifo_replay_fault_buffer_get_offset_hw_init_v(void) 632{ 633 return 0x00000000U; 634} 635static inline u32 fifo_replay_fault_buffer_put_r(void) 636{ 637 return 0x00002a80U; 638} 639static inline u32 fifo_replay_fault_buffer_put_offset_hw_f(u32 v) 640{ 641 return (v & 0x1ffU) << 0U; 642} 643static inline u32 fifo_replay_fault_buffer_put_offset_hw_init_v(void) 644{ 645 return 0x00000000U; 646} 647static inline u32 fifo_replay_fault_buffer_info_r(void) 648{ 649 return 0x00002a84U; 650} 651static inline u32 fifo_replay_fault_buffer_info_overflow_f(u32 v) 652{ 653 return (v & 0x1U) << 0U; 654} 655static inline u32 fifo_replay_fault_buffer_info_overflow_false_v(void) 656{ 657 return 0x00000000U; 658} 659static inline u32 fifo_replay_fault_buffer_info_overflow_true_v(void) 660{ 661 return 0x00000001U; 662} 663static inline u32 fifo_replay_fault_buffer_info_overflow_clear_v(void) 664{ 665 return 0x00000001U; 666} 667static inline u32 fifo_replay_fault_buffer_info_write_nack_f(u32 v) 668{ 669 return (v & 0x1U) << 24U; 670} 671static inline u32 fifo_replay_fault_buffer_info_write_nack_false_v(void) 672{ 673 return 0x00000000U; 674} 675static inline u32 fifo_replay_fault_buffer_info_write_nack_true_v(void) 676{ 677 return 0x00000001U; 678} 679static inline u32 fifo_replay_fault_buffer_info_write_nack_clear_v(void) 680{ 681 return 0x00000001U; 682} 683static inline u32 fifo_replay_fault_buffer_info_fault_while_buffer_disabled_f(u32 v) 684{ 685 return (v & 0x1U) << 28U; 686} 687static inline u32 fifo_replay_fault_buffer_info_fault_while_buffer_disabled_false_v(void) 688{ 689 return 0x00000000U; 690} 691static inline u32 
fifo_replay_fault_buffer_info_fault_while_buffer_disabled_true_v(void) 692{ 693 return 0x00000001U; 694} 695static inline u32 fifo_replay_fault_buffer_info_fault_while_buffer_disabled_clear_v(void) 696{ 697 return 0x00000001U; 698} 699#endif
diff --git a/include/nvgpu/hw/gp10b/hw_flush_gp10b.h b/include/nvgpu/hw/gp10b/hw_flush_gp10b.h
deleted file mode 100644
index ae6eabf..0000000
--- a/include/nvgpu/hw/gp10b/hw_flush_gp10b.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_flush_gp10b_h_ 57#define _hw_flush_gp10b_h_ 58 59static inline u32 flush_l2_system_invalidate_r(void) 60{ 61 return 0x00070004U; 62} 63static inline u32 flush_l2_system_invalidate_pending_v(u32 r) 64{ 65 return (r >> 0U) & 0x1U; 66} 67static inline u32 flush_l2_system_invalidate_pending_busy_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 flush_l2_system_invalidate_pending_busy_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) 76{ 77 return (r >> 1U) & 0x1U; 78} 79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 flush_l2_flush_dirty_r(void) 84{ 85 return 0x00070010U; 86} 87static inline u32 flush_l2_flush_dirty_pending_v(u32 r) 88{ 89 return (r >> 0U) & 0x1U; 90} 91static inline u32 flush_l2_flush_dirty_pending_empty_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 flush_l2_flush_dirty_pending_empty_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 flush_l2_flush_dirty_pending_busy_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 flush_l2_flush_dirty_pending_busy_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) 108{ 109 return 
(r >> 1U) & 0x1U; 110} 111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 flush_l2_clean_comptags_r(void) 124{ 125 return 0x0007000cU; 126} 127static inline u32 flush_l2_clean_comptags_pending_v(u32 r) 128{ 129 return (r >> 0U) & 0x1U; 130} 131static inline u32 flush_l2_clean_comptags_pending_empty_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 flush_l2_clean_comptags_pending_empty_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 flush_l2_clean_comptags_pending_busy_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 flush_l2_clean_comptags_pending_busy_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 flush_fb_flush_r(void) 164{ 165 return 0x00070000U; 166} 167static inline u32 flush_fb_flush_pending_v(u32 r) 168{ 169 return (r >> 0U) & 0x1U; 170} 171static inline u32 flush_fb_flush_pending_busy_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 flush_fb_flush_pending_busy_f(void) 176{ 177 return 0x1U; 178} 179static inline u32 flush_fb_flush_outstanding_v(u32 r) 180{ 181 return (r >> 1U) & 0x1U; 182} 183static inline u32 flush_fb_flush_outstanding_true_v(void) 184{ 185 return 0x00000001U; 186} 187#endif
diff --git a/include/nvgpu/hw/gp10b/hw_fuse_gp10b.h b/include/nvgpu/hw/gp10b/hw_fuse_gp10b.h
deleted file mode 100644
index 521dcfe..0000000
--- a/include/nvgpu/hw/gp10b/hw_fuse_gp10b.h
+++ /dev/null
@@ -1,155 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fuse_gp10b_h_ 57#define _hw_fuse_gp10b_h_ 58 59static inline u32 fuse_status_opt_gpc_r(void) 60{ 61 return 0x00021c1cU; 62} 63static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) 64{ 65 return 0x00021c38U + i*4U; 66} 67static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) 68{ 69 return 0x00021838U + i*4U; 70} 71static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) 72{ 73 return 0x00021944U; 74} 75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) 76{ 77 return (v & 0xffU) << 0U; 78} 79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) 80{ 81 return 0xffU << 0U; 82} 83static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) 84{ 85 return (r >> 0U) & 0xffU; 86} 87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) 88{ 89 return 0x00021948U; 90} 91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) 92{ 93 return (v & 0x1U) << 0U; 94} 95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) 96{ 97 return 0x1U << 0U; 98} 99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 
fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 fuse_status_opt_fbio_r(void) 112{ 113 return 0x00021c14U; 114} 115static inline u32 fuse_status_opt_fbio_data_f(u32 v) 116{ 117 return (v & 0xffffU) << 0U; 118} 119static inline u32 fuse_status_opt_fbio_data_m(void) 120{ 121 return 0xffffU << 0U; 122} 123static inline u32 fuse_status_opt_fbio_data_v(u32 r) 124{ 125 return (r >> 0U) & 0xffffU; 126} 127static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i) 128{ 129 return 0x00021d70U + i*4U; 130} 131static inline u32 fuse_status_opt_fbp_r(void) 132{ 133 return 0x00021d38U; 134} 135static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) 136{ 137 return (r >> (0U + i*1U)) & 0x1U; 138} 139static inline u32 fuse_opt_ecc_en_r(void) 140{ 141 return 0x00021228U; 142} 143static inline u32 fuse_opt_feature_fuses_override_disable_r(void) 144{ 145 return 0x000213f0U; 146} 147static inline u32 fuse_opt_sec_debug_en_r(void) 148{ 149 return 0x00021218U; 150} 151static inline u32 fuse_opt_priv_sec_en_r(void) 152{ 153 return 0x00021434U; 154} 155#endif
diff --git a/include/nvgpu/hw/gp10b/hw_gmmu_gp10b.h b/include/nvgpu/hw/gp10b/hw_gmmu_gp10b.h
deleted file mode 100644
index 6aeb435..0000000
--- a/include/nvgpu/hw/gp10b/hw_gmmu_gp10b.h
+++ /dev/null
@@ -1,331 +0,0 @@ 1/* 2 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gmmu_gp10b_h_ 57#define _hw_gmmu_gp10b_h_ 58 59static inline u32 gmmu_new_pde_is_pte_w(void) 60{ 61 return 0U; 62} 63static inline u32 gmmu_new_pde_is_pte_false_f(void) 64{ 65 return 0x0U; 66} 67static inline u32 gmmu_new_pde_aperture_w(void) 68{ 69 return 0U; 70} 71static inline u32 gmmu_new_pde_aperture_invalid_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 gmmu_new_pde_aperture_video_memory_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void) 84{ 85 return 0x6U; 86} 87static inline u32 gmmu_new_pde_address_sys_f(u32 v) 88{ 89 return (v & 0xffffffU) << 8U; 90} 91static inline u32 gmmu_new_pde_address_sys_w(void) 92{ 93 return 0U; 94} 95static inline u32 gmmu_new_pde_vol_w(void) 96{ 97 return 0U; 98} 99static inline u32 gmmu_new_pde_vol_true_f(void) 100{ 101 return 0x8U; 102} 103static inline u32 gmmu_new_pde_vol_false_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 gmmu_new_pde_address_shift_v(void) 108{ 109 return 0x0000000cU; 110} 111static inline u32 gmmu_new_pde__size_v(void) 112{ 113 return 0x00000008U; 114} 115static inline u32 gmmu_new_dual_pde_is_pte_w(void) 116{ 117 return 0U; 118} 
119static inline u32 gmmu_new_dual_pde_is_pte_false_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 gmmu_new_dual_pde_aperture_big_w(void) 124{ 125 return 0U; 126} 127static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void) 128{ 129 return 0x0U; 130} 131static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void) 132{ 133 return 0x2U; 134} 135static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void) 136{ 137 return 0x4U; 138} 139static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void) 140{ 141 return 0x6U; 142} 143static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v) 144{ 145 return (v & 0xfffffffU) << 4U; 146} 147static inline u32 gmmu_new_dual_pde_address_big_sys_w(void) 148{ 149 return 0U; 150} 151static inline u32 gmmu_new_dual_pde_aperture_small_w(void) 152{ 153 return 2U; 154} 155static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void) 164{ 165 return 0x4U; 166} 167static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void) 168{ 169 return 0x6U; 170} 171static inline u32 gmmu_new_dual_pde_vol_small_w(void) 172{ 173 return 2U; 174} 175static inline u32 gmmu_new_dual_pde_vol_small_true_f(void) 176{ 177 return 0x8U; 178} 179static inline u32 gmmu_new_dual_pde_vol_small_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 gmmu_new_dual_pde_vol_big_w(void) 184{ 185 return 0U; 186} 187static inline u32 gmmu_new_dual_pde_vol_big_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 gmmu_new_dual_pde_vol_big_false_f(void) 192{ 193 return 0x0U; 194} 195static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v) 196{ 197 return (v & 0xffffffU) << 8U; 198} 199static inline u32 gmmu_new_dual_pde_address_small_sys_w(void) 200{ 201 return 2U; 202} 203static inline u32 
gmmu_new_dual_pde_address_shift_v(void) 204{ 205 return 0x0000000cU; 206} 207static inline u32 gmmu_new_dual_pde_address_big_shift_v(void) 208{ 209 return 0x00000008U; 210} 211static inline u32 gmmu_new_dual_pde__size_v(void) 212{ 213 return 0x00000010U; 214} 215static inline u32 gmmu_new_pte__size_v(void) 216{ 217 return 0x00000008U; 218} 219static inline u32 gmmu_new_pte_valid_w(void) 220{ 221 return 0U; 222} 223static inline u32 gmmu_new_pte_valid_true_f(void) 224{ 225 return 0x1U; 226} 227static inline u32 gmmu_new_pte_valid_false_f(void) 228{ 229 return 0x0U; 230} 231static inline u32 gmmu_new_pte_privilege_w(void) 232{ 233 return 0U; 234} 235static inline u32 gmmu_new_pte_privilege_true_f(void) 236{ 237 return 0x20U; 238} 239static inline u32 gmmu_new_pte_privilege_false_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gmmu_new_pte_address_sys_f(u32 v) 244{ 245 return (v & 0xffffffU) << 8U; 246} 247static inline u32 gmmu_new_pte_address_sys_w(void) 248{ 249 return 0U; 250} 251static inline u32 gmmu_new_pte_address_vid_f(u32 v) 252{ 253 return (v & 0xffffffU) << 8U; 254} 255static inline u32 gmmu_new_pte_address_vid_w(void) 256{ 257 return 0U; 258} 259static inline u32 gmmu_new_pte_vol_w(void) 260{ 261 return 0U; 262} 263static inline u32 gmmu_new_pte_vol_true_f(void) 264{ 265 return 0x8U; 266} 267static inline u32 gmmu_new_pte_vol_false_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 gmmu_new_pte_aperture_w(void) 272{ 273 return 0U; 274} 275static inline u32 gmmu_new_pte_aperture_video_memory_f(void) 276{ 277 return 0x0U; 278} 279static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void) 280{ 281 return 0x4U; 282} 283static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void) 284{ 285 return 0x6U; 286} 287static inline u32 gmmu_new_pte_read_only_w(void) 288{ 289 return 0U; 290} 291static inline u32 gmmu_new_pte_read_only_true_f(void) 292{ 293 return 0x40U; 294} 295static inline u32 gmmu_new_pte_comptagline_f(u32 v) 296{ 297 return (v & 
0x3ffffU) << 4U; 298} 299static inline u32 gmmu_new_pte_comptagline_w(void) 300{ 301 return 1U; 302} 303static inline u32 gmmu_new_pte_kind_f(u32 v) 304{ 305 return (v & 0xffU) << 24U; 306} 307static inline u32 gmmu_new_pte_kind_w(void) 308{ 309 return 1U; 310} 311static inline u32 gmmu_new_pte_address_shift_v(void) 312{ 313 return 0x0000000cU; 314} 315static inline u32 gmmu_pte_kind_f(u32 v) 316{ 317 return (v & 0xffU) << 4U; 318} 319static inline u32 gmmu_pte_kind_w(void) 320{ 321 return 1U; 322} 323static inline u32 gmmu_pte_kind_invalid_v(void) 324{ 325 return 0x000000ffU; 326} 327static inline u32 gmmu_pte_kind_pitch_v(void) 328{ 329 return 0x00000000U; 330} 331#endif
diff --git a/include/nvgpu/hw/gp10b/hw_gr_gp10b.h b/include/nvgpu/hw/gp10b/hw_gr_gp10b.h
deleted file mode 100644
index 89c6bba..0000000
--- a/include/nvgpu/hw/gp10b/hw_gr_gp10b.h
+++ /dev/null
@@ -1,4419 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gr_gp10b_h_ 57#define _hw_gr_gp10b_h_ 58 59static inline u32 gr_intr_r(void) 60{ 61 return 0x00400100U; 62} 63static inline u32 gr_intr_notify_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 gr_intr_notify_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gr_intr_semaphore_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gr_intr_semaphore_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gr_intr_illegal_method_pending_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 gr_intr_illegal_method_reset_f(void) 84{ 85 return 0x10U; 86} 87static inline u32 gr_intr_illegal_notify_pending_f(void) 88{ 89 return 0x40U; 90} 91static inline u32 gr_intr_illegal_notify_reset_f(void) 92{ 93 return 0x40U; 94} 95static inline u32 gr_intr_firmware_method_f(u32 v) 96{ 97 return (v & 0x1U) << 8U; 98} 99static inline u32 gr_intr_firmware_method_pending_f(void) 100{ 101 return 0x100U; 102} 103static inline u32 gr_intr_firmware_method_reset_f(void) 104{ 105 return 0x100U; 106} 107static inline u32 gr_intr_illegal_class_pending_f(void) 108{ 109 return 0x20U; 110} 111static inline u32 gr_intr_illegal_class_reset_f(void) 112{ 113 return 0x20U; 114} 115static inline u32 gr_intr_fecs_error_pending_f(void) 116{ 117 
return 0x80000U; 118} 119static inline u32 gr_intr_fecs_error_reset_f(void) 120{ 121 return 0x80000U; 122} 123static inline u32 gr_intr_class_error_pending_f(void) 124{ 125 return 0x100000U; 126} 127static inline u32 gr_intr_class_error_reset_f(void) 128{ 129 return 0x100000U; 130} 131static inline u32 gr_intr_exception_pending_f(void) 132{ 133 return 0x200000U; 134} 135static inline u32 gr_intr_exception_reset_f(void) 136{ 137 return 0x200000U; 138} 139static inline u32 gr_fecs_intr_r(void) 140{ 141 return 0x00400144U; 142} 143static inline u32 gr_class_error_r(void) 144{ 145 return 0x00400110U; 146} 147static inline u32 gr_class_error_code_v(u32 r) 148{ 149 return (r >> 0U) & 0xffffU; 150} 151static inline u32 gr_intr_nonstall_r(void) 152{ 153 return 0x00400120U; 154} 155static inline u32 gr_intr_nonstall_trap_pending_f(void) 156{ 157 return 0x2U; 158} 159static inline u32 gr_intr_en_r(void) 160{ 161 return 0x0040013cU; 162} 163static inline u32 gr_exception_r(void) 164{ 165 return 0x00400108U; 166} 167static inline u32 gr_exception_fe_m(void) 168{ 169 return 0x1U << 0U; 170} 171static inline u32 gr_exception_gpc_m(void) 172{ 173 return 0x1U << 24U; 174} 175static inline u32 gr_exception_memfmt_m(void) 176{ 177 return 0x1U << 1U; 178} 179static inline u32 gr_exception_ds_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 gr_exception_sked_m(void) 184{ 185 return 0x1U << 8U; 186} 187static inline u32 gr_exception_pd_m(void) 188{ 189 return 0x1U << 2U; 190} 191static inline u32 gr_exception_scc_m(void) 192{ 193 return 0x1U << 3U; 194} 195static inline u32 gr_exception_ssync_m(void) 196{ 197 return 0x1U << 5U; 198} 199static inline u32 gr_exception_mme_m(void) 200{ 201 return 0x1U << 7U; 202} 203static inline u32 gr_exception1_r(void) 204{ 205 return 0x00400118U; 206} 207static inline u32 gr_exception1_gpc_0_pending_f(void) 208{ 209 return 0x1U; 210} 211static inline u32 gr_exception2_r(void) 212{ 213 return 0x0040011cU; 214} 215static inline u32 
gr_exception_en_r(void) 216{ 217 return 0x00400138U; 218} 219static inline u32 gr_exception_en_fe_m(void) 220{ 221 return 0x1U << 0U; 222} 223static inline u32 gr_exception1_en_r(void) 224{ 225 return 0x00400130U; 226} 227static inline u32 gr_exception2_en_r(void) 228{ 229 return 0x00400134U; 230} 231static inline u32 gr_gpfifo_ctl_r(void) 232{ 233 return 0x00400500U; 234} 235static inline u32 gr_gpfifo_ctl_access_f(u32 v) 236{ 237 return (v & 0x1U) << 0U; 238} 239static inline u32 gr_gpfifo_ctl_access_disabled_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gr_gpfifo_ctl_access_enabled_f(void) 244{ 245 return 0x1U; 246} 247static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) 248{ 249 return (v & 0x1U) << 16U; 250} 251static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) 252{ 253 return 0x00000001U; 254} 255static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) 256{ 257 return 0x10000U; 258} 259static inline u32 gr_gpfifo_status_r(void) 260{ 261 return 0x00400504U; 262} 263static inline u32 gr_trapped_addr_r(void) 264{ 265 return 0x00400704U; 266} 267static inline u32 gr_trapped_addr_mthd_v(u32 r) 268{ 269 return (r >> 2U) & 0xfffU; 270} 271static inline u32 gr_trapped_addr_subch_v(u32 r) 272{ 273 return (r >> 16U) & 0x7U; 274} 275static inline u32 gr_trapped_addr_mme_generated_v(u32 r) 276{ 277 return (r >> 20U) & 0x1U; 278} 279static inline u32 gr_trapped_addr_datahigh_v(u32 r) 280{ 281 return (r >> 24U) & 0x1U; 282} 283static inline u32 gr_trapped_addr_priv_v(u32 r) 284{ 285 return (r >> 28U) & 0x1U; 286} 287static inline u32 gr_trapped_addr_status_v(u32 r) 288{ 289 return (r >> 31U) & 0x1U; 290} 291static inline u32 gr_trapped_data_lo_r(void) 292{ 293 return 0x00400708U; 294} 295static inline u32 gr_trapped_data_hi_r(void) 296{ 297 return 0x0040070cU; 298} 299static inline u32 gr_trapped_data_mme_r(void) 300{ 301 return 0x00400710U; 302} 303static inline u32 gr_trapped_data_mme_pc_v(u32 r) 304{ 305 return (r >> 0U) & 0xfffU; 
306} 307static inline u32 gr_status_r(void) 308{ 309 return 0x00400700U; 310} 311static inline u32 gr_status_fe_method_upper_v(u32 r) 312{ 313 return (r >> 1U) & 0x1U; 314} 315static inline u32 gr_status_fe_method_lower_v(u32 r) 316{ 317 return (r >> 2U) & 0x1U; 318} 319static inline u32 gr_status_fe_method_lower_idle_v(void) 320{ 321 return 0x00000000U; 322} 323static inline u32 gr_status_fe_gi_v(u32 r) 324{ 325 return (r >> 21U) & 0x1U; 326} 327static inline u32 gr_status_mask_r(void) 328{ 329 return 0x00400610U; 330} 331static inline u32 gr_status_1_r(void) 332{ 333 return 0x00400604U; 334} 335static inline u32 gr_status_2_r(void) 336{ 337 return 0x00400608U; 338} 339static inline u32 gr_engine_status_r(void) 340{ 341 return 0x0040060cU; 342} 343static inline u32 gr_engine_status_value_busy_f(void) 344{ 345 return 0x1U; 346} 347static inline u32 gr_pri_be0_becs_be_exception_r(void) 348{ 349 return 0x00410204U; 350} 351static inline u32 gr_pri_be0_becs_be_exception_en_r(void) 352{ 353 return 0x00410208U; 354} 355static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) 356{ 357 return 0x00502c90U; 358} 359static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) 360{ 361 return 0x00502c94U; 362} 363static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) 364{ 365 return 0x00504508U; 366} 367static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 368{ 369 return 0x0050450cU; 370} 371static inline u32 gr_activity_0_r(void) 372{ 373 return 0x00400380U; 374} 375static inline u32 gr_activity_1_r(void) 376{ 377 return 0x00400384U; 378} 379static inline u32 gr_activity_2_r(void) 380{ 381 return 0x00400388U; 382} 383static inline u32 gr_activity_4_r(void) 384{ 385 return 0x00400390U; 386} 387static inline u32 gr_activity_4_gpc0_s(void) 388{ 389 return 3U; 390} 391static inline u32 gr_activity_4_gpc0_f(u32 v) 392{ 393 return (v & 0x7U) << 0U; 394} 395static inline u32 gr_activity_4_gpc0_m(void) 396{ 397 return 0x7U << 0U; 398} 399static inline u32 
gr_activity_4_gpc0_v(u32 r) 400{ 401 return (r >> 0U) & 0x7U; 402} 403static inline u32 gr_activity_4_gpc0_empty_v(void) 404{ 405 return 0x00000000U; 406} 407static inline u32 gr_activity_4_gpc0_preempted_v(void) 408{ 409 return 0x00000004U; 410} 411static inline u32 gr_pri_gpc0_gcc_dbg_r(void) 412{ 413 return 0x00501000U; 414} 415static inline u32 gr_pri_gpcs_gcc_dbg_r(void) 416{ 417 return 0x00419000U; 418} 419static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) 420{ 421 return 0x1U << 1U; 422} 423static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) 424{ 425 return 0x005046a4U; 426} 427static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) 428{ 429 return 0x00419ea4U; 430} 431static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) 432{ 433 return 0x1U << 0U; 434} 435static inline u32 gr_pri_sked_activity_r(void) 436{ 437 return 0x00407054U; 438} 439static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) 440{ 441 return 0x00502c80U; 442} 443static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) 444{ 445 return 0x00502c84U; 446} 447static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) 448{ 449 return 0x00502c88U; 450} 451static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) 452{ 453 return 0x00502c8cU; 454} 455static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) 456{ 457 return 0x00504500U; 458} 459static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) 460{ 461 return 0x00504d00U; 462} 463static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) 464{ 465 return 0x00501d00U; 466} 467static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) 468{ 469 return 0x0041ac80U; 470} 471static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) 472{ 473 return 0x0041ac84U; 474} 475static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) 476{ 477 return 0x0041ac88U; 478} 479static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) 480{ 481 return 0x0041ac8cU; 482} 483static inline u32 
gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) 484{ 485 return 0x0041c500U; 486} 487static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) 488{ 489 return 0x0041cd00U; 490} 491static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) 492{ 493 return 0x00419d00U; 494} 495static inline u32 gr_pri_be0_becs_be_activity0_r(void) 496{ 497 return 0x00410200U; 498} 499static inline u32 gr_pri_be1_becs_be_activity0_r(void) 500{ 501 return 0x00410600U; 502} 503static inline u32 gr_pri_bes_becs_be_activity0_r(void) 504{ 505 return 0x00408a00U; 506} 507static inline u32 gr_pri_ds_mpipe_status_r(void) 508{ 509 return 0x00405858U; 510} 511static inline u32 gr_pri_fe_go_idle_info_r(void) 512{ 513 return 0x00404194U; 514} 515static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) 516{ 517 return 0x00504238U; 518} 519static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void) 520{ 521 return 0x005046b8U; 522} 523static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp0_b(void) 524{ 525 return 4U; 526} 527static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp0_pending_f(void) 528{ 529 return 0x10U; 530} 531static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp1_pending_f(void) 532{ 533 return 0x20U; 534} 535static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp2_pending_f(void) 536{ 537 return 0x40U; 538} 539static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp3_pending_f(void) 540{ 541 return 0x80U; 542} 543static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp0_b(void) 544{ 545 return 8U; 546} 547static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp0_pending_f(void) 548{ 549 return 0x100U; 550} 551static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp1_pending_f(void) 552{ 553 return 0x200U; 554} 555static inline u32 
gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp2_pending_f(void) 556{ 557 return 0x400U; 558} 559static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp3_pending_f(void) 560{ 561 return 0x800U; 562} 563static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_r(void) 564{ 565 return 0x005044a0U; 566} 567static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm0_pending_f(void) 568{ 569 return 0x1U; 570} 571static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm1_pending_f(void) 572{ 573 return 0x2U; 574} 575static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm0_pending_f(void) 576{ 577 return 0x10U; 578} 579static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f(void) 580{ 581 return 0x20U; 582} 583static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm0_pending_f(void) 584{ 585 return 0x100U; 586} 587static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f(void) 588{ 589 return 0x200U; 590} 591static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_single_err_count_r(void) 592{ 593 return 0x005046bcU; 594} 595static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r(void) 596{ 597 return 0x005046c0U; 598} 599static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_r(void) 600{ 601 return 0x005044a4U; 602} 603static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_corrected_m(void) 604{ 605 return 0xffU << 0U; 606} 607static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_corrected_v(u32 r) 608{ 609 return (r >> 0U) & 0xffU; 610} 611static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_detected_m(void) 612{ 613 return 0xffU << 8U; 614} 615static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_single_detected_v(u32 r) 616{ 617 return (r >> 8U) & 0xffU; 618} 619static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_double_detected_m(void) 620{ 621 return 0xffU << 16U; 
622} 623static inline u32 gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_double_detected_v(u32 r) 624{ 625 return (r >> 16U) & 0xffU; 626} 627static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void) 628{ 629 return 0x005042c4U; 630} 631static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void) 632{ 633 return 0x0U; 634} 635static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void) 636{ 637 return 0x1U; 638} 639static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void) 640{ 641 return 0x2U; 642} 643static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_total_r(void) 644{ 645 return 0x00504218U; 646} 647static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_total_sec_m(void) 648{ 649 return 0xffffU << 0U; 650} 651static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_total_sec_v(u32 r) 652{ 653 return (r >> 0U) & 0xffffU; 654} 655static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_total_ded_m(void) 656{ 657 return 0xffffU << 16U; 658} 659static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_total_ded_v(u32 r) 660{ 661 return (r >> 16U) & 0xffffU; 662} 663static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_unique_r(void) 664{ 665 return 0x005042ecU; 666} 667static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_unique_sec_m(void) 668{ 669 return 0xffffU << 0U; 670} 671static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_unique_sec_v(u32 r) 672{ 673 return (r >> 0U) & 0xffffU; 674} 675static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_unique_ded_m(void) 676{ 677 return 0xffffU << 16U; 678} 679static inline u32 gr_pri_gpc0_tpc0_tex_m_ecc_cnt_unique_ded_v(u32 r) 680{ 681 return (r >> 16U) & 0xffffU; 682} 683static inline u32 gr_pri_be0_crop_status1_r(void) 684{ 685 return 0x00410134U; 686} 687static inline u32 gr_pri_bes_crop_status1_r(void) 688{ 689 return 0x00408934U; 690} 691static inline u32 gr_pri_be0_zrop_status_r(void) 692{ 693 return 0x00410048U; 694} 695static inline u32 gr_pri_be0_zrop_status2_r(void) 696{ 697 return 0x0041004cU; 698} 699static inline u32 
gr_pri_bes_zrop_status_r(void) 700{ 701 return 0x00408848U; 702} 703static inline u32 gr_pri_bes_zrop_status2_r(void) 704{ 705 return 0x0040884cU; 706} 707static inline u32 gr_pipe_bundle_address_r(void) 708{ 709 return 0x00400200U; 710} 711static inline u32 gr_pipe_bundle_address_value_v(u32 r) 712{ 713 return (r >> 0U) & 0xffffU; 714} 715static inline u32 gr_pipe_bundle_data_r(void) 716{ 717 return 0x00400204U; 718} 719static inline u32 gr_pipe_bundle_config_r(void) 720{ 721 return 0x00400208U; 722} 723static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) 724{ 725 return 0x0U; 726} 727static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) 728{ 729 return 0x80000000U; 730} 731static inline u32 gr_fe_hww_esr_r(void) 732{ 733 return 0x00404000U; 734} 735static inline u32 gr_fe_hww_esr_reset_active_f(void) 736{ 737 return 0x40000000U; 738} 739static inline u32 gr_fe_hww_esr_en_enable_f(void) 740{ 741 return 0x80000000U; 742} 743static inline u32 gr_fe_hww_esr_info_r(void) 744{ 745 return 0x004041b0U; 746} 747static inline u32 gr_fe_go_idle_timeout_r(void) 748{ 749 return 0x00404154U; 750} 751static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) 752{ 753 return (v & 0xffffffffU) << 0U; 754} 755static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) 756{ 757 return 0x0U; 758} 759static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) 760{ 761 return 0x7fffffffU; 762} 763static inline u32 gr_fe_object_table_r(u32 i) 764{ 765 return 0x00404200U + i*4U; 766} 767static inline u32 gr_fe_object_table_nvclass_v(u32 r) 768{ 769 return (r >> 0U) & 0xffffU; 770} 771static inline u32 gr_fe_tpc_fs_r(void) 772{ 773 return 0x004041c4U; 774} 775static inline u32 gr_pri_mme_shadow_raw_index_r(void) 776{ 777 return 0x00404488U; 778} 779static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) 780{ 781 return 0x80000000U; 782} 783static inline u32 gr_pri_mme_shadow_raw_data_r(void) 784{ 785 return 0x0040448cU; 786} 787static 
inline u32 gr_mme_hww_esr_r(void) 788{ 789 return 0x00404490U; 790} 791static inline u32 gr_mme_hww_esr_reset_active_f(void) 792{ 793 return 0x40000000U; 794} 795static inline u32 gr_mme_hww_esr_en_enable_f(void) 796{ 797 return 0x80000000U; 798} 799static inline u32 gr_mme_hww_esr_info_r(void) 800{ 801 return 0x00404494U; 802} 803static inline u32 gr_memfmt_hww_esr_r(void) 804{ 805 return 0x00404600U; 806} 807static inline u32 gr_memfmt_hww_esr_reset_active_f(void) 808{ 809 return 0x40000000U; 810} 811static inline u32 gr_memfmt_hww_esr_en_enable_f(void) 812{ 813 return 0x80000000U; 814} 815static inline u32 gr_fecs_cpuctl_r(void) 816{ 817 return 0x00409100U; 818} 819static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) 820{ 821 return (v & 0x1U) << 1U; 822} 823static inline u32 gr_fecs_cpuctl_alias_r(void) 824{ 825 return 0x00409130U; 826} 827static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) 828{ 829 return (v & 0x1U) << 1U; 830} 831static inline u32 gr_fecs_dmactl_r(void) 832{ 833 return 0x0040910cU; 834} 835static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) 836{ 837 return (v & 0x1U) << 0U; 838} 839static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) 840{ 841 return 0x1U << 1U; 842} 843static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) 844{ 845 return 0x1U << 2U; 846} 847static inline u32 gr_fecs_os_r(void) 848{ 849 return 0x00409080U; 850} 851static inline u32 gr_fecs_idlestate_r(void) 852{ 853 return 0x0040904cU; 854} 855static inline u32 gr_fecs_mailbox0_r(void) 856{ 857 return 0x00409040U; 858} 859static inline u32 gr_fecs_mailbox1_r(void) 860{ 861 return 0x00409044U; 862} 863static inline u32 gr_fecs_irqstat_r(void) 864{ 865 return 0x00409008U; 866} 867static inline u32 gr_fecs_irqmode_r(void) 868{ 869 return 0x0040900cU; 870} 871static inline u32 gr_fecs_irqmask_r(void) 872{ 873 return 0x00409018U; 874} 875static inline u32 gr_fecs_irqdest_r(void) 876{ 877 return 0x0040901cU; 878} 879static inline u32 gr_fecs_curctx_r(void) 880{ 881 return 
0x00409050U; 882} 883static inline u32 gr_fecs_nxtctx_r(void) 884{ 885 return 0x00409054U; 886} 887static inline u32 gr_fecs_engctl_r(void) 888{ 889 return 0x004090a4U; 890} 891static inline u32 gr_fecs_debug1_r(void) 892{ 893 return 0x00409090U; 894} 895static inline u32 gr_fecs_debuginfo_r(void) 896{ 897 return 0x00409094U; 898} 899static inline u32 gr_fecs_icd_cmd_r(void) 900{ 901 return 0x00409200U; 902} 903static inline u32 gr_fecs_icd_cmd_opc_s(void) 904{ 905 return 4U; 906} 907static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) 908{ 909 return (v & 0xfU) << 0U; 910} 911static inline u32 gr_fecs_icd_cmd_opc_m(void) 912{ 913 return 0xfU << 0U; 914} 915static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) 916{ 917 return (r >> 0U) & 0xfU; 918} 919static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) 920{ 921 return 0x8U; 922} 923static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) 924{ 925 return 0xeU; 926} 927static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) 928{ 929 return (v & 0x1fU) << 8U; 930} 931static inline u32 gr_fecs_icd_rdata_r(void) 932{ 933 return 0x0040920cU; 934} 935static inline u32 gr_fecs_imemc_r(u32 i) 936{ 937 return 0x00409180U + i*16U; 938} 939static inline u32 gr_fecs_imemc_offs_f(u32 v) 940{ 941 return (v & 0x3fU) << 2U; 942} 943static inline u32 gr_fecs_imemc_blk_f(u32 v) 944{ 945 return (v & 0xffU) << 8U; 946} 947static inline u32 gr_fecs_imemc_aincw_f(u32 v) 948{ 949 return (v & 0x1U) << 24U; 950} 951static inline u32 gr_fecs_imemd_r(u32 i) 952{ 953 return 0x00409184U + i*16U; 954} 955static inline u32 gr_fecs_imemt_r(u32 i) 956{ 957 return 0x00409188U + i*16U; 958} 959static inline u32 gr_fecs_imemt_tag_f(u32 v) 960{ 961 return (v & 0xffffU) << 0U; 962} 963static inline u32 gr_fecs_dmemc_r(u32 i) 964{ 965 return 0x004091c0U + i*8U; 966} 967static inline u32 gr_fecs_dmemc_offs_s(void) 968{ 969 return 6U; 970} 971static inline u32 gr_fecs_dmemc_offs_f(u32 v) 972{ 973 return (v & 0x3fU) << 2U; 974} 975static inline u32 gr_fecs_dmemc_offs_m(void) 976{ 977 
return 0x3fU << 2U; 978} 979static inline u32 gr_fecs_dmemc_offs_v(u32 r) 980{ 981 return (r >> 2U) & 0x3fU; 982} 983static inline u32 gr_fecs_dmemc_blk_f(u32 v) 984{ 985 return (v & 0xffU) << 8U; 986} 987static inline u32 gr_fecs_dmemc_aincw_f(u32 v) 988{ 989 return (v & 0x1U) << 24U; 990} 991static inline u32 gr_fecs_dmemd_r(u32 i) 992{ 993 return 0x004091c4U + i*8U; 994} 995static inline u32 gr_fecs_dmatrfbase_r(void) 996{ 997 return 0x00409110U; 998} 999static inline u32 gr_fecs_dmatrfmoffs_r(void) 1000{ 1001 return 0x00409114U; 1002} 1003static inline u32 gr_fecs_dmatrffboffs_r(void) 1004{ 1005 return 0x0040911cU; 1006} 1007static inline u32 gr_fecs_dmatrfcmd_r(void) 1008{ 1009 return 0x00409118U; 1010} 1011static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) 1012{ 1013 return (v & 0x1U) << 4U; 1014} 1015static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) 1016{ 1017 return (v & 0x1U) << 5U; 1018} 1019static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) 1020{ 1021 return (v & 0x7U) << 8U; 1022} 1023static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) 1024{ 1025 return (v & 0x7U) << 12U; 1026} 1027static inline u32 gr_fecs_bootvec_r(void) 1028{ 1029 return 0x00409104U; 1030} 1031static inline u32 gr_fecs_bootvec_vec_f(u32 v) 1032{ 1033 return (v & 0xffffffffU) << 0U; 1034} 1035static inline u32 gr_fecs_falcon_hwcfg_r(void) 1036{ 1037 return 0x00409108U; 1038} 1039static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) 1040{ 1041 return 0x0041a108U; 1042} 1043static inline u32 gr_fecs_falcon_rm_r(void) 1044{ 1045 return 0x00409084U; 1046} 1047static inline u32 gr_fecs_current_ctx_r(void) 1048{ 1049 return 0x00409b00U; 1050} 1051static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) 1052{ 1053 return (v & 0xfffffffU) << 0U; 1054} 1055static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) 1056{ 1057 return (r >> 0U) & 0xfffffffU; 1058} 1059static inline u32 gr_fecs_current_ctx_target_s(void) 1060{ 1061 return 2U; 1062} 1063static inline u32 gr_fecs_current_ctx_target_f(u32 v) 1064{ 
1065 return (v & 0x3U) << 28U; 1066} 1067static inline u32 gr_fecs_current_ctx_target_m(void) 1068{ 1069 return 0x3U << 28U; 1070} 1071static inline u32 gr_fecs_current_ctx_target_v(u32 r) 1072{ 1073 return (r >> 28U) & 0x3U; 1074} 1075static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) 1076{ 1077 return 0x0U; 1078} 1079static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) 1080{ 1081 return 0x20000000U; 1082} 1083static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) 1084{ 1085 return 0x30000000U; 1086} 1087static inline u32 gr_fecs_current_ctx_valid_s(void) 1088{ 1089 return 1U; 1090} 1091static inline u32 gr_fecs_current_ctx_valid_f(u32 v) 1092{ 1093 return (v & 0x1U) << 31U; 1094} 1095static inline u32 gr_fecs_current_ctx_valid_m(void) 1096{ 1097 return 0x1U << 31U; 1098} 1099static inline u32 gr_fecs_current_ctx_valid_v(u32 r) 1100{ 1101 return (r >> 31U) & 0x1U; 1102} 1103static inline u32 gr_fecs_current_ctx_valid_false_f(void) 1104{ 1105 return 0x0U; 1106} 1107static inline u32 gr_fecs_method_data_r(void) 1108{ 1109 return 0x00409500U; 1110} 1111static inline u32 gr_fecs_method_push_r(void) 1112{ 1113 return 0x00409504U; 1114} 1115static inline u32 gr_fecs_method_push_adr_f(u32 v) 1116{ 1117 return (v & 0xfffU) << 0U; 1118} 1119static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) 1120{ 1121 return 0x00000003U; 1122} 1123static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) 1124{ 1125 return 0x3U; 1126} 1127static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) 1128{ 1129 return 0x00000010U; 1130} 1131static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) 1132{ 1133 return 0x00000009U; 1134} 1135static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) 1136{ 1137 return 0x00000015U; 1138} 1139static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) 1140{ 1141 return 0x00000016U; 1142} 1143static inline u32 
gr_fecs_method_push_adr_discover_pm_image_size_v(void) 1144{ 1145 return 0x00000025U; 1146} 1147static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) 1148{ 1149 return 0x00000030U; 1150} 1151static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) 1152{ 1153 return 0x00000031U; 1154} 1155static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) 1156{ 1157 return 0x00000032U; 1158} 1159static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) 1160{ 1161 return 0x00000038U; 1162} 1163static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) 1164{ 1165 return 0x00000039U; 1166} 1167static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) 1168{ 1169 return 0x21U; 1170} 1171static inline u32 gr_fecs_method_push_adr_write_timestamp_record_v(void) 1172{ 1173 return 0x0000003dU; 1174} 1175static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) 1176{ 1177 return 0x0000001aU; 1178} 1179static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) 1180{ 1181 return 0x00000004U; 1182} 1183static inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void) 1184{ 1185 return 0x0000003aU; 1186} 1187static inline u32 gr_fecs_host_int_status_r(void) 1188{ 1189 return 0x00409c18U; 1190} 1191static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) 1192{ 1193 return (v & 0x1U) << 16U; 1194} 1195static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) 1196{ 1197 return (v & 0x1U) << 17U; 1198} 1199static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) 1200{ 1201 return (v & 0x1U) << 18U; 1202} 1203static inline u32 gr_fecs_host_int_status_watchdog_active_f(void) 1204{ 1205 return 0x80000U; 1206} 1207static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) 1208{ 1209 return (v & 0xffffU) << 0U; 1210} 1211static inline u32 gr_fecs_host_int_clear_r(void) 1212{ 1213 return 0x00409c20U; 1214} 1215static inline 
u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) 1216{ 1217 return (v & 0x1U) << 1U; 1218} 1219static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) 1220{ 1221 return 0x2U; 1222} 1223static inline u32 gr_fecs_host_int_enable_r(void) 1224{ 1225 return 0x00409c24U; 1226} 1227static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) 1228{ 1229 return 0x2U; 1230} 1231static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) 1232{ 1233 return 0x10000U; 1234} 1235static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) 1236{ 1237 return 0x20000U; 1238} 1239static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) 1240{ 1241 return 0x40000U; 1242} 1243static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) 1244{ 1245 return 0x80000U; 1246} 1247static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) 1248{ 1249 return 0x00409614U; 1250} 1251static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) 1252{ 1253 return 0x0U; 1254} 1255static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) 1256{ 1257 return 0x0U; 1258} 1259static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) 1260{ 1261 return 0x0U; 1262} 1263static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) 1264{ 1265 return 0x10U; 1266} 1267static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) 1268{ 1269 return 0x20U; 1270} 1271static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) 1272{ 1273 return 0x40U; 1274} 1275static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) 1276{ 1277 return 0x0U; 1278} 1279static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) 1280{ 1281 return 0x100U; 1282} 1283static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) 1284{ 1285 return 0x0U; 1286} 1287static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) 1288{ 1289 return 
0x200U; 1290} 1291static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) 1292{ 1293 return 1U; 1294} 1295static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) 1296{ 1297 return (v & 0x1U) << 10U; 1298} 1299static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) 1300{ 1301 return 0x1U << 10U; 1302} 1303static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) 1304{ 1305 return (r >> 10U) & 0x1U; 1306} 1307static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) 1308{ 1309 return 0x0U; 1310} 1311static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) 1312{ 1313 return 0x400U; 1314} 1315static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) 1316{ 1317 return 0x0040960cU; 1318} 1319static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) 1320{ 1321 return 0x00409800U + i*4U; 1322} 1323static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) 1324{ 1325 return 0x00000010U; 1326} 1327static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) 1328{ 1329 return (v & 0xffffffffU) << 0U; 1330} 1331static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) 1332{ 1333 return 0x00000001U; 1334} 1335static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) 1336{ 1337 return 0x00000002U; 1338} 1339static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) 1340{ 1341 return 0x004098c0U + i*4U; 1342} 1343static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) 1344{ 1345 return (v & 0xffffffffU) << 0U; 1346} 1347static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) 1348{ 1349 return 0x00409840U + i*4U; 1350} 1351static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) 1352{ 1353 return (v & 0xffffffffU) << 0U; 1354} 1355static inline u32 gr_fecs_fs_r(void) 1356{ 1357 return 0x00409604U; 1358} 1359static inline u32 gr_fecs_fs_num_available_gpcs_s(void) 1360{ 1361 return 5U; 1362} 1363static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) 1364{ 1365 return (v & 0x1fU) << 0U; 1366} 1367static inline u32 
gr_fecs_fs_num_available_gpcs_m(void) 1368{ 1369 return 0x1fU << 0U; 1370} 1371static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) 1372{ 1373 return (r >> 0U) & 0x1fU; 1374} 1375static inline u32 gr_fecs_fs_num_available_fbps_s(void) 1376{ 1377 return 5U; 1378} 1379static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) 1380{ 1381 return (v & 0x1fU) << 16U; 1382} 1383static inline u32 gr_fecs_fs_num_available_fbps_m(void) 1384{ 1385 return 0x1fU << 16U; 1386} 1387static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) 1388{ 1389 return (r >> 16U) & 0x1fU; 1390} 1391static inline u32 gr_fecs_cfg_r(void) 1392{ 1393 return 0x00409620U; 1394} 1395static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) 1396{ 1397 return (r >> 0U) & 0xffU; 1398} 1399static inline u32 gr_fecs_rc_lanes_r(void) 1400{ 1401 return 0x00409880U; 1402} 1403static inline u32 gr_fecs_rc_lanes_num_chains_s(void) 1404{ 1405 return 6U; 1406} 1407static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) 1408{ 1409 return (v & 0x3fU) << 0U; 1410} 1411static inline u32 gr_fecs_rc_lanes_num_chains_m(void) 1412{ 1413 return 0x3fU << 0U; 1414} 1415static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) 1416{ 1417 return (r >> 0U) & 0x3fU; 1418} 1419static inline u32 gr_fecs_ctxsw_status_1_r(void) 1420{ 1421 return 0x00409400U; 1422} 1423static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) 1424{ 1425 return 1U; 1426} 1427static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) 1428{ 1429 return (v & 0x1U) << 12U; 1430} 1431static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) 1432{ 1433 return 0x1U << 12U; 1434} 1435static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) 1436{ 1437 return (r >> 12U) & 0x1U; 1438} 1439static inline u32 gr_fecs_arb_ctx_adr_r(void) 1440{ 1441 return 0x00409a24U; 1442} 1443static inline u32 gr_fecs_new_ctx_r(void) 1444{ 1445 return 0x00409b04U; 1446} 1447static inline u32 gr_fecs_new_ctx_ptr_s(void) 1448{ 1449 return 28U; 1450} 1451static inline u32 
gr_fecs_new_ctx_ptr_f(u32 v) 1452{ 1453 return (v & 0xfffffffU) << 0U; 1454} 1455static inline u32 gr_fecs_new_ctx_ptr_m(void) 1456{ 1457 return 0xfffffffU << 0U; 1458} 1459static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) 1460{ 1461 return (r >> 0U) & 0xfffffffU; 1462} 1463static inline u32 gr_fecs_new_ctx_target_s(void) 1464{ 1465 return 2U; 1466} 1467static inline u32 gr_fecs_new_ctx_target_f(u32 v) 1468{ 1469 return (v & 0x3U) << 28U; 1470} 1471static inline u32 gr_fecs_new_ctx_target_m(void) 1472{ 1473 return 0x3U << 28U; 1474} 1475static inline u32 gr_fecs_new_ctx_target_v(u32 r) 1476{ 1477 return (r >> 28U) & 0x3U; 1478} 1479static inline u32 gr_fecs_new_ctx_target_vid_mem_f(void) 1480{ 1481 return 0x0U; 1482} 1483static inline u32 gr_fecs_new_ctx_target_sys_mem_ncoh_f(void) 1484{ 1485 return 0x30000000U; 1486} 1487static inline u32 gr_fecs_new_ctx_target_sys_mem_coh_f(void) 1488{ 1489 return 0x20000000U; 1490} 1491static inline u32 gr_fecs_new_ctx_valid_s(void) 1492{ 1493 return 1U; 1494} 1495static inline u32 gr_fecs_new_ctx_valid_f(u32 v) 1496{ 1497 return (v & 0x1U) << 31U; 1498} 1499static inline u32 gr_fecs_new_ctx_valid_m(void) 1500{ 1501 return 0x1U << 31U; 1502} 1503static inline u32 gr_fecs_new_ctx_valid_v(u32 r) 1504{ 1505 return (r >> 31U) & 0x1U; 1506} 1507static inline u32 gr_fecs_arb_ctx_ptr_r(void) 1508{ 1509 return 0x00409a0cU; 1510} 1511static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) 1512{ 1513 return 28U; 1514} 1515static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) 1516{ 1517 return (v & 0xfffffffU) << 0U; 1518} 1519static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) 1520{ 1521 return 0xfffffffU << 0U; 1522} 1523static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) 1524{ 1525 return (r >> 0U) & 0xfffffffU; 1526} 1527static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) 1528{ 1529 return 2U; 1530} 1531static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) 1532{ 1533 return (v & 0x3U) << 28U; 1534} 1535static inline u32 
gr_fecs_arb_ctx_ptr_target_m(void) 1536{ 1537 return 0x3U << 28U; 1538} 1539static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) 1540{ 1541 return (r >> 28U) & 0x3U; 1542} 1543static inline u32 gr_fecs_arb_ctx_ptr_target_vid_mem_f(void) 1544{ 1545 return 0x0U; 1546} 1547static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(void) 1548{ 1549 return 0x30000000U; 1550} 1551static inline u32 gr_fecs_arb_ctx_ptr_target_sys_mem_coh_f(void) 1552{ 1553 return 0x20000000U; 1554} 1555static inline u32 gr_fecs_arb_ctx_cmd_r(void) 1556{ 1557 return 0x00409a10U; 1558} 1559static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) 1560{ 1561 return 5U; 1562} 1563static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) 1564{ 1565 return (v & 0x1fU) << 0U; 1566} 1567static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) 1568{ 1569 return 0x1fU << 0U; 1570} 1571static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) 1572{ 1573 return (r >> 0U) & 0x1fU; 1574} 1575static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) 1576{ 1577 return 0x00409c00U; 1578} 1579static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) 1580{ 1581 return 0x00502c04U; 1582} 1583static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) 1584{ 1585 return 0x00502400U; 1586} 1587static inline u32 gr_gpc0_gpccs_ctxsw_mailbox__size_1_v(void) 1588{ 1589 return 0x00000010U; 1590} 1591static inline u32 gr_fecs_ctxsw_idlestate_r(void) 1592{ 1593 return 0x00409420U; 1594} 1595static inline u32 gr_fecs_feature_override_ecc_r(void) 1596{ 1597 return 0x00409658U; 1598} 1599static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r) 1600{ 1601 return (r >> 3U) & 0x1U; 1602} 1603static inline u32 gr_fecs_feature_override_ecc_sm_shm_override_v(u32 r) 1604{ 1605 return (r >> 7U) & 0x1U; 1606} 1607static inline u32 gr_fecs_feature_override_ecc_tex_override_v(u32 r) 1608{ 1609 return (r >> 11U) & 0x1U; 1610} 1611static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r) 1612{ 1613 return (r >> 15U) & 0x1U; 1614} 1615static 
inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r) 1616{ 1617 return (r >> 0U) & 0x1U; 1618} 1619static inline u32 gr_fecs_feature_override_ecc_sm_shm_v(u32 r) 1620{ 1621 return (r >> 4U) & 0x1U; 1622} 1623static inline u32 gr_fecs_feature_override_ecc_tex_v(u32 r) 1624{ 1625 return (r >> 8U) & 0x1U; 1626} 1627static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r) 1628{ 1629 return (r >> 12U) & 0x1U; 1630} 1631static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) 1632{ 1633 return 0x00502420U; 1634} 1635static inline u32 gr_rstr2d_gpc_map0_r(void) 1636{ 1637 return 0x0040780cU; 1638} 1639static inline u32 gr_rstr2d_gpc_map1_r(void) 1640{ 1641 return 0x00407810U; 1642} 1643static inline u32 gr_rstr2d_gpc_map2_r(void) 1644{ 1645 return 0x00407814U; 1646} 1647static inline u32 gr_rstr2d_gpc_map3_r(void) 1648{ 1649 return 0x00407818U; 1650} 1651static inline u32 gr_rstr2d_gpc_map4_r(void) 1652{ 1653 return 0x0040781cU; 1654} 1655static inline u32 gr_rstr2d_gpc_map5_r(void) 1656{ 1657 return 0x00407820U; 1658} 1659static inline u32 gr_rstr2d_map_table_cfg_r(void) 1660{ 1661 return 0x004078bcU; 1662} 1663static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) 1664{ 1665 return (v & 0xffU) << 0U; 1666} 1667static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) 1668{ 1669 return (v & 0xffU) << 8U; 1670} 1671static inline u32 gr_pd_hww_esr_r(void) 1672{ 1673 return 0x00406018U; 1674} 1675static inline u32 gr_pd_hww_esr_reset_active_f(void) 1676{ 1677 return 0x40000000U; 1678} 1679static inline u32 gr_pd_hww_esr_en_enable_f(void) 1680{ 1681 return 0x80000000U; 1682} 1683static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) 1684{ 1685 return 0x00406028U + i*4U; 1686} 1687static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) 1688{ 1689 return 0x00000004U; 1690} 1691static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) 1692{ 1693 return (v & 0xfU) << 0U; 1694} 1695static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) 1696{ 1697 return (v & 0xfU) << 
4U; 1698} 1699static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) 1700{ 1701 return (v & 0xfU) << 8U; 1702} 1703static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) 1704{ 1705 return (v & 0xfU) << 12U; 1706} 1707static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) 1708{ 1709 return (v & 0xfU) << 16U; 1710} 1711static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) 1712{ 1713 return (v & 0xfU) << 20U; 1714} 1715static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) 1716{ 1717 return (v & 0xfU) << 24U; 1718} 1719static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) 1720{ 1721 return (v & 0xfU) << 28U; 1722} 1723static inline u32 gr_pd_ab_dist_cfg0_r(void) 1724{ 1725 return 0x004064c0U; 1726} 1727static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) 1728{ 1729 return 0x80000000U; 1730} 1731static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) 1732{ 1733 return 0x0U; 1734} 1735static inline u32 gr_pd_ab_dist_cfg1_r(void) 1736{ 1737 return 0x004064c4U; 1738} 1739static inline u32 gr_pd_ab_dist_cfg1_max_batches_f(u32 v) 1740{ 1741 return (v & 0xffffU) << 0U; 1742} 1743static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) 1744{ 1745 return 0xffffU; 1746} 1747static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) 1748{ 1749 return (v & 0xffffU) << 16U; 1750} 1751static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) 1752{ 1753 return 0x00000080U; 1754} 1755static inline u32 gr_pd_ab_dist_cfg2_r(void) 1756{ 1757 return 0x004064c8U; 1758} 1759static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) 1760{ 1761 return (v & 0x1fffU) << 0U; 1762} 1763static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) 1764{ 1765 return 0x000001c0U; 1766} 1767static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) 1768{ 1769 return (v & 0x1fffU) << 16U; 1770} 1771static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) 1772{ 1773 return 0x00000020U; 1774} 1775static inline u32 
gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) 1776{ 1777 return 0x00000182U; 1778} 1779static inline u32 gr_pd_dist_skip_table_r(u32 i) 1780{ 1781 return 0x004064d0U + i*4U; 1782} 1783static inline u32 gr_pd_dist_skip_table__size_1_v(void) 1784{ 1785 return 0x00000008U; 1786} 1787static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) 1788{ 1789 return (v & 0xffU) << 0U; 1790} 1791static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) 1792{ 1793 return (v & 0xffU) << 8U; 1794} 1795static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) 1796{ 1797 return (v & 0xffU) << 16U; 1798} 1799static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) 1800{ 1801 return (v & 0xffU) << 24U; 1802} 1803static inline u32 gr_ds_debug_r(void) 1804{ 1805 return 0x00405800U; 1806} 1807static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) 1808{ 1809 return 0x0U; 1810} 1811static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) 1812{ 1813 return 0x8000000U; 1814} 1815static inline u32 gr_ds_zbc_color_r_r(void) 1816{ 1817 return 0x00405804U; 1818} 1819static inline u32 gr_ds_zbc_color_r_val_f(u32 v) 1820{ 1821 return (v & 0xffffffffU) << 0U; 1822} 1823static inline u32 gr_ds_zbc_color_g_r(void) 1824{ 1825 return 0x00405808U; 1826} 1827static inline u32 gr_ds_zbc_color_g_val_f(u32 v) 1828{ 1829 return (v & 0xffffffffU) << 0U; 1830} 1831static inline u32 gr_ds_zbc_color_b_r(void) 1832{ 1833 return 0x0040580cU; 1834} 1835static inline u32 gr_ds_zbc_color_b_val_f(u32 v) 1836{ 1837 return (v & 0xffffffffU) << 0U; 1838} 1839static inline u32 gr_ds_zbc_color_a_r(void) 1840{ 1841 return 0x00405810U; 1842} 1843static inline u32 gr_ds_zbc_color_a_val_f(u32 v) 1844{ 1845 return (v & 0xffffffffU) << 0U; 1846} 1847static inline u32 gr_ds_zbc_color_fmt_r(void) 1848{ 1849 return 0x00405814U; 1850} 1851static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) 1852{ 1853 return (v & 0x7fU) << 0U; 1854} 1855static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) 
1856{ 1857 return 0x0U; 1858} 1859static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) 1860{ 1861 return 0x00000001U; 1862} 1863static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) 1864{ 1865 return 0x00000002U; 1866} 1867static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) 1868{ 1869 return 0x00000004U; 1870} 1871static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) 1872{ 1873 return 0x00000028U; 1874} 1875static inline u32 gr_ds_zbc_z_r(void) 1876{ 1877 return 0x00405818U; 1878} 1879static inline u32 gr_ds_zbc_z_val_s(void) 1880{ 1881 return 32U; 1882} 1883static inline u32 gr_ds_zbc_z_val_f(u32 v) 1884{ 1885 return (v & 0xffffffffU) << 0U; 1886} 1887static inline u32 gr_ds_zbc_z_val_m(void) 1888{ 1889 return 0xffffffffU << 0U; 1890} 1891static inline u32 gr_ds_zbc_z_val_v(u32 r) 1892{ 1893 return (r >> 0U) & 0xffffffffU; 1894} 1895static inline u32 gr_ds_zbc_z_val__init_v(void) 1896{ 1897 return 0x00000000U; 1898} 1899static inline u32 gr_ds_zbc_z_val__init_f(void) 1900{ 1901 return 0x0U; 1902} 1903static inline u32 gr_ds_zbc_z_fmt_r(void) 1904{ 1905 return 0x0040581cU; 1906} 1907static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) 1908{ 1909 return (v & 0x1U) << 0U; 1910} 1911static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) 1912{ 1913 return 0x0U; 1914} 1915static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) 1916{ 1917 return 0x00000001U; 1918} 1919static inline u32 gr_ds_zbc_tbl_index_r(void) 1920{ 1921 return 0x00405820U; 1922} 1923static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) 1924{ 1925 return (v & 0xfU) << 0U; 1926} 1927static inline u32 gr_ds_zbc_tbl_ld_r(void) 1928{ 1929 return 0x00405824U; 1930} 1931static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) 1932{ 1933 return 0x0U; 1934} 1935static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) 1936{ 1937 return 0x1U; 1938} 1939static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) 1940{ 1941 return 0x0U; 1942} 1943static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) 
1944{ 1945 return 0x4U; 1946} 1947static inline u32 gr_ds_tga_constraintlogic_beta_r(void) 1948{ 1949 return 0x00405830U; 1950} 1951static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) 1952{ 1953 return (v & 0x3fffffU) << 0U; 1954} 1955static inline u32 gr_ds_tga_constraintlogic_alpha_r(void) 1956{ 1957 return 0x0040585cU; 1958} 1959static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) 1960{ 1961 return (v & 0xffffU) << 0U; 1962} 1963static inline u32 gr_ds_hww_esr_r(void) 1964{ 1965 return 0x00405840U; 1966} 1967static inline u32 gr_ds_hww_esr_reset_s(void) 1968{ 1969 return 1U; 1970} 1971static inline u32 gr_ds_hww_esr_reset_f(u32 v) 1972{ 1973 return (v & 0x1U) << 30U; 1974} 1975static inline u32 gr_ds_hww_esr_reset_m(void) 1976{ 1977 return 0x1U << 30U; 1978} 1979static inline u32 gr_ds_hww_esr_reset_v(u32 r) 1980{ 1981 return (r >> 30U) & 0x1U; 1982} 1983static inline u32 gr_ds_hww_esr_reset_task_v(void) 1984{ 1985 return 0x00000001U; 1986} 1987static inline u32 gr_ds_hww_esr_reset_task_f(void) 1988{ 1989 return 0x40000000U; 1990} 1991static inline u32 gr_ds_hww_esr_en_enabled_f(void) 1992{ 1993 return 0x80000000U; 1994} 1995static inline u32 gr_ds_hww_esr_2_r(void) 1996{ 1997 return 0x00405848U; 1998} 1999static inline u32 gr_ds_hww_esr_2_reset_s(void) 2000{ 2001 return 1U; 2002} 2003static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) 2004{ 2005 return (v & 0x1U) << 30U; 2006} 2007static inline u32 gr_ds_hww_esr_2_reset_m(void) 2008{ 2009 return 0x1U << 30U; 2010} 2011static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) 2012{ 2013 return (r >> 30U) & 0x1U; 2014} 2015static inline u32 gr_ds_hww_esr_2_reset_task_v(void) 2016{ 2017 return 0x00000001U; 2018} 2019static inline u32 gr_ds_hww_esr_2_reset_task_f(void) 2020{ 2021 return 0x40000000U; 2022} 2023static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) 2024{ 2025 return 0x80000000U; 2026} 2027static inline u32 gr_ds_hww_report_mask_r(void) 2028{ 2029 return 0x00405844U; 2030} 2031static 
inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) 2032{ 2033 return 0x1U; 2034} 2035static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) 2036{ 2037 return 0x2U; 2038} 2039static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) 2040{ 2041 return 0x4U; 2042} 2043static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) 2044{ 2045 return 0x8U; 2046} 2047static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) 2048{ 2049 return 0x10U; 2050} 2051static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) 2052{ 2053 return 0x20U; 2054} 2055static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) 2056{ 2057 return 0x40U; 2058} 2059static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) 2060{ 2061 return 0x80U; 2062} 2063static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) 2064{ 2065 return 0x100U; 2066} 2067static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) 2068{ 2069 return 0x200U; 2070} 2071static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) 2072{ 2073 return 0x400U; 2074} 2075static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) 2076{ 2077 return 0x800U; 2078} 2079static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) 2080{ 2081 return 0x1000U; 2082} 2083static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) 2084{ 2085 return 0x2000U; 2086} 2087static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) 2088{ 2089 return 0x4000U; 2090} 2091static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) 2092{ 2093 return 0x8000U; 2094} 2095static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) 2096{ 2097 return 0x10000U; 2098} 2099static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) 2100{ 2101 return 0x20000U; 2102} 2103static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) 2104{ 2105 return 0x40000U; 2106} 2107static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) 2108{ 2109 return 0x80000U; 2110} 
2111static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) 2112{ 2113 return 0x100000U; 2114} 2115static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) 2116{ 2117 return 0x200000U; 2118} 2119static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) 2120{ 2121 return 0x400000U; 2122} 2123static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) 2124{ 2125 return 0x800000U; 2126} 2127static inline u32 gr_ds_hww_report_mask_2_r(void) 2128{ 2129 return 0x0040584cU; 2130} 2131static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void) 2132{ 2133 return 0x1U; 2134} 2135static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) 2136{ 2137 return 0x00405870U + i*4U; 2138} 2139static inline u32 gr_scc_bundle_cb_base_r(void) 2140{ 2141 return 0x00408004U; 2142} 2143static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) 2144{ 2145 return (v & 0xffffffffU) << 0U; 2146} 2147static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) 2148{ 2149 return 0x00000008U; 2150} 2151static inline u32 gr_scc_bundle_cb_size_r(void) 2152{ 2153 return 0x00408008U; 2154} 2155static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) 2156{ 2157 return (v & 0x7ffU) << 0U; 2158} 2159static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) 2160{ 2161 return 0x00000018U; 2162} 2163static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) 2164{ 2165 return 0x00000100U; 2166} 2167static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) 2168{ 2169 return 0x00000000U; 2170} 2171static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) 2172{ 2173 return 0x0U; 2174} 2175static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) 2176{ 2177 return 0x80000000U; 2178} 2179static inline u32 gr_scc_pagepool_base_r(void) 2180{ 2181 return 0x0040800cU; 2182} 2183static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) 2184{ 2185 return (v & 0xffffffffU) << 0U; 2186} 2187static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) 
2188{ 2189 return 0x00000008U; 2190} 2191static inline u32 gr_scc_pagepool_r(void) 2192{ 2193 return 0x00408010U; 2194} 2195static inline u32 gr_scc_pagepool_total_pages_f(u32 v) 2196{ 2197 return (v & 0x3ffU) << 0U; 2198} 2199static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) 2200{ 2201 return 0x00000000U; 2202} 2203static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) 2204{ 2205 return 0x00000200U; 2206} 2207static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) 2208{ 2209 return 0x00000100U; 2210} 2211static inline u32 gr_scc_pagepool_max_valid_pages_s(void) 2212{ 2213 return 10U; 2214} 2215static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) 2216{ 2217 return (v & 0x3ffU) << 10U; 2218} 2219static inline u32 gr_scc_pagepool_max_valid_pages_m(void) 2220{ 2221 return 0x3ffU << 10U; 2222} 2223static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) 2224{ 2225 return (r >> 10U) & 0x3ffU; 2226} 2227static inline u32 gr_scc_pagepool_valid_true_f(void) 2228{ 2229 return 0x80000000U; 2230} 2231static inline u32 gr_scc_init_r(void) 2232{ 2233 return 0x0040802cU; 2234} 2235static inline u32 gr_scc_init_ram_trigger_f(void) 2236{ 2237 return 0x1U; 2238} 2239static inline u32 gr_scc_hww_esr_r(void) 2240{ 2241 return 0x00408030U; 2242} 2243static inline u32 gr_scc_hww_esr_reset_active_f(void) 2244{ 2245 return 0x40000000U; 2246} 2247static inline u32 gr_scc_hww_esr_en_enable_f(void) 2248{ 2249 return 0x80000000U; 2250} 2251static inline u32 gr_sked_hww_esr_r(void) 2252{ 2253 return 0x00407020U; 2254} 2255static inline u32 gr_sked_hww_esr_reset_active_f(void) 2256{ 2257 return 0x40000000U; 2258} 2259static inline u32 gr_cwd_fs_r(void) 2260{ 2261 return 0x00405b00U; 2262} 2263static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) 2264{ 2265 return (v & 0xffU) << 0U; 2266} 2267static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) 2268{ 2269 return (v & 0xffU) << 8U; 2270} 2271static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) 2272{ 2273 return 0x00405b60U + 
i*4U; 2274} 2275static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) 2276{ 2277 return 4U; 2278} 2279static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) 2280{ 2281 return (v & 0xfU) << 0U; 2282} 2283static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) 2284{ 2285 return 4U; 2286} 2287static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) 2288{ 2289 return (v & 0xfU) << 4U; 2290} 2291static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) 2292{ 2293 return (v & 0xfU) << 8U; 2294} 2295static inline u32 gr_cwd_sm_id_r(u32 i) 2296{ 2297 return 0x00405ba0U + i*4U; 2298} 2299static inline u32 gr_cwd_sm_id__size_1_v(void) 2300{ 2301 return 0x00000010U; 2302} 2303static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) 2304{ 2305 return (v & 0xffU) << 0U; 2306} 2307static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) 2308{ 2309 return (v & 0xffU) << 8U; 2310} 2311static inline u32 gr_gpc0_fs_gpc_r(void) 2312{ 2313 return 0x00502608U; 2314} 2315static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) 2316{ 2317 return (r >> 0U) & 0x1fU; 2318} 2319static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) 2320{ 2321 return (r >> 16U) & 0x1fU; 2322} 2323static inline u32 gr_gpc0_cfg_r(void) 2324{ 2325 return 0x00502620U; 2326} 2327static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) 2328{ 2329 return (r >> 0U) & 0xffU; 2330} 2331static inline u32 gr_gpccs_rc_lanes_r(void) 2332{ 2333 return 0x00502880U; 2334} 2335static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) 2336{ 2337 return 6U; 2338} 2339static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) 2340{ 2341 return (v & 0x3fU) << 0U; 2342} 2343static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) 2344{ 2345 return 0x3fU << 0U; 2346} 2347static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) 2348{ 2349 return (r >> 0U) & 0x3fU; 2350} 2351static inline u32 gr_gpccs_rc_lane_size_r(void) 2352{ 2353 return 0x00502910U; 2354} 2355static inline u32 gr_gpccs_rc_lane_size_v_s(void) 2356{ 2357 return 24U; 2358} 2359static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) 
2360{ 2361 return (v & 0xffffffU) << 0U; 2362} 2363static inline u32 gr_gpccs_rc_lane_size_v_m(void) 2364{ 2365 return 0xffffffU << 0U; 2366} 2367static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) 2368{ 2369 return (r >> 0U) & 0xffffffU; 2370} 2371static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) 2372{ 2373 return 0x00000000U; 2374} 2375static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) 2376{ 2377 return 0x0U; 2378} 2379static inline u32 gr_gpc0_zcull_fs_r(void) 2380{ 2381 return 0x00500910U; 2382} 2383static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) 2384{ 2385 return (v & 0x1ffU) << 0U; 2386} 2387static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) 2388{ 2389 return (v & 0xfU) << 16U; 2390} 2391static inline u32 gr_gpc0_zcull_ram_addr_r(void) 2392{ 2393 return 0x00500914U; 2394} 2395static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) 2396{ 2397 return (v & 0xfU) << 0U; 2398} 2399static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) 2400{ 2401 return (v & 0xfU) << 8U; 2402} 2403static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) 2404{ 2405 return 0x00500918U; 2406} 2407static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) 2408{ 2409 return (v & 0xffffffU) << 0U; 2410} 2411static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) 2412{ 2413 return 0x00800000U; 2414} 2415static inline u32 gr_gpc0_zcull_total_ram_size_r(void) 2416{ 2417 return 0x00500920U; 2418} 2419static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) 2420{ 2421 return (v & 0xffffU) << 0U; 2422} 2423static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) 2424{ 2425 return 0x00500a04U + i*32U; 2426} 2427static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) 2428{ 2429 return 0x00000040U; 2430} 2431static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) 2432{ 2433 return 0x00000010U; 2434} 2435static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) 2436{ 2437 return 0x00500c10U + i*4U; 2438} 
2439static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) 2440{ 2441 return (v & 0xffU) << 0U; 2442} 2443static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) 2444{ 2445 return 0x00500c30U + i*4U; 2446} 2447static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) 2448{ 2449 return (r >> 0U) & 0xffU; 2450} 2451static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) 2452{ 2453 return 0x00504088U; 2454} 2455static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) 2456{ 2457 return (v & 0xffffU) << 0U; 2458} 2459static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) 2460{ 2461 return 0x00504698U; 2462} 2463static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_f(u32 v) 2464{ 2465 return (v & 0xffffU) << 0U; 2466} 2467static inline u32 gr_gpc0_tpc0_sm_cfg_sm_id_v(u32 r) 2468{ 2469 return (r >> 0U) & 0xffffU; 2470} 2471static inline u32 gr_gpc0_tpc0_sm_arch_r(void) 2472{ 2473 return 0x0050469cU; 2474} 2475static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) 2476{ 2477 return (r >> 0U) & 0xffU; 2478} 2479static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) 2480{ 2481 return (r >> 8U) & 0xfffU; 2482} 2483static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) 2484{ 2485 return (r >> 20U) & 0xfffU; 2486} 2487static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) 2488{ 2489 return 0x00503018U; 2490} 2491static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) 2492{ 2493 return 0x1U << 0U; 2494} 2495static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) 2496{ 2497 return 0x1U; 2498} 2499static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) 2500{ 2501 return 0x005030c0U; 2502} 2503static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) 2504{ 2505 return (v & 0x3fffffU) << 0U; 2506} 2507static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) 2508{ 2509 return 0x3fffffU << 0U; 2510} 2511static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) 2512{ 2513 return 0x00030000U; 2514} 2515static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) 
2516{ 2517 return 0x00030a00U; 2518} 2519static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) 2520{ 2521 return 0x00000020U; 2522} 2523static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) 2524{ 2525 return 0x005030f4U; 2526} 2527static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) 2528{ 2529 return 0x005030e4U; 2530} 2531static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) 2532{ 2533 return (v & 0xffffU) << 0U; 2534} 2535static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) 2536{ 2537 return 0xffffU << 0U; 2538} 2539static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) 2540{ 2541 return 0x00000800U; 2542} 2543static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) 2544{ 2545 return 0x00000020U; 2546} 2547static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) 2548{ 2549 return 0x005030f8U; 2550} 2551static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) 2552{ 2553 return 0x005030f0U; 2554} 2555static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) 2556{ 2557 return (v & 0x3fffffU) << 0U; 2558} 2559static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) 2560{ 2561 return 0x00030000U; 2562} 2563static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) 2564{ 2565 return 0x00419b00U; 2566} 2567static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) 2568{ 2569 return (v & 0xffffffffU) << 0U; 2570} 2571static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) 2572{ 2573 return 0x00419b04U; 2574} 2575static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) 2576{ 2577 return 21U; 2578} 2579static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) 2580{ 2581 return (v & 0x1fffffU) << 0U; 2582} 2583static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) 2584{ 2585 return 0x1fffffU << 0U; 2586} 2587static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) 2588{ 2589 return (r >> 0U) & 0x1fffffU; 2590} 2591static inline u32 
gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) 2592{ 2593 return 0x80U; 2594} 2595static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) 2596{ 2597 return 1U; 2598} 2599static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) 2600{ 2601 return (v & 0x1U) << 31U; 2602} 2603static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) 2604{ 2605 return 0x1U << 31U; 2606} 2607static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) 2608{ 2609 return (r >> 31U) & 0x1U; 2610} 2611static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) 2612{ 2613 return 0x80000000U; 2614} 2615static inline u32 gr_gpcs_tpcs_tex_m_dbg2_r(void) 2616{ 2617 return 0x00419a3cU; 2618} 2619static inline u32 gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(u32 v) 2620{ 2621 return (v & 0x1U) << 2U; 2622} 2623static inline u32 gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(void) 2624{ 2625 return 0x1U << 2U; 2626} 2627static inline u32 gr_gpcs_tpcs_tex_m_dbg2_su_rd_coalesce_en_f(u32 v) 2628{ 2629 return (v & 0x1U) << 4U; 2630} 2631static inline u32 gr_gpcs_tpcs_tex_m_dbg2_su_rd_coalesce_en_m(void) 2632{ 2633 return 0x1U << 4U; 2634} 2635static inline u32 gr_gpccs_falcon_addr_r(void) 2636{ 2637 return 0x0041a0acU; 2638} 2639static inline u32 gr_gpccs_falcon_addr_lsb_s(void) 2640{ 2641 return 6U; 2642} 2643static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) 2644{ 2645 return (v & 0x3fU) << 0U; 2646} 2647static inline u32 gr_gpccs_falcon_addr_lsb_m(void) 2648{ 2649 return 0x3fU << 0U; 2650} 2651static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) 2652{ 2653 return (r >> 0U) & 0x3fU; 2654} 2655static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) 2656{ 2657 return 0x00000000U; 2658} 2659static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) 2660{ 2661 return 0x0U; 2662} 2663static inline u32 gr_gpccs_falcon_addr_msb_s(void) 2664{ 2665 return 6U; 2666} 2667static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) 2668{ 2669 return (v & 0x3fU) << 6U; 2670} 2671static inline u32 
gr_gpccs_falcon_addr_msb_m(void) 2672{ 2673 return 0x3fU << 6U; 2674} 2675static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) 2676{ 2677 return (r >> 6U) & 0x3fU; 2678} 2679static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) 2680{ 2681 return 0x00000000U; 2682} 2683static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) 2684{ 2685 return 0x0U; 2686} 2687static inline u32 gr_gpccs_falcon_addr_ext_s(void) 2688{ 2689 return 12U; 2690} 2691static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) 2692{ 2693 return (v & 0xfffU) << 0U; 2694} 2695static inline u32 gr_gpccs_falcon_addr_ext_m(void) 2696{ 2697 return 0xfffU << 0U; 2698} 2699static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) 2700{ 2701 return (r >> 0U) & 0xfffU; 2702} 2703static inline u32 gr_gpccs_cpuctl_r(void) 2704{ 2705 return 0x0041a100U; 2706} 2707static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) 2708{ 2709 return (v & 0x1U) << 1U; 2710} 2711static inline u32 gr_gpccs_dmactl_r(void) 2712{ 2713 return 0x0041a10cU; 2714} 2715static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) 2716{ 2717 return (v & 0x1U) << 0U; 2718} 2719static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) 2720{ 2721 return 0x1U << 1U; 2722} 2723static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) 2724{ 2725 return 0x1U << 2U; 2726} 2727static inline u32 gr_gpccs_imemc_r(u32 i) 2728{ 2729 return 0x0041a180U + i*16U; 2730} 2731static inline u32 gr_gpccs_imemc_offs_f(u32 v) 2732{ 2733 return (v & 0x3fU) << 2U; 2734} 2735static inline u32 gr_gpccs_imemc_blk_f(u32 v) 2736{ 2737 return (v & 0xffU) << 8U; 2738} 2739static inline u32 gr_gpccs_imemc_aincw_f(u32 v) 2740{ 2741 return (v & 0x1U) << 24U; 2742} 2743static inline u32 gr_gpccs_imemd_r(u32 i) 2744{ 2745 return 0x0041a184U + i*16U; 2746} 2747static inline u32 gr_gpccs_imemt_r(u32 i) 2748{ 2749 return 0x0041a188U + i*16U; 2750} 2751static inline u32 gr_gpccs_imemt__size_1_v(void) 2752{ 2753 return 0x00000004U; 2754} 2755static inline u32 gr_gpccs_imemt_tag_f(u32 v) 2756{ 2757 
return (v & 0xffffU) << 0U; 2758} 2759static inline u32 gr_gpccs_dmemc_r(u32 i) 2760{ 2761 return 0x0041a1c0U + i*8U; 2762} 2763static inline u32 gr_gpccs_dmemc_offs_f(u32 v) 2764{ 2765 return (v & 0x3fU) << 2U; 2766} 2767static inline u32 gr_gpccs_dmemc_blk_f(u32 v) 2768{ 2769 return (v & 0xffU) << 8U; 2770} 2771static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) 2772{ 2773 return (v & 0x1U) << 24U; 2774} 2775static inline u32 gr_gpccs_dmemd_r(u32 i) 2776{ 2777 return 0x0041a1c4U + i*8U; 2778} 2779static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) 2780{ 2781 return 0x0041a800U + i*4U; 2782} 2783static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) 2784{ 2785 return (v & 0xffffffffU) << 0U; 2786} 2787static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) 2788{ 2789 return 0x00418e24U; 2790} 2791static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) 2792{ 2793 return 32U; 2794} 2795static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) 2796{ 2797 return (v & 0xffffffffU) << 0U; 2798} 2799static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) 2800{ 2801 return 0xffffffffU << 0U; 2802} 2803static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) 2804{ 2805 return (r >> 0U) & 0xffffffffU; 2806} 2807static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) 2808{ 2809 return 0x00000000U; 2810} 2811static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) 2812{ 2813 return 0x0U; 2814} 2815static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) 2816{ 2817 return 0x00418e28U; 2818} 2819static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) 2820{ 2821 return 11U; 2822} 2823static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) 2824{ 2825 return (v & 0x7ffU) << 0U; 2826} 2827static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) 2828{ 2829 return 0x7ffU << 0U; 2830} 2831static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) 2832{ 2833 return (r >> 0U) & 0x7ffU; 2834} 2835static inline u32 
gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) 2836{ 2837 return 0x00000018U; 2838} 2839static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) 2840{ 2841 return 0x18U; 2842} 2843static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) 2844{ 2845 return 1U; 2846} 2847static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) 2848{ 2849 return (v & 0x1U) << 31U; 2850} 2851static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) 2852{ 2853 return 0x1U << 31U; 2854} 2855static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) 2856{ 2857 return (r >> 31U) & 0x1U; 2858} 2859static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) 2860{ 2861 return 0x00000000U; 2862} 2863static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) 2864{ 2865 return 0x0U; 2866} 2867static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) 2868{ 2869 return 0x00000001U; 2870} 2871static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) 2872{ 2873 return 0x80000000U; 2874} 2875static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) 2876{ 2877 return 0x00500ee4U; 2878} 2879static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) 2880{ 2881 return (v & 0xffffU) << 0U; 2882} 2883static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) 2884{ 2885 return 0x00000250U; 2886} 2887static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) 2888{ 2889 return 0x00000100U; 2890} 2891static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) 2892{ 2893 return 0x00500ee0U; 2894} 2895static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) 2896{ 2897 return (v & 0xffffffffU) << 0U; 2898} 2899static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) 2900{ 2901 return 0x00000008U; 2902} 2903static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) 2904{ 2905 return 0x00418eecU; 2906} 2907static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) 2908{ 2909 return (v & 
0xfffU) << 0U; 2910} 2911static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) 2912{ 2913 return 0x00000100U; 2914} 2915static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) 2916{ 2917 return 0x0041befcU; 2918} 2919static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) 2920{ 2921 return (v & 0xfffU) << 0U; 2922} 2923static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) 2924{ 2925 return 0x00418ea0U + i*4U; 2926} 2927static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) 2928{ 2929 return (v & 0x3fffffU) << 0U; 2930} 2931static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) 2932{ 2933 return 0x3fffffU << 0U; 2934} 2935static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) 2936{ 2937 return 0x00418010U + i*4U; 2938} 2939static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) 2940{ 2941 return (v & 0xffffffffU) << 0U; 2942} 2943static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) 2944{ 2945 return 0x0041804cU + i*4U; 2946} 2947static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) 2948{ 2949 return (v & 0xffffffffU) << 0U; 2950} 2951static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) 2952{ 2953 return 0x00418088U + i*4U; 2954} 2955static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) 2956{ 2957 return (v & 0xffffffffU) << 0U; 2958} 2959static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) 2960{ 2961 return 0x004180c4U + i*4U; 2962} 2963static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) 2964{ 2965 return (v & 0xffffffffU) << 0U; 2966} 2967static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) 2968{ 2969 return 0x00500100U; 2970} 2971static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) 2972{ 2973 return 0x00418110U + i*4U; 2974} 2975static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) 2976{ 2977 return (v & 0xffffffffU) << 0U; 2978} 2979static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) 2980{ 2981 return 0x0050014cU; 2982} 2983static inline u32 
gr_gpcs_setup_attrib_cb_base_r(void) 2984{ 2985 return 0x00418810U; 2986} 2987static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) 2988{ 2989 return (v & 0xfffffffU) << 0U; 2990} 2991static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) 2992{ 2993 return 0x0000000cU; 2994} 2995static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) 2996{ 2997 return 0x80000000U; 2998} 2999static inline u32 gr_crstr_gpc_map0_r(void) 3000{ 3001 return 0x00418b08U; 3002} 3003static inline u32 gr_crstr_gpc_map0_tile0_f(u32 v) 3004{ 3005 return (v & 0x7U) << 0U; 3006} 3007static inline u32 gr_crstr_gpc_map0_tile1_f(u32 v) 3008{ 3009 return (v & 0x7U) << 5U; 3010} 3011static inline u32 gr_crstr_gpc_map0_tile2_f(u32 v) 3012{ 3013 return (v & 0x7U) << 10U; 3014} 3015static inline u32 gr_crstr_gpc_map0_tile3_f(u32 v) 3016{ 3017 return (v & 0x7U) << 15U; 3018} 3019static inline u32 gr_crstr_gpc_map0_tile4_f(u32 v) 3020{ 3021 return (v & 0x7U) << 20U; 3022} 3023static inline u32 gr_crstr_gpc_map0_tile5_f(u32 v) 3024{ 3025 return (v & 0x7U) << 25U; 3026} 3027static inline u32 gr_crstr_gpc_map1_r(void) 3028{ 3029 return 0x00418b0cU; 3030} 3031static inline u32 gr_crstr_gpc_map1_tile6_f(u32 v) 3032{ 3033 return (v & 0x7U) << 0U; 3034} 3035static inline u32 gr_crstr_gpc_map1_tile7_f(u32 v) 3036{ 3037 return (v & 0x7U) << 5U; 3038} 3039static inline u32 gr_crstr_gpc_map1_tile8_f(u32 v) 3040{ 3041 return (v & 0x7U) << 10U; 3042} 3043static inline u32 gr_crstr_gpc_map1_tile9_f(u32 v) 3044{ 3045 return (v & 0x7U) << 15U; 3046} 3047static inline u32 gr_crstr_gpc_map1_tile10_f(u32 v) 3048{ 3049 return (v & 0x7U) << 20U; 3050} 3051static inline u32 gr_crstr_gpc_map1_tile11_f(u32 v) 3052{ 3053 return (v & 0x7U) << 25U; 3054} 3055static inline u32 gr_crstr_gpc_map2_r(void) 3056{ 3057 return 0x00418b10U; 3058} 3059static inline u32 gr_crstr_gpc_map2_tile12_f(u32 v) 3060{ 3061 return (v & 0x7U) << 0U; 3062} 3063static inline u32 gr_crstr_gpc_map2_tile13_f(u32 
v) 3064{ 3065 return (v & 0x7U) << 5U; 3066} 3067static inline u32 gr_crstr_gpc_map2_tile14_f(u32 v) 3068{ 3069 return (v & 0x7U) << 10U; 3070} 3071static inline u32 gr_crstr_gpc_map2_tile15_f(u32 v) 3072{ 3073 return (v & 0x7U) << 15U; 3074} 3075static inline u32 gr_crstr_gpc_map2_tile16_f(u32 v) 3076{ 3077 return (v & 0x7U) << 20U; 3078} 3079static inline u32 gr_crstr_gpc_map2_tile17_f(u32 v) 3080{ 3081 return (v & 0x7U) << 25U; 3082} 3083static inline u32 gr_crstr_gpc_map3_r(void) 3084{ 3085 return 0x00418b14U; 3086} 3087static inline u32 gr_crstr_gpc_map3_tile18_f(u32 v) 3088{ 3089 return (v & 0x7U) << 0U; 3090} 3091static inline u32 gr_crstr_gpc_map3_tile19_f(u32 v) 3092{ 3093 return (v & 0x7U) << 5U; 3094} 3095static inline u32 gr_crstr_gpc_map3_tile20_f(u32 v) 3096{ 3097 return (v & 0x7U) << 10U; 3098} 3099static inline u32 gr_crstr_gpc_map3_tile21_f(u32 v) 3100{ 3101 return (v & 0x7U) << 15U; 3102} 3103static inline u32 gr_crstr_gpc_map3_tile22_f(u32 v) 3104{ 3105 return (v & 0x7U) << 20U; 3106} 3107static inline u32 gr_crstr_gpc_map3_tile23_f(u32 v) 3108{ 3109 return (v & 0x7U) << 25U; 3110} 3111static inline u32 gr_crstr_gpc_map4_r(void) 3112{ 3113 return 0x00418b18U; 3114} 3115static inline u32 gr_crstr_gpc_map4_tile24_f(u32 v) 3116{ 3117 return (v & 0x7U) << 0U; 3118} 3119static inline u32 gr_crstr_gpc_map4_tile25_f(u32 v) 3120{ 3121 return (v & 0x7U) << 5U; 3122} 3123static inline u32 gr_crstr_gpc_map4_tile26_f(u32 v) 3124{ 3125 return (v & 0x7U) << 10U; 3126} 3127static inline u32 gr_crstr_gpc_map4_tile27_f(u32 v) 3128{ 3129 return (v & 0x7U) << 15U; 3130} 3131static inline u32 gr_crstr_gpc_map4_tile28_f(u32 v) 3132{ 3133 return (v & 0x7U) << 20U; 3134} 3135static inline u32 gr_crstr_gpc_map4_tile29_f(u32 v) 3136{ 3137 return (v & 0x7U) << 25U; 3138} 3139static inline u32 gr_crstr_gpc_map5_r(void) 3140{ 3141 return 0x00418b1cU; 3142} 3143static inline u32 gr_crstr_gpc_map5_tile30_f(u32 v) 3144{ 3145 return (v & 0x7U) << 0U; 3146} 3147static inline u32 
gr_crstr_gpc_map5_tile31_f(u32 v) 3148{ 3149 return (v & 0x7U) << 5U; 3150} 3151static inline u32 gr_crstr_gpc_map5_tile32_f(u32 v) 3152{ 3153 return (v & 0x7U) << 10U; 3154} 3155static inline u32 gr_crstr_gpc_map5_tile33_f(u32 v) 3156{ 3157 return (v & 0x7U) << 15U; 3158} 3159static inline u32 gr_crstr_gpc_map5_tile34_f(u32 v) 3160{ 3161 return (v & 0x7U) << 20U; 3162} 3163static inline u32 gr_crstr_gpc_map5_tile35_f(u32 v) 3164{ 3165 return (v & 0x7U) << 25U; 3166} 3167static inline u32 gr_crstr_map_table_cfg_r(void) 3168{ 3169 return 0x00418bb8U; 3170} 3171static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) 3172{ 3173 return (v & 0xffU) << 0U; 3174} 3175static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) 3176{ 3177 return (v & 0xffU) << 8U; 3178} 3179static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_r(void) 3180{ 3181 return 0x00418980U; 3182} 3183static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_0_f(u32 v) 3184{ 3185 return (v & 0x7U) << 0U; 3186} 3187static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_1_f(u32 v) 3188{ 3189 return (v & 0x7U) << 4U; 3190} 3191static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_2_f(u32 v) 3192{ 3193 return (v & 0x7U) << 8U; 3194} 3195static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_3_f(u32 v) 3196{ 3197 return (v & 0x7U) << 12U; 3198} 3199static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_4_f(u32 v) 3200{ 3201 return (v & 0x7U) << 16U; 3202} 3203static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_5_f(u32 v) 3204{ 3205 return (v & 0x7U) << 20U; 3206} 3207static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_6_f(u32 v) 3208{ 3209 return (v & 0x7U) << 24U; 3210} 3211static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map0_tile_7_f(u32 v) 3212{ 3213 return (v & 0x7U) << 28U; 3214} 3215static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_r(void) 3216{ 3217 return 0x00418984U; 3218} 3219static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_8_f(u32 v) 3220{ 
3221 return (v & 0x7U) << 0U; 3222} 3223static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_9_f(u32 v) 3224{ 3225 return (v & 0x7U) << 4U; 3226} 3227static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_10_f(u32 v) 3228{ 3229 return (v & 0x7U) << 8U; 3230} 3231static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_11_f(u32 v) 3232{ 3233 return (v & 0x7U) << 12U; 3234} 3235static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_12_f(u32 v) 3236{ 3237 return (v & 0x7U) << 16U; 3238} 3239static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_13_f(u32 v) 3240{ 3241 return (v & 0x7U) << 20U; 3242} 3243static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_14_f(u32 v) 3244{ 3245 return (v & 0x7U) << 24U; 3246} 3247static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map1_tile_15_f(u32 v) 3248{ 3249 return (v & 0x7U) << 28U; 3250} 3251static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_r(void) 3252{ 3253 return 0x00418988U; 3254} 3255static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_16_f(u32 v) 3256{ 3257 return (v & 0x7U) << 0U; 3258} 3259static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_17_f(u32 v) 3260{ 3261 return (v & 0x7U) << 4U; 3262} 3263static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_18_f(u32 v) 3264{ 3265 return (v & 0x7U) << 8U; 3266} 3267static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_19_f(u32 v) 3268{ 3269 return (v & 0x7U) << 12U; 3270} 3271static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_20_f(u32 v) 3272{ 3273 return (v & 0x7U) << 16U; 3274} 3275static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_21_f(u32 v) 3276{ 3277 return (v & 0x7U) << 20U; 3278} 3279static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_22_f(u32 v) 3280{ 3281 return (v & 0x7U) << 24U; 3282} 3283static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_s(void) 3284{ 3285 return 3U; 3286} 3287static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_f(u32 v) 3288{ 3289 return (v & 0x7U) << 28U; 
3290} 3291static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_m(void) 3292{ 3293 return 0x7U << 28U; 3294} 3295static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map2_tile_23_v(u32 r) 3296{ 3297 return (r >> 28U) & 0x7U; 3298} 3299static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_r(void) 3300{ 3301 return 0x0041898cU; 3302} 3303static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_24_f(u32 v) 3304{ 3305 return (v & 0x7U) << 0U; 3306} 3307static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_25_f(u32 v) 3308{ 3309 return (v & 0x7U) << 4U; 3310} 3311static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_26_f(u32 v) 3312{ 3313 return (v & 0x7U) << 8U; 3314} 3315static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_27_f(u32 v) 3316{ 3317 return (v & 0x7U) << 12U; 3318} 3319static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_28_f(u32 v) 3320{ 3321 return (v & 0x7U) << 16U; 3322} 3323static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_29_f(u32 v) 3324{ 3325 return (v & 0x7U) << 20U; 3326} 3327static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_30_f(u32 v) 3328{ 3329 return (v & 0x7U) << 24U; 3330} 3331static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map3_tile_31_f(u32 v) 3332{ 3333 return (v & 0x7U) << 28U; 3334} 3335static inline u32 gr_gpcs_gpm_pd_cfg_r(void) 3336{ 3337 return 0x00418c6cU; 3338} 3339static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_disable_f(void) 3340{ 3341 return 0x0U; 3342} 3343static inline u32 gr_gpcs_gpm_pd_cfg_timeslice_mode_enable_f(void) 3344{ 3345 return 0x1U; 3346} 3347static inline u32 gr_gpcs_gcc_pagepool_base_r(void) 3348{ 3349 return 0x00419004U; 3350} 3351static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) 3352{ 3353 return (v & 0xffffffffU) << 0U; 3354} 3355static inline u32 gr_gpcs_gcc_pagepool_r(void) 3356{ 3357 return 0x00419008U; 3358} 3359static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) 3360{ 3361 return (v & 0x3ffU) << 0U; 3362} 3363static inline u32 
gr_gpcs_tpcs_pe_vaf_r(void) 3364{ 3365 return 0x0041980cU; 3366} 3367static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) 3368{ 3369 return 0x10U; 3370} 3371static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) 3372{ 3373 return 0x00419848U; 3374} 3375static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) 3376{ 3377 return (v & 0xfffffffU) << 0U; 3378} 3379static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) 3380{ 3381 return (v & 0x1U) << 28U; 3382} 3383static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) 3384{ 3385 return 0x10000000U; 3386} 3387static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) 3388{ 3389 return 0x00419c00U; 3390} 3391static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) 3392{ 3393 return 0x0U; 3394} 3395static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) 3396{ 3397 return 0x8U; 3398} 3399static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) 3400{ 3401 return 0x00419c2cU; 3402} 3403static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) 3404{ 3405 return (v & 0xfffffffU) << 0U; 3406} 3407static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) 3408{ 3409 return (v & 0x1U) << 28U; 3410} 3411static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) 3412{ 3413 return 0x10000000U; 3414} 3415static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(void) 3416{ 3417 return 0x00419e44U; 3418} 3419static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_error_report_f(void) 3420{ 3421 return 0x2U; 3422} 3423static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_api_stack_error_report_f(void) 3424{ 3425 return 0x4U; 3426} 3427static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_ret_empty_stack_error_report_f(void) 3428{ 3429 return 0x8U; 3430} 3431static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_wrap_report_f(void) 3432{ 3433 
return 0x10U; 3434} 3435static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_pc_report_f(void) 3436{ 3437 return 0x20U; 3438} 3439static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_pc_overflow_report_f(void) 3440{ 3441 return 0x40U; 3442} 3443static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_immc_addr_report_f(void) 3444{ 3445 return 0x80U; 3446} 3447static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_reg_report_f(void) 3448{ 3449 return 0x100U; 3450} 3451static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) 3452{ 3453 return 0x200U; 3454} 3455static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_sph_instr_combo_report_f(void) 3456{ 3457 return 0x400U; 3458} 3459static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) 3460{ 3461 return 0x800U; 3462} 3463static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_report_f(void) 3464{ 3465 return 0x1000U; 3466} 3467static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_reg_report_f(void) 3468{ 3469 return 0x2000U; 3470} 3471static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_oor_addr_report_f(void) 3472{ 3473 return 0x4000U; 3474} 3475static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_misaligned_addr_report_f(void) 3476{ 3477 return 0x8000U; 3478} 3479static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) 3480{ 3481 return 0x10000U; 3482} 3483static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_illegal_instr_param2_report_f(void) 3484{ 3485 return 0x20000U; 3486} 3487static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) 3488{ 3489 return 0x40000U; 3490} 3491static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_mmu_fault_report_f(void) 3492{ 3493 return 0x800000U; 3494} 3495static inline u32 
gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_stack_overflow_report_f(void) 3496{ 3497 return 0x400000U; 3498} 3499static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_geometry_sm_error_report_f(void) 3500{ 3501 return 0x80000U; 3502} 3503static inline u32 gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_divergent_report_f(void) 3504{ 3505 return 0x100000U; 3506} 3507static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_report_mask_r(void) 3508{ 3509 return 0x00504644U; 3510} 3511static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r(void) 3512{ 3513 return 0x00419e4cU; 3514} 3515static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_sm_to_sm_fault_report_f(void) 3516{ 3517 return 0x1U; 3518} 3519static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_l1_error_report_f(void) 3520{ 3521 return 0x2U; 3522} 3523static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) 3524{ 3525 return 0x4U; 3526} 3527static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_physical_stack_overflow_error_report_f(void) 3528{ 3529 return 0x8U; 3530} 3531static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_int_report_f(void) 3532{ 3533 return 0x10U; 3534} 3535static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_ecc_sec_error_report_f(void) 3536{ 3537 return 0x20000000U; 3538} 3539static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_ecc_ded_error_report_f(void) 3540{ 3541 return 0x40000000U; 3542} 3543static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_bpt_pause_report_f(void) 3544{ 3545 return 0x20U; 3546} 3547static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_single_step_complete_report_f(void) 3548{ 3549 return 0x40U; 3550} 3551static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_report_mask_r(void) 3552{ 3553 return 0x0050464cU; 3554} 3555static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) 3556{ 3557 return 0x00419d0cU; 3558} 3559static inline u32 
gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) 3560{ 3561 return 0x2U; 3562} 3563static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) 3564{ 3565 return 0x1U; 3566} 3567static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 3568{ 3569 return 0x0050450cU; 3570} 3571static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) 3572{ 3573 return (r >> 1U) & 0x1U; 3574} 3575static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) 3576{ 3577 return 0x2U; 3578} 3579static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) 3580{ 3581 return 0x0041ac94U; 3582} 3583static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) 3584{ 3585 return (v & 0xffU) << 16U; 3586} 3587static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) 3588{ 3589 return 0x00502c90U; 3590} 3591static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) 3592{ 3593 return (r >> 2U) & 0x1U; 3594} 3595static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) 3596{ 3597 return (r >> 16U) & 0xffU; 3598} 3599static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) 3600{ 3601 return 0x00000001U; 3602} 3603static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) 3604{ 3605 return 0x00504508U; 3606} 3607static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) 3608{ 3609 return (r >> 0U) & 0x1U; 3610} 3611static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) 3612{ 3613 return 0x00000001U; 3614} 3615static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) 3616{ 3617 return (r >> 1U) & 0x1U; 3618} 3619static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) 3620{ 3621 return 0x00000001U; 3622} 3623static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_r(void) 3624{ 3625 return 0x00504610U; 3626} 3627static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_m(void) 3628{ 3629 return 0x1U << 0U; 3630} 3631static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_v(u32 r) 3632{ 3633 return (r >> 0U) & 0x1U; 
3634} 3635static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_v(void) 3636{ 3637 return 0x00000001U; 3638} 3639static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_off_v(void) 3640{ 3641 return 0x00000000U; 3642} 3643static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f(void) 3644{ 3645 return 0x80000000U; 3646} 3647static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_disable_f(void) 3648{ 3649 return 0x0U; 3650} 3651static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_single_step_mode_enable_f(void) 3652{ 3653 return 0x8U; 3654} 3655static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_single_step_mode_disable_f(void) 3656{ 3657 return 0x0U; 3658} 3659static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_run_trigger_task_f(void) 3660{ 3661 return 0x40000000U; 3662} 3663static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_m(void) 3664{ 3665 return 0x1U << 1U; 3666} 3667static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_v(u32 r) 3668{ 3669 return (r >> 1U) & 0x1U; 3670} 3671static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_warp_disable_f(void) 3672{ 3673 return 0x0U; 3674} 3675static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_m(void) 3676{ 3677 return 0x1U << 2U; 3678} 3679static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_v(u32 r) 3680{ 3681 return (r >> 2U) & 0x1U; 3682} 3683static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_disable_f(void) 3684{ 3685 return 0x0U; 3686} 3687static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_stop_on_any_warp_disable_v(void) 3688{ 3689 return 0x00000000U; 3690} 3691static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_on_any_sm_stop_on_any_sm_disable_v(void) 3692{ 3693 return 0x00000000U; 3694} 3695static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_r(void) 3696{ 3697 return 0x00504614U; 3698} 3699static inline u32 gr_gpc0_tpc0_sm_warp_valid_mask_1_r(void) 3700{ 3701 return 0x00504618U; 3702} 3703static inline u32 
gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r(void) 3704{ 3705 return 0x00504624U; 3706} 3707static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_1_r(void) 3708{ 3709 return 0x00504628U; 3710} 3711static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r(void) 3712{ 3713 return 0x00504634U; 3714} 3715static inline u32 gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r(void) 3716{ 3717 return 0x00504638U; 3718} 3719static inline u32 gr_gpcs_tpcs_sm_dbgr_bpt_pause_mask_r(void) 3720{ 3721 return 0x00419e24U; 3722} 3723static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_r(void) 3724{ 3725 return 0x0050460cU; 3726} 3727static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_sm_in_trap_mode_v(u32 r) 3728{ 3729 return (r >> 0U) & 0x1U; 3730} 3731static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(u32 r) 3732{ 3733 return (r >> 4U) & 0x1U; 3734} 3735static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v(void) 3736{ 3737 return 0x00000001U; 3738} 3739static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_r(void) 3740{ 3741 return 0x00419e50U; 3742} 3743static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_int_pending_f(void) 3744{ 3745 return 0x10U; 3746} 3747static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_pause_pending_f(void) 3748{ 3749 return 0x20U; 3750} 3751static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_single_step_complete_pending_f(void) 3752{ 3753 return 0x40U; 3754} 3755static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_sm_to_sm_fault_pending_f(void) 3756{ 3757 return 0x1U; 3758} 3759static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_l1_error_pending_f(void) 3760{ 3761 return 0x2U; 3762} 3763static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_multiple_warp_errors_pending_f(void) 3764{ 3765 return 0x4U; 3766} 3767static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_physical_stack_overflow_error_pending_f(void) 3768{ 3769 return 0x8U; 3770} 3771static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_timeout_error_pending_f(void) 3772{ 3773 return 0x80000000U; 3774} 3775static inline u32 
gr_gpc0_tpc0_sm_hww_global_esr_r(void) 3776{ 3777 return 0x00504650U; 3778} 3779static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f(void) 3780{ 3781 return 0x10U; 3782} 3783static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_ecc_sec_error_pending_f(void) 3784{ 3785 return 0x20000000U; 3786} 3787static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_ecc_ded_error_pending_f(void) 3788{ 3789 return 0x40000000U; 3790} 3791static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f(void) 3792{ 3793 return 0x20U; 3794} 3795static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(void) 3796{ 3797 return 0x40U; 3798} 3799static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_sm_to_sm_fault_pending_f(void) 3800{ 3801 return 0x1U; 3802} 3803static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_l1_error_pending_f(void) 3804{ 3805 return 0x2U; 3806} 3807static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_multiple_warp_errors_pending_f(void) 3808{ 3809 return 0x4U; 3810} 3811static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_physical_stack_overflow_error_pending_f(void) 3812{ 3813 return 0x8U; 3814} 3815static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_timeout_error_pending_f(void) 3816{ 3817 return 0x80000000U; 3818} 3819static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_r(void) 3820{ 3821 return 0x00504224U; 3822} 3823static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_intr_pending_f(void) 3824{ 3825 return 0x1U; 3826} 3827static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f(void) 3828{ 3829 return 0x80U; 3830} 3831static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f(void) 3832{ 3833 return 0x100U; 3834} 3835static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_reset_active_f(void) 3836{ 3837 return 0x40000000U; 3838} 3839static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_r(void) 3840{ 3841 return 0x00504648U; 3842} 3843static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_v(u32 r) 3844{ 3845 return (r >> 0U) & 0xffffU; 3846} 3847static inline u32 
gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v(void) 3848{ 3849 return 0x00000000U; 3850} 3851static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f(void) 3852{ 3853 return 0x0U; 3854} 3855static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m(void) 3856{ 3857 return 0x1U << 24U; 3858} 3859static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_m(void) 3860{ 3861 return 0x7U << 25U; 3862} 3863static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_none_f(void) 3864{ 3865 return 0x0U; 3866} 3867static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_pc_r(void) 3868{ 3869 return 0x00504654U; 3870} 3871static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) 3872{ 3873 return 0x00504770U; 3874} 3875static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) 3876{ 3877 return 0x00419f70U; 3878} 3879static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) 3880{ 3881 return 0x1U << 4U; 3882} 3883static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) 3884{ 3885 return (v & 0x1U) << 4U; 3886} 3887static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) 3888{ 3889 return 0x0050477cU; 3890} 3891static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) 3892{ 3893 return 0x00419f7cU; 3894} 3895static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) 3896{ 3897 return 0x1U << 0U; 3898} 3899static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) 3900{ 3901 return (v & 0x1U) << 0U; 3902} 3903static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) 3904{ 3905 return 0x0041be08U; 3906} 3907static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) 3908{ 3909 return 0x4U; 3910} 3911static inline u32 gr_ppcs_wwdx_map_gpc_map0_r(void) 3912{ 3913 return 0x0041bf00U; 3914} 3915static inline u32 gr_ppcs_wwdx_map_gpc_map1_r(void) 3916{ 3917 return 0x0041bf04U; 3918} 3919static inline u32 gr_ppcs_wwdx_map_gpc_map2_r(void) 3920{ 3921 return 0x0041bf08U; 3922} 3923static inline u32 
gr_ppcs_wwdx_map_gpc_map3_r(void) 3924{ 3925 return 0x0041bf0cU; 3926} 3927static inline u32 gr_ppcs_wwdx_map_gpc_map4_r(void) 3928{ 3929 return 0x0041bf10U; 3930} 3931static inline u32 gr_ppcs_wwdx_map_gpc_map5_r(void) 3932{ 3933 return 0x0041bf14U; 3934} 3935static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) 3936{ 3937 return 0x0041bfd0U; 3938} 3939static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) 3940{ 3941 return (v & 0xffU) << 0U; 3942} 3943static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) 3944{ 3945 return (v & 0xffU) << 8U; 3946} 3947static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) 3948{ 3949 return (v & 0x1fU) << 16U; 3950} 3951static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) 3952{ 3953 return (v & 0x7U) << 21U; 3954} 3955static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff5_mod_value_f(u32 v) 3956{ 3957 return (v & 0x1fU) << 24U; 3958} 3959static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) 3960{ 3961 return 0x0041bfd4U; 3962} 3963static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) 3964{ 3965 return (v & 0xffffffU) << 0U; 3966} 3967static inline u32 gr_ppcs_wwdx_map_table_cfg2_r(void) 3968{ 3969 return 0x0041bfe4U; 3970} 3971static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff6_mod_value_f(u32 v) 3972{ 3973 return (v & 0x1fU) << 0U; 3974} 3975static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff7_mod_value_f(u32 v) 3976{ 3977 return (v & 0x1fU) << 5U; 3978} 3979static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff8_mod_value_f(u32 v) 3980{ 3981 return (v & 0x1fU) << 10U; 3982} 3983static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff9_mod_value_f(u32 v) 3984{ 3985 return (v & 0x1fU) << 15U; 3986} 3987static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff10_mod_value_f(u32 v) 3988{ 3989 return (v & 0x1fU) << 20U; 3990} 3991static inline u32 gr_ppcs_wwdx_map_table_cfg2_coeff11_mod_value_f(u32 v) 3992{ 3993 return (v & 0x1fU) << 25U; 3994} 3995static inline u32 
gr_bes_zrop_settings_r(void) 3996{ 3997 return 0x00408850U; 3998} 3999static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) 4000{ 4001 return (v & 0xfU) << 0U; 4002} 4003static inline u32 gr_be0_crop_debug3_r(void) 4004{ 4005 return 0x00410108U; 4006} 4007static inline u32 gr_bes_crop_debug3_r(void) 4008{ 4009 return 0x00408908U; 4010} 4011static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) 4012{ 4013 return 0x1U << 31U; 4014} 4015static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void) 4016{ 4017 return 0x1U << 1U; 4018} 4019static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void) 4020{ 4021 return 0x0U; 4022} 4023static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void) 4024{ 4025 return 0x2U; 4026} 4027static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void) 4028{ 4029 return 0x1U << 2U; 4030} 4031static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void) 4032{ 4033 return 0x0U; 4034} 4035static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void) 4036{ 4037 return 0x4U; 4038} 4039static inline u32 gr_bes_crop_debug4_r(void) 4040{ 4041 return 0x0040894cU; 4042} 4043static inline u32 gr_bes_crop_debug4_clamp_fp_blend_m(void) 4044{ 4045 return 0x1U << 18U; 4046} 4047static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_inf_f(void) 4048{ 4049 return 0x0U; 4050} 4051static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_maxval_f(void) 4052{ 4053 return 0x40000U; 4054} 4055static inline u32 gr_bes_crop_settings_r(void) 4056{ 4057 return 0x00408958U; 4058} 4059static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) 4060{ 4061 return (v & 0xfU) << 0U; 4062} 4063static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) 4064{ 4065 return 0x00000020U; 4066} 4067static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) 4068{ 4069 return 0x00000020U; 4070} 4071static inline u32 
gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) 4072{ 4073 return 0x000000c0U; 4074} 4075static inline u32 gr_zcull_subregion_qty_v(void) 4076{ 4077 return 0x00000010U; 4078} 4079static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel0_r(void) 4080{ 4081 return 0x00504604U; 4082} 4083static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control_sel1_r(void) 4084{ 4085 return 0x00504608U; 4086} 4087static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r(void) 4088{ 4089 return 0x0050465cU; 4090} 4091static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control1_r(void) 4092{ 4093 return 0x00504660U; 4094} 4095static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control2_r(void) 4096{ 4097 return 0x00504664U; 4098} 4099static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control3_r(void) 4100{ 4101 return 0x00504668U; 4102} 4103static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control4_r(void) 4104{ 4105 return 0x0050466cU; 4106} 4107static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control5_r(void) 4108{ 4109 return 0x00504658U; 4110} 4111static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_control_r(void) 4112{ 4113 return 0x00504730U; 4114} 4115static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_control_r(void) 4116{ 4117 return 0x00504734U; 4118} 4119static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_control_r(void) 4120{ 4121 return 0x00504738U; 4122} 4123static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_control_r(void) 4124{ 4125 return 0x0050473cU; 4126} 4127static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter4_control_r(void) 4128{ 4129 return 0x00504740U; 4130} 4131static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter5_control_r(void) 4132{ 4133 return 0x00504744U; 4134} 4135static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter6_control_r(void) 4136{ 4137 return 0x00504748U; 4138} 4139static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter7_control_r(void) 4140{ 4141 return 0x0050474cU; 
4142} 4143static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status_s1_r(void) 4144{ 4145 return 0x00504678U; 4146} 4147static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_status1_r(void) 4148{ 4149 return 0x00504694U; 4150} 4151static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_s0_r(void) 4152{ 4153 return 0x005046f0U; 4154} 4155static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter0_s1_r(void) 4156{ 4157 return 0x00504700U; 4158} 4159static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_s0_r(void) 4160{ 4161 return 0x005046f4U; 4162} 4163static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter1_s1_r(void) 4164{ 4165 return 0x00504704U; 4166} 4167static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_s0_r(void) 4168{ 4169 return 0x005046f8U; 4170} 4171static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter2_s1_r(void) 4172{ 4173 return 0x00504708U; 4174} 4175static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_s0_r(void) 4176{ 4177 return 0x005046fcU; 4178} 4179static inline u32 gr_pri_gpc0_tpc0_sm_dsm_perf_counter3_s1_r(void) 4180{ 4181 return 0x0050470cU; 4182} 4183static inline u32 gr_fe_pwr_mode_r(void) 4184{ 4185 return 0x00404170U; 4186} 4187static inline u32 gr_fe_pwr_mode_mode_auto_f(void) 4188{ 4189 return 0x0U; 4190} 4191static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) 4192{ 4193 return 0x2U; 4194} 4195static inline u32 gr_fe_pwr_mode_req_v(u32 r) 4196{ 4197 return (r >> 4U) & 0x1U; 4198} 4199static inline u32 gr_fe_pwr_mode_req_send_f(void) 4200{ 4201 return 0x10U; 4202} 4203static inline u32 gr_fe_pwr_mode_req_done_v(void) 4204{ 4205 return 0x00000000U; 4206} 4207static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) 4208{ 4209 return 0x00418880U; 4210} 4211static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) 4212{ 4213 return 0x1U << 0U; 4214} 4215static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) 4216{ 4217 return 0x1U << 11U; 4218} 4219static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) 4220{ 4221 return 0x1U 
<< 1U; 4222} 4223static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) 4224{ 4225 return 0x1U << 2U; 4226} 4227static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) 4228{ 4229 return 0x3U << 3U; 4230} 4231static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) 4232{ 4233 return 0x3U << 5U; 4234} 4235static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) 4236{ 4237 return 0x3U << 28U; 4238} 4239static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) 4240{ 4241 return 0x1U << 30U; 4242} 4243static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) 4244{ 4245 return 0x1U << 31U; 4246} 4247static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) 4248{ 4249 return 0x00418890U; 4250} 4251static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) 4252{ 4253 return 0x00418894U; 4254} 4255static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) 4256{ 4257 return 0x004188b0U; 4258} 4259static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) 4260{ 4261 return (r >> 16U) & 0x1U; 4262} 4263static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) 4264{ 4265 return 0x00000001U; 4266} 4267static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) 4268{ 4269 return 0x004188b4U; 4270} 4271static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) 4272{ 4273 return 0x004188b8U; 4274} 4275static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) 4276{ 4277 return 0x004188acU; 4278} 4279static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_r(void) 4280{ 4281 return 0x00419e10U; 4282} 4283static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_f(u32 v) 4284{ 4285 return (v & 0x1U) << 0U; 4286} 4287static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_on_v(void) 4288{ 4289 return 0x00000001U; 4290} 4291static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_m(void) 4292{ 4293 return 0x1U << 31U; 4294} 4295static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_v(u32 r) 4296{ 4297 return (r >> 31U) & 0x1U; 4298} 4299static inline u32 
gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(void) 4300{ 4301 return 0x80000000U; 4302} 4303static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_disable_f(void) 4304{ 4305 return 0x0U; 4306} 4307static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_m(void) 4308{ 4309 return 0x1U << 3U; 4310} 4311static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f(void) 4312{ 4313 return 0x8U; 4314} 4315static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_disable_f(void) 4316{ 4317 return 0x0U; 4318} 4319static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_m(void) 4320{ 4321 return 0x1U << 30U; 4322} 4323static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_v(u32 r) 4324{ 4325 return (r >> 30U) & 0x1U; 4326} 4327static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_task_f(void) 4328{ 4329 return 0x40000000U; 4330} 4331static inline u32 gr_fe_gfxp_wfi_timeout_r(void) 4332{ 4333 return 0x004041c0U; 4334} 4335static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v) 4336{ 4337 return (v & 0xffffffffU) << 0U; 4338} 4339static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void) 4340{ 4341 return 0x0U; 4342} 4343static inline u32 gr_debug_2_r(void) 4344{ 4345 return 0x00400088U; 4346} 4347static inline u32 gr_debug_2_gfxp_wfi_always_injects_wfi_m(void) 4348{ 4349 return 0x1U << 23U; 4350} 4351static inline u32 gr_debug_2_gfxp_wfi_always_injects_wfi_v(u32 r) 4352{ 4353 return (r >> 23U) & 0x1U; 4354} 4355static inline u32 gr_debug_2_gfxp_wfi_always_injects_wfi_enabled_f(void) 4356{ 4357 return 0x800000U; 4358} 4359static inline u32 gr_debug_2_gfxp_wfi_always_injects_wfi_disabled_f(void) 4360{ 4361 return 0x0U; 4362} 4363static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void) 4364{ 4365 return 0x00419c84U; 4366} 4367static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v) 4368{ 4369 return (v & 0x7U) << 8U; 4370} 4371static inline u32 
gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void) 4372{ 4373 return 0x7U << 8U; 4374} 4375static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void) 4376{ 4377 return 0x100U; 4378} 4379static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void) 4380{ 4381 return 0x00419f78U; 4382} 4383static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void) 4384{ 4385 return 0x3U << 11U; 4386} 4387static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void) 4388{ 4389 return 0x1000U; 4390} 4391static inline u32 gr_gpcs_tc_debug0_r(void) 4392{ 4393 return 0x00418708U; 4394} 4395static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v) 4396{ 4397 return (v & 0xffU) << 0U; 4398} 4399static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void) 4400{ 4401 return 0xffU << 0U; 4402} 4403static inline u32 gr_gpc0_prop_debug1_r(void) 4404{ 4405 return 0x00500400U; 4406} 4407static inline u32 gr_gpc0_prop_debug1_czf_bypass_f(u32 v) 4408{ 4409 return (v & 0x3U) << 14U; 4410} 4411static inline u32 gr_gpc0_prop_debug1_czf_bypass_m(void) 4412{ 4413 return 0x3U << 14U; 4414} 4415static inline u32 gr_gpc0_prop_debug1_czf_bypass_init_v(void) 4416{ 4417 return 0x00000001U; 4418} 4419#endif
diff --git a/include/nvgpu/hw/gp10b/hw_ltc_gp10b.h b/include/nvgpu/hw/gp10b/hw_ltc_gp10b.h
deleted file mode 100644
index 721a48a..0000000
--- a/include/nvgpu/hw/gp10b/hw_ltc_gp10b.h
+++ /dev/null
@@ -1,587 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ltc_gp10b_h_ 57#define _hw_ltc_gp10b_h_ 58 59static inline u32 ltc_pltcg_base_v(void) 60{ 61 return 0x00140000U; 62} 63static inline u32 ltc_pltcg_extent_v(void) 64{ 65 return 0x0017ffffU; 66} 67static inline u32 ltc_ltc0_ltss_v(void) 68{ 69 return 0x00140200U; 70} 71static inline u32 ltc_ltc0_lts0_v(void) 72{ 73 return 0x00140400U; 74} 75static inline u32 ltc_ltcs_ltss_v(void) 76{ 77 return 0x0017e200U; 78} 79static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) 80{ 81 return 0x0014046cU; 82} 83static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) 84{ 85 return 0x00140518U; 86} 87static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) 88{ 89 return 0x0017e318U; 90} 91static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) 92{ 93 return 0x1U << 15U; 94} 95static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) 96{ 97 return 0x00140494U; 98} 99static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) 100{ 101 return (r >> 0U) & 0xffffU; 102} 103static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) 104{ 105 return (r >> 16U) & 0x3U; 106} 107static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) 108{ 109 return 0x00000000U; 110} 111static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) 120{ 121 return 0x0017e26cU; 122} 123static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) 124{ 125 return 0x1U; 126} 127static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) 128{ 129 return 0x2U; 130} 131static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) 132{ 133 return (r >> 2U) & 0x1U; 134} 135static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) 140{ 141 return 0x4U; 142} 143static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) 144{ 145 return 0x0014046cU; 146} 147static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) 148{ 149 return 0x0017e270U; 150} 151static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) 152{ 153 return (v & 0x3ffffU) << 0U; 154} 155static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) 156{ 157 return 0x0017e274U; 158} 159static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) 160{ 161 return (v & 0x3ffffU) << 0U; 162} 163static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) 164{ 165 return 0x0003ffffU; 166} 167static inline u32 ltc_ltcs_ltss_cbc_base_r(void) 168{ 169 return 0x0017e278U; 170} 171static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) 172{ 173 return 0x0000000bU; 174} 175static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) 176{ 177 return (r >> 0U) & 0x3ffffffU; 178} 179static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) 180{ 181 return 0x0017e27cU; 182} 183static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) 184{ 185 return 0x0017e000U; 186} 187static inline u32 ltc_ltcs_ltss_cbc_param_r(void) 188{ 189 return 0x0017e280U; 190} 191static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) 192{ 193 return (r >> 0U) & 0xffffU; 194} 
195static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) 196{ 197 return (r >> 24U) & 0xfU; 198} 199static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) 200{ 201 return (r >> 28U) & 0xfU; 202} 203static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) 204{ 205 return 0x0017e3f4U; 206} 207static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) 208{ 209 return (r >> 0U) & 0xffffU; 210} 211static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) 212{ 213 return 0x0017e2acU; 214} 215static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) 216{ 217 return (v & 0x1fU) << 16U; 218} 219static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) 220{ 221 return 0x0017e338U; 222} 223static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) 224{ 225 return (v & 0xfU) << 0U; 226} 227static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) 228{ 229 return 0x0017e33cU + i*4U; 230} 231static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) 232{ 233 return 0x00000004U; 234} 235static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) 236{ 237 return 0x0017e34cU; 238} 239static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) 240{ 241 return 32U; 242} 243static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) 244{ 245 return (v & 0xffffffffU) << 0U; 246} 247static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) 248{ 249 return 0xffffffffU << 0U; 250} 251static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) 252{ 253 return (r >> 0U) & 0xffffffffU; 254} 255static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) 256{ 257 return 0x0017e2b0U; 258} 259static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) 260{ 261 return 0x10000000U; 262} 263static inline u32 ltc_ltcs_ltss_g_elpg_r(void) 264{ 265 return 0x0017e214U; 266} 267static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) 268{ 269 
return (r >> 0U) & 0x1U; 270} 271static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) 272{ 273 return 0x00000001U; 274} 275static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) 276{ 277 return 0x1U; 278} 279static inline u32 ltc_ltc0_ltss_g_elpg_r(void) 280{ 281 return 0x00140214U; 282} 283static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) 284{ 285 return (r >> 0U) & 0x1U; 286} 287static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) 288{ 289 return 0x00000001U; 290} 291static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) 292{ 293 return 0x1U; 294} 295static inline u32 ltc_ltc1_ltss_g_elpg_r(void) 296{ 297 return 0x00142214U; 298} 299static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) 300{ 301 return (r >> 0U) & 0x1U; 302} 303static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void) 304{ 305 return 0x00000001U; 306} 307static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) 308{ 309 return 0x1U; 310} 311static inline u32 ltc_ltcs_ltss_intr_r(void) 312{ 313 return 0x0017e20cU; 314} 315static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) 316{ 317 return 0x100U; 318} 319static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) 320{ 321 return 0x200U; 322} 323static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) 324{ 325 return 0x1U << 20U; 326} 327static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) 328{ 329 return 0x1U << 30U; 330} 331static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) 332{ 333 return 0x1000000U; 334} 335static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) 336{ 337 return 0x2000000U; 338} 339static inline u32 ltc_ltc0_lts0_intr_r(void) 340{ 341 return 0x0014040cU; 342} 343static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) 344{ 345 return 0x0014051cU; 346} 347static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) 348{ 349 return 0xffU << 0U; 350} 351static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 
r) 352{ 353 return (r >> 0U) & 0xffU; 354} 355static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) 356{ 357 return 0xffU << 16U; 358} 359static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) 360{ 361 return (r >> 16U) & 0xffU; 362} 363static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) 364{ 365 return 0x0017e2a0U; 366} 367static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) 368{ 369 return (r >> 0U) & 0x1U; 370} 371static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) 372{ 373 return 0x00000001U; 374} 375static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) 376{ 377 return 0x1U; 378} 379static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) 380{ 381 return (r >> 8U) & 0xfU; 382} 383static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) 384{ 385 return 0x00000003U; 386} 387static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) 388{ 389 return 0x300U; 390} 391static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 392{ 393 return (r >> 28U) & 0x1U; 394} 395static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) 396{ 397 return 0x00000001U; 398} 399static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) 400{ 401 return 0x10000000U; 402} 403static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) 404{ 405 return (r >> 29U) & 0x1U; 406} 407static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) 408{ 409 return 0x00000001U; 410} 411static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) 412{ 413 return 0x20000000U; 414} 415static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) 416{ 417 return (r >> 30U) & 0x1U; 418} 419static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) 420{ 421 return 
0x00000001U; 422} 423static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) 424{ 425 return 0x40000000U; 426} 427static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) 428{ 429 return 0x0017e2a4U; 430} 431static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) 432{ 433 return (r >> 0U) & 0x1U; 434} 435static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) 436{ 437 return 0x00000001U; 438} 439static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) 440{ 441 return 0x1U; 442} 443static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) 444{ 445 return (r >> 8U) & 0xfU; 446} 447static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) 448{ 449 return 0x00000003U; 450} 451static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) 452{ 453 return 0x300U; 454} 455static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) 456{ 457 return (r >> 16U) & 0x1U; 458} 459static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) 460{ 461 return 0x00000001U; 462} 463static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) 464{ 465 return 0x10000U; 466} 467static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) 468{ 469 return (r >> 28U) & 0x1U; 470} 471static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) 472{ 473 return 0x00000001U; 474} 475static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) 476{ 477 return 0x10000000U; 478} 479static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) 480{ 481 return (r >> 29U) & 0x1U; 482} 483static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) 484{ 485 return 0x00000001U; 486} 487static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) 488{ 489 return 0x20000000U; 490} 491static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) 492{ 493 return (r >> 30U) & 0x1U; 494} 495static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) 496{ 497 return 0x00000001U; 498} 499static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) 500{ 501 return 0x40000000U; 502} 503static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) 504{ 505 return 0x001402a0U; 506} 507static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) 508{ 509 return (r >> 0U) & 0x1U; 510} 511static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) 512{ 513 return 0x00000001U; 514} 515static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) 516{ 517 return 0x1U; 518} 519static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) 520{ 521 return 0x001402a4U; 522} 523static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) 524{ 525 return (r >> 0U) & 0x1U; 526} 527static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) 528{ 529 return 0x00000001U; 530} 531static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) 532{ 533 return 0x1U; 534} 535static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) 536{ 537 return 0x001422a0U; 538} 539static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) 540{ 541 return (r >> 0U) & 0x1U; 542} 543static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) 544{ 545 return 0x00000001U; 546} 547static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) 548{ 549 return 0x1U; 550} 551static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) 552{ 553 return 0x001422a4U; 554} 555static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) 556{ 557 return (r >> 0U) & 0x1U; 558} 559static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) 560{ 561 return 0x00000001U; 562} 563static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) 564{ 565 return 0x1U; 566} 567static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void) 568{ 569 return 
0x0014058cU; 570} 571static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r) 572{ 573 return (r >> 0U) & 0xffffU; 574} 575static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r) 576{ 577 return (r >> 16U) & 0x1fU; 578} 579static inline u32 ltc_ltca_g_axi_pctrl_r(void) 580{ 581 return 0x00160000U; 582} 583static inline u32 ltc_ltca_g_axi_pctrl_user_sid_f(u32 v) 584{ 585 return (v & 0xffU) << 2U; 586} 587#endif
diff --git a/include/nvgpu/hw/gp10b/hw_mc_gp10b.h b/include/nvgpu/hw/gp10b/hw_mc_gp10b.h
deleted file mode 100644
index 39c132a..0000000
--- a/include/nvgpu/hw/gp10b/hw_mc_gp10b.h
+++ /dev/null
@@ -1,255 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_mc_gp10b_h_ 57#define _hw_mc_gp10b_h_ 58 59static inline u32 mc_boot_0_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 mc_boot_0_architecture_v(u32 r) 64{ 65 return (r >> 24U) & 0x1fU; 66} 67static inline u32 mc_boot_0_implementation_v(u32 r) 68{ 69 return (r >> 20U) & 0xfU; 70} 71static inline u32 mc_boot_0_major_revision_v(u32 r) 72{ 73 return (r >> 4U) & 0xfU; 74} 75static inline u32 mc_boot_0_minor_revision_v(u32 r) 76{ 77 return (r >> 0U) & 0xfU; 78} 79static inline u32 mc_intr_r(u32 i) 80{ 81 return 0x00000100U + i*4U; 82} 83static inline u32 mc_intr_pfifo_pending_f(void) 84{ 85 return 0x100U; 86} 87static inline u32 mc_intr_replayable_fault_pending_f(void) 88{ 89 return 0x200U; 90} 91static inline u32 mc_intr_pfb_pending_f(void) 92{ 93 return 0x2000U; 94} 95static inline u32 mc_intr_pgraph_pending_f(void) 96{ 97 return 0x1000U; 98} 99static inline u32 mc_intr_pmu_pending_f(void) 100{ 101 return 0x1000000U; 102} 103static inline u32 mc_intr_ltc_pending_f(void) 104{ 105 return 0x2000000U; 106} 107static inline u32 mc_intr_priv_ring_pending_f(void) 108{ 109 return 0x40000000U; 110} 111static inline u32 mc_intr_pbus_pending_f(void) 112{ 113 return 0x10000000U; 114} 115static inline u32 mc_intr_en_r(u32 i) 116{ 117 
return 0x00000140U + i*4U; 118} 119static inline u32 mc_intr_en_set_r(u32 i) 120{ 121 return 0x00000160U + i*4U; 122} 123static inline u32 mc_intr_en_clear_r(u32 i) 124{ 125 return 0x00000180U + i*4U; 126} 127static inline u32 mc_enable_r(void) 128{ 129 return 0x00000200U; 130} 131static inline u32 mc_enable_xbar_enabled_f(void) 132{ 133 return 0x4U; 134} 135static inline u32 mc_enable_l2_enabled_f(void) 136{ 137 return 0x8U; 138} 139static inline u32 mc_enable_pmedia_s(void) 140{ 141 return 1U; 142} 143static inline u32 mc_enable_pmedia_f(u32 v) 144{ 145 return (v & 0x1U) << 4U; 146} 147static inline u32 mc_enable_pmedia_m(void) 148{ 149 return 0x1U << 4U; 150} 151static inline u32 mc_enable_pmedia_v(u32 r) 152{ 153 return (r >> 4U) & 0x1U; 154} 155static inline u32 mc_enable_priv_ring_enabled_f(void) 156{ 157 return 0x20U; 158} 159static inline u32 mc_enable_ce0_m(void) 160{ 161 return 0x1U << 6U; 162} 163static inline u32 mc_enable_pfifo_enabled_f(void) 164{ 165 return 0x100U; 166} 167static inline u32 mc_enable_pgraph_enabled_f(void) 168{ 169 return 0x1000U; 170} 171static inline u32 mc_enable_pwr_v(u32 r) 172{ 173 return (r >> 13U) & 0x1U; 174} 175static inline u32 mc_enable_pwr_disabled_v(void) 176{ 177 return 0x00000000U; 178} 179static inline u32 mc_enable_pwr_enabled_f(void) 180{ 181 return 0x2000U; 182} 183static inline u32 mc_enable_pfb_enabled_f(void) 184{ 185 return 0x100000U; 186} 187static inline u32 mc_enable_ce2_m(void) 188{ 189 return 0x1U << 21U; 190} 191static inline u32 mc_enable_ce2_enabled_f(void) 192{ 193 return 0x200000U; 194} 195static inline u32 mc_enable_blg_enabled_f(void) 196{ 197 return 0x8000000U; 198} 199static inline u32 mc_enable_perfmon_enabled_f(void) 200{ 201 return 0x10000000U; 202} 203static inline u32 mc_enable_hub_enabled_f(void) 204{ 205 return 0x20000000U; 206} 207static inline u32 mc_intr_ltc_r(void) 208{ 209 return 0x000001c0U; 210} 211static inline u32 mc_enable_pb_r(void) 212{ 213 return 0x00000204U; 214} 215static 
inline u32 mc_enable_pb_0_s(void) 216{ 217 return 1U; 218} 219static inline u32 mc_enable_pb_0_f(u32 v) 220{ 221 return (v & 0x1U) << 0U; 222} 223static inline u32 mc_enable_pb_0_m(void) 224{ 225 return 0x1U << 0U; 226} 227static inline u32 mc_enable_pb_0_v(u32 r) 228{ 229 return (r >> 0U) & 0x1U; 230} 231static inline u32 mc_enable_pb_0_enabled_v(void) 232{ 233 return 0x00000001U; 234} 235static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) 236{ 237 return (v & 0x1U) << (0U + i*1U); 238} 239static inline u32 mc_elpg_enable_r(void) 240{ 241 return 0x0000020cU; 242} 243static inline u32 mc_elpg_enable_xbar_enabled_f(void) 244{ 245 return 0x4U; 246} 247static inline u32 mc_elpg_enable_pfb_enabled_f(void) 248{ 249 return 0x100000U; 250} 251static inline u32 mc_elpg_enable_hub_enabled_f(void) 252{ 253 return 0x20000000U; 254} 255#endif
diff --git a/include/nvgpu/hw/gp10b/hw_pbdma_gp10b.h b/include/nvgpu/hw/gp10b/hw_pbdma_gp10b.h
deleted file mode 100644
index 66e8ddb..0000000
--- a/include/nvgpu/hw/gp10b/hw_pbdma_gp10b.h
+++ /dev/null
@@ -1,615 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pbdma_gp10b_h_ 57#define _hw_pbdma_gp10b_h_ 58 59static inline u32 pbdma_gp_entry1_r(void) 60{ 61 return 0x10000004U; 62} 63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) 64{ 65 return (r >> 0U) & 0xffU; 66} 67static inline u32 pbdma_gp_entry1_length_f(u32 v) 68{ 69 return (v & 0x1fffffU) << 10U; 70} 71static inline u32 pbdma_gp_entry1_length_v(u32 r) 72{ 73 return (r >> 10U) & 0x1fffffU; 74} 75static inline u32 pbdma_gp_base_r(u32 i) 76{ 77 return 0x00040048U + i*8192U; 78} 79static inline u32 pbdma_gp_base__size_1_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 pbdma_gp_base_offset_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 3U; 86} 87static inline u32 pbdma_gp_base_rsvd_s(void) 88{ 89 return 3U; 90} 91static inline u32 pbdma_gp_base_hi_r(u32 i) 92{ 93 return 0x0004004cU + i*8192U; 94} 95static inline u32 pbdma_gp_base_hi_offset_f(u32 v) 96{ 97 return (v & 0xffU) << 0U; 98} 99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) 100{ 101 return (v & 0x1fU) << 16U; 102} 103static inline u32 pbdma_gp_fetch_r(u32 i) 104{ 105 return 0x00040050U + i*8192U; 106} 107static inline u32 pbdma_gp_get_r(u32 i) 108{ 109 return 0x00040014U + i*8192U; 110} 111static inline u32 pbdma_gp_put_r(u32 i) 112{ 113 return 0x00040000U + 
i*8192U; 114} 115static inline u32 pbdma_pb_fetch_r(u32 i) 116{ 117 return 0x00040054U + i*8192U; 118} 119static inline u32 pbdma_pb_fetch_hi_r(u32 i) 120{ 121 return 0x00040058U + i*8192U; 122} 123static inline u32 pbdma_get_r(u32 i) 124{ 125 return 0x00040018U + i*8192U; 126} 127static inline u32 pbdma_get_hi_r(u32 i) 128{ 129 return 0x0004001cU + i*8192U; 130} 131static inline u32 pbdma_put_r(u32 i) 132{ 133 return 0x0004005cU + i*8192U; 134} 135static inline u32 pbdma_put_hi_r(u32 i) 136{ 137 return 0x00040060U + i*8192U; 138} 139static inline u32 pbdma_formats_r(u32 i) 140{ 141 return 0x0004009cU + i*8192U; 142} 143static inline u32 pbdma_formats_gp_fermi0_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 pbdma_formats_pb_fermi1_f(void) 148{ 149 return 0x100U; 150} 151static inline u32 pbdma_formats_mp_fermi0_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 pbdma_pb_header_r(u32 i) 156{ 157 return 0x00040084U + i*8192U; 158} 159static inline u32 pbdma_pb_header_priv_user_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 pbdma_pb_header_method_zero_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 pbdma_pb_header_subchannel_zero_f(void) 168{ 169 return 0x0U; 170} 171static inline u32 pbdma_pb_header_level_main_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 pbdma_pb_header_first_true_f(void) 176{ 177 return 0x400000U; 178} 179static inline u32 pbdma_pb_header_type_inc_f(void) 180{ 181 return 0x20000000U; 182} 183static inline u32 pbdma_pb_header_type_non_inc_f(void) 184{ 185 return 0x60000000U; 186} 187static inline u32 pbdma_hdr_shadow_r(u32 i) 188{ 189 return 0x00040118U + i*8192U; 190} 191static inline u32 pbdma_gp_shadow_0_r(u32 i) 192{ 193 return 0x00040110U + i*8192U; 194} 195static inline u32 pbdma_gp_shadow_1_r(u32 i) 196{ 197 return 0x00040114U + i*8192U; 198} 199static inline u32 pbdma_subdevice_r(u32 i) 200{ 201 return 0x00040094U + i*8192U; 202} 203static inline u32 pbdma_subdevice_id_f(u32 v) 204{ 205 return (v & 
0xfffU) << 0U; 206} 207static inline u32 pbdma_subdevice_status_active_f(void) 208{ 209 return 0x10000000U; 210} 211static inline u32 pbdma_subdevice_channel_dma_enable_f(void) 212{ 213 return 0x20000000U; 214} 215static inline u32 pbdma_method0_r(u32 i) 216{ 217 return 0x000400c0U + i*8192U; 218} 219static inline u32 pbdma_method0_fifo_size_v(void) 220{ 221 return 0x00000004U; 222} 223static inline u32 pbdma_method0_addr_f(u32 v) 224{ 225 return (v & 0xfffU) << 2U; 226} 227static inline u32 pbdma_method0_addr_v(u32 r) 228{ 229 return (r >> 2U) & 0xfffU; 230} 231static inline u32 pbdma_method0_subch_v(u32 r) 232{ 233 return (r >> 16U) & 0x7U; 234} 235static inline u32 pbdma_method0_first_true_f(void) 236{ 237 return 0x400000U; 238} 239static inline u32 pbdma_method0_valid_true_f(void) 240{ 241 return 0x80000000U; 242} 243static inline u32 pbdma_method1_r(u32 i) 244{ 245 return 0x000400c8U + i*8192U; 246} 247static inline u32 pbdma_method2_r(u32 i) 248{ 249 return 0x000400d0U + i*8192U; 250} 251static inline u32 pbdma_method3_r(u32 i) 252{ 253 return 0x000400d8U + i*8192U; 254} 255static inline u32 pbdma_data0_r(u32 i) 256{ 257 return 0x000400c4U + i*8192U; 258} 259static inline u32 pbdma_target_r(u32 i) 260{ 261 return 0x000400acU + i*8192U; 262} 263static inline u32 pbdma_target_engine_sw_f(void) 264{ 265 return 0x1fU; 266} 267static inline u32 pbdma_acquire_r(u32 i) 268{ 269 return 0x00040030U + i*8192U; 270} 271static inline u32 pbdma_acquire_retry_man_2_f(void) 272{ 273 return 0x2U; 274} 275static inline u32 pbdma_acquire_retry_exp_2_f(void) 276{ 277 return 0x100U; 278} 279static inline u32 pbdma_acquire_timeout_exp_f(u32 v) 280{ 281 return (v & 0xfU) << 11U; 282} 283static inline u32 pbdma_acquire_timeout_exp_max_v(void) 284{ 285 return 0x0000000fU; 286} 287static inline u32 pbdma_acquire_timeout_exp_max_f(void) 288{ 289 return 0x7800U; 290} 291static inline u32 pbdma_acquire_timeout_man_f(u32 v) 292{ 293 return (v & 0xffffU) << 15U; 294} 295static inline u32 
pbdma_acquire_timeout_man_max_v(void) 296{ 297 return 0x0000ffffU; 298} 299static inline u32 pbdma_acquire_timeout_man_max_f(void) 300{ 301 return 0x7fff8000U; 302} 303static inline u32 pbdma_acquire_timeout_en_enable_f(void) 304{ 305 return 0x80000000U; 306} 307static inline u32 pbdma_acquire_timeout_en_disable_f(void) 308{ 309 return 0x0U; 310} 311static inline u32 pbdma_status_r(u32 i) 312{ 313 return 0x00040100U + i*8192U; 314} 315static inline u32 pbdma_channel_r(u32 i) 316{ 317 return 0x00040120U + i*8192U; 318} 319static inline u32 pbdma_signature_r(u32 i) 320{ 321 return 0x00040010U + i*8192U; 322} 323static inline u32 pbdma_signature_hw_valid_f(void) 324{ 325 return 0xfaceU; 326} 327static inline u32 pbdma_signature_sw_zero_f(void) 328{ 329 return 0x0U; 330} 331static inline u32 pbdma_userd_r(u32 i) 332{ 333 return 0x00040008U + i*8192U; 334} 335static inline u32 pbdma_userd_target_vid_mem_f(void) 336{ 337 return 0x0U; 338} 339static inline u32 pbdma_userd_target_sys_mem_coh_f(void) 340{ 341 return 0x2U; 342} 343static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) 344{ 345 return 0x3U; 346} 347static inline u32 pbdma_userd_addr_f(u32 v) 348{ 349 return (v & 0x7fffffU) << 9U; 350} 351static inline u32 pbdma_userd_hi_r(u32 i) 352{ 353 return 0x0004000cU + i*8192U; 354} 355static inline u32 pbdma_userd_hi_addr_f(u32 v) 356{ 357 return (v & 0xffU) << 0U; 358} 359static inline u32 pbdma_config_r(u32 i) 360{ 361 return 0x000400f4U + i*8192U; 362} 363static inline u32 pbdma_config_auth_level_privileged_f(void) 364{ 365 return 0x100U; 366} 367static inline u32 pbdma_hce_ctrl_r(u32 i) 368{ 369 return 0x000400e4U + i*8192U; 370} 371static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) 372{ 373 return 0x20U; 374} 375static inline u32 pbdma_intr_0_r(u32 i) 376{ 377 return 0x00040108U + i*8192U; 378} 379static inline u32 pbdma_intr_0_memreq_v(u32 r) 380{ 381 return (r >> 0U) & 0x1U; 382} 383static inline u32 pbdma_intr_0_memreq_pending_f(void) 384{ 385 return 
0x1U; 386} 387static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) 388{ 389 return 0x2U; 390} 391static inline u32 pbdma_intr_0_memack_extra_pending_f(void) 392{ 393 return 0x4U; 394} 395static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) 396{ 397 return 0x8U; 398} 399static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) 400{ 401 return 0x10U; 402} 403static inline u32 pbdma_intr_0_memflush_pending_f(void) 404{ 405 return 0x20U; 406} 407static inline u32 pbdma_intr_0_memop_pending_f(void) 408{ 409 return 0x40U; 410} 411static inline u32 pbdma_intr_0_lbconnect_pending_f(void) 412{ 413 return 0x80U; 414} 415static inline u32 pbdma_intr_0_lbreq_pending_f(void) 416{ 417 return 0x100U; 418} 419static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) 420{ 421 return 0x200U; 422} 423static inline u32 pbdma_intr_0_lback_extra_pending_f(void) 424{ 425 return 0x400U; 426} 427static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) 428{ 429 return 0x800U; 430} 431static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) 432{ 433 return 0x1000U; 434} 435static inline u32 pbdma_intr_0_gpfifo_pending_f(void) 436{ 437 return 0x2000U; 438} 439static inline u32 pbdma_intr_0_gpptr_pending_f(void) 440{ 441 return 0x4000U; 442} 443static inline u32 pbdma_intr_0_gpentry_pending_f(void) 444{ 445 return 0x8000U; 446} 447static inline u32 pbdma_intr_0_gpcrc_pending_f(void) 448{ 449 return 0x10000U; 450} 451static inline u32 pbdma_intr_0_pbptr_pending_f(void) 452{ 453 return 0x20000U; 454} 455static inline u32 pbdma_intr_0_pbentry_pending_f(void) 456{ 457 return 0x40000U; 458} 459static inline u32 pbdma_intr_0_pbcrc_pending_f(void) 460{ 461 return 0x80000U; 462} 463static inline u32 pbdma_intr_0_xbarconnect_pending_f(void) 464{ 465 return 0x100000U; 466} 467static inline u32 pbdma_intr_0_method_pending_f(void) 468{ 469 return 0x200000U; 470} 471static inline u32 pbdma_intr_0_methodcrc_pending_f(void) 472{ 473 return 0x400000U; 474} 475static inline u32 
pbdma_intr_0_device_pending_f(void) 476{ 477 return 0x800000U; 478} 479static inline u32 pbdma_intr_0_semaphore_pending_f(void) 480{ 481 return 0x2000000U; 482} 483static inline u32 pbdma_intr_0_acquire_pending_f(void) 484{ 485 return 0x4000000U; 486} 487static inline u32 pbdma_intr_0_pri_pending_f(void) 488{ 489 return 0x8000000U; 490} 491static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) 492{ 493 return 0x20000000U; 494} 495static inline u32 pbdma_intr_0_pbseg_pending_f(void) 496{ 497 return 0x40000000U; 498} 499static inline u32 pbdma_intr_0_signature_pending_f(void) 500{ 501 return 0x80000000U; 502} 503static inline u32 pbdma_intr_0_syncpoint_illegal_pending_f(void) 504{ 505 return 0x10000000U; 506} 507static inline u32 pbdma_intr_1_r(u32 i) 508{ 509 return 0x00040148U + i*8192U; 510} 511static inline u32 pbdma_intr_en_0_r(u32 i) 512{ 513 return 0x0004010cU + i*8192U; 514} 515static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) 516{ 517 return 0x100U; 518} 519static inline u32 pbdma_intr_en_1_r(u32 i) 520{ 521 return 0x0004014cU + i*8192U; 522} 523static inline u32 pbdma_intr_stall_r(u32 i) 524{ 525 return 0x0004013cU + i*8192U; 526} 527static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) 528{ 529 return 0x100U; 530} 531static inline u32 pbdma_intr_stall_1_r(u32 i) 532{ 533 return 0x00040140U + i*8192U; 534} 535static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void) 536{ 537 return 0x1U; 538} 539static inline u32 pbdma_udma_nop_r(void) 540{ 541 return 0x00000008U; 542} 543static inline u32 pbdma_allowed_syncpoints_r(u32 i) 544{ 545 return 0x000400e8U + i*8192U; 546} 547static inline u32 pbdma_allowed_syncpoints_0_valid_f(u32 v) 548{ 549 return (v & 0x1U) << 31U; 550} 551static inline u32 pbdma_allowed_syncpoints_0_index_f(u32 v) 552{ 553 return (v & 0x7fffU) << 16U; 554} 555static inline u32 pbdma_allowed_syncpoints_0_index_v(u32 r) 556{ 557 return (r >> 16U) & 0x7fffU; 558} 559static inline u32 pbdma_allowed_syncpoints_1_valid_f(u32 
v) 560{ 561 return (v & 0x1U) << 15U; 562} 563static inline u32 pbdma_allowed_syncpoints_1_index_f(u32 v) 564{ 565 return (v & 0x7fffU) << 0U; 566} 567static inline u32 pbdma_syncpointa_r(u32 i) 568{ 569 return 0x000400a4U + i*8192U; 570} 571static inline u32 pbdma_syncpointa_payload_v(u32 r) 572{ 573 return (r >> 0U) & 0xffffffffU; 574} 575static inline u32 pbdma_syncpointb_r(u32 i) 576{ 577 return 0x000400a8U + i*8192U; 578} 579static inline u32 pbdma_syncpointb_op_v(u32 r) 580{ 581 return (r >> 0U) & 0x1U; 582} 583static inline u32 pbdma_syncpointb_op_wait_v(void) 584{ 585 return 0x00000000U; 586} 587static inline u32 pbdma_syncpointb_wait_switch_v(u32 r) 588{ 589 return (r >> 4U) & 0x1U; 590} 591static inline u32 pbdma_syncpointb_wait_switch_en_v(void) 592{ 593 return 0x00000001U; 594} 595static inline u32 pbdma_syncpointb_syncpt_index_v(u32 r) 596{ 597 return (r >> 8U) & 0xfffU; 598} 599static inline u32 pbdma_runlist_timeslice_r(u32 i) 600{ 601 return 0x000400f8U + i*8192U; 602} 603static inline u32 pbdma_runlist_timeslice_timeout_128_f(void) 604{ 605 return 0x80U; 606} 607static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) 608{ 609 return 0x3000U; 610} 611static inline u32 pbdma_runlist_timeslice_enable_true_f(void) 612{ 613 return 0x10000000U; 614} 615#endif
diff --git a/include/nvgpu/hw/gp10b/hw_perf_gp10b.h b/include/nvgpu/hw/gp10b/hw_perf_gp10b.h
deleted file mode 100644
index 43424e1..0000000
--- a/include/nvgpu/hw/gp10b/hw_perf_gp10b.h
+++ /dev/null
@@ -1,219 +0,0 @@ 1/* 2 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_perf_gp10b_h_ 57#define _hw_perf_gp10b_h_ 58 59static inline u32 perf_pmmsys_base_v(void) 60{ 61 return 0x001b0000U; 62} 63static inline u32 perf_pmmsys_extent_v(void) 64{ 65 return 0x001b0fffU; 66} 67static inline u32 perf_pmasys_control_r(void) 68{ 69 return 0x001b4000U; 70} 71static inline u32 perf_pmasys_control_membuf_status_v(u32 r) 72{ 73 return (r >> 4U) & 0x1U; 74} 75static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) 76{ 77 return 0x00000001U; 78} 79static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) 84{ 85 return (v & 0x1U) << 5U; 86} 87static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) 88{ 89 return (r >> 5U) & 0x1U; 90} 91static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void) 92{ 93 return 0x00000001U; 94} 95static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) 96{ 97 return 0x20U; 98} 99static inline u32 perf_pmasys_mem_block_r(void) 100{ 101 return 0x001b4070U; 102} 103static inline u32 perf_pmasys_mem_block_base_f(u32 v) 104{ 105 return (v & 0xfffffffU) << 0U; 106} 107static inline u32 perf_pmasys_mem_block_target_f(u32 v) 108{ 109 
return (v & 0x3U) << 28U; 110} 111static inline u32 perf_pmasys_mem_block_target_v(u32 r) 112{ 113 return (r >> 28U) & 0x3U; 114} 115static inline u32 perf_pmasys_mem_block_target_lfb_v(void) 116{ 117 return 0x00000000U; 118} 119static inline u32 perf_pmasys_mem_block_target_lfb_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) 124{ 125 return 0x00000002U; 126} 127static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) 128{ 129 return 0x20000000U; 130} 131static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) 132{ 133 return 0x00000003U; 134} 135static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) 136{ 137 return 0x30000000U; 138} 139static inline u32 perf_pmasys_mem_block_valid_f(u32 v) 140{ 141 return (v & 0x1U) << 31U; 142} 143static inline u32 perf_pmasys_mem_block_valid_v(u32 r) 144{ 145 return (r >> 31U) & 0x1U; 146} 147static inline u32 perf_pmasys_mem_block_valid_true_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 perf_pmasys_mem_block_valid_true_f(void) 152{ 153 return 0x80000000U; 154} 155static inline u32 perf_pmasys_mem_block_valid_false_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 perf_pmasys_mem_block_valid_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 perf_pmasys_outbase_r(void) 164{ 165 return 0x001b4074U; 166} 167static inline u32 perf_pmasys_outbase_ptr_f(u32 v) 168{ 169 return (v & 0x7ffffffU) << 5U; 170} 171static inline u32 perf_pmasys_outbaseupper_r(void) 172{ 173 return 0x001b4078U; 174} 175static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) 176{ 177 return (v & 0xffU) << 0U; 178} 179static inline u32 perf_pmasys_outsize_r(void) 180{ 181 return 0x001b407cU; 182} 183static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) 184{ 185 return (v & 0x7ffffffU) << 5U; 186} 187static inline u32 perf_pmasys_mem_bytes_r(void) 188{ 189 return 0x001b4084U; 190} 191static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) 192{ 
193 return (v & 0xfffffffU) << 4U; 194} 195static inline u32 perf_pmasys_mem_bump_r(void) 196{ 197 return 0x001b4088U; 198} 199static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) 200{ 201 return (v & 0xfffffffU) << 4U; 202} 203static inline u32 perf_pmasys_enginestatus_r(void) 204{ 205 return 0x001b40a4U; 206} 207static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) 208{ 209 return (v & 0x1U) << 4U; 210} 211static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) 216{ 217 return 0x10U; 218} 219#endif
diff --git a/include/nvgpu/hw/gp10b/hw_pram_gp10b.h b/include/nvgpu/hw/gp10b/hw_pram_gp10b.h
deleted file mode 100644
index aef0e69..0000000
--- a/include/nvgpu/hw/gp10b/hw_pram_gp10b.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pram_gp10b_h_ 57#define _hw_pram_gp10b_h_ 58 59static inline u32 pram_data032_r(u32 i) 60{ 61 return 0x00700000U + i*4U; 62} 63#endif
diff --git a/include/nvgpu/hw/gp10b/hw_pri_ringmaster_gp10b.h b/include/nvgpu/hw/gp10b/hw_pri_ringmaster_gp10b.h
deleted file mode 100644
index 03a3854..0000000
--- a/include/nvgpu/hw/gp10b/hw_pri_ringmaster_gp10b.h
+++ /dev/null
@@ -1,167 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringmaster_gp10b_h_ 57#define _hw_pri_ringmaster_gp10b_h_ 58 59static inline u32 pri_ringmaster_command_r(void) 60{ 61 return 0x0012004cU; 62} 63static inline u32 pri_ringmaster_command_cmd_m(void) 64{ 65 return 0x3fU << 0U; 66} 67static inline u32 pri_ringmaster_command_cmd_v(u32 r) 68{ 69 return (r >> 0U) & 0x3fU; 70} 71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 pri_ringmaster_command_data_r(void) 92{ 93 return 0x00120048U; 94} 95static inline u32 pri_ringmaster_start_results_r(void) 96{ 97 return 0x00120050U; 98} 99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) 104{ 105 return 0x00000001U; 106} 107static inline u32 
pri_ringmaster_intr_status0_r(void) 108{ 109 return 0x00120058U; 110} 111static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) 112{ 113 return (r >> 0U) & 0x1U; 114} 115static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) 116{ 117 return (r >> 1U) & 0x1U; 118} 119static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) 120{ 121 return (r >> 2U) & 0x1U; 122} 123static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) 124{ 125 return (r >> 8U) & 0x1U; 126} 127static inline u32 pri_ringmaster_intr_status1_r(void) 128{ 129 return 0x0012005cU; 130} 131static inline u32 pri_ringmaster_global_ctl_r(void) 132{ 133 return 0x00120060U; 134} 135static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) 136{ 137 return 0x1U; 138} 139static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 pri_ringmaster_enum_fbp_r(void) 144{ 145 return 0x00120074U; 146} 147static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) 148{ 149 return (r >> 0U) & 0x1fU; 150} 151static inline u32 pri_ringmaster_enum_gpc_r(void) 152{ 153 return 0x00120078U; 154} 155static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) 156{ 157 return (r >> 0U) & 0x1fU; 158} 159static inline u32 pri_ringmaster_enum_ltc_r(void) 160{ 161 return 0x0012006cU; 162} 163static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) 164{ 165 return (r >> 0U) & 0x1fU; 166} 167#endif
diff --git a/include/nvgpu/hw/gp10b/hw_pri_ringstation_gpc_gp10b.h b/include/nvgpu/hw/gp10b/hw_pri_ringstation_gpc_gp10b.h
deleted file mode 100644
index ba55658..0000000
--- a/include/nvgpu/hw/gp10b/hw_pri_ringstation_gpc_gp10b.h
+++ /dev/null
@@ -1,87 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_gpc_gp10b_h_ 57#define _hw_pri_ringstation_gpc_gp10b_h_ 58 59static inline u32 pri_ringstation_gpc_master_config_r(u32 i) 60{ 61 return 0x00128300U + i*4U; 62} 63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) 64{ 65 return 0x00128120U; 66} 67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) 68{ 69 return 0x00128124U; 70} 71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) 72{ 73 return 0x00128128U; 74} 75static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_subid_v(u32 r) 76{ 77 return (r >> 24U) & 0x3fU; 78} 79static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_priv_level_v(u32 r) 80{ 81 return (r >> 20U) & 0x3U; 82} 83static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) 84{ 85 return 0x0012812cU; 86} 87#endif
diff --git a/include/nvgpu/hw/gp10b/hw_pri_ringstation_sys_gp10b.h b/include/nvgpu/hw/gp10b/hw_pri_ringstation_sys_gp10b.h
deleted file mode 100644
index 1dcb1a3..0000000
--- a/include/nvgpu/hw/gp10b/hw_pri_ringstation_sys_gp10b.h
+++ /dev/null
@@ -1,99 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_sys_gp10b_h_ 57#define _hw_pri_ringstation_sys_gp10b_h_ 58 59static inline u32 pri_ringstation_sys_master_config_r(u32 i) 60{ 61 return 0x00122300U + i*4U; 62} 63static inline u32 pri_ringstation_sys_decode_config_r(void) 64{ 65 return 0x00122204U; 66} 67static inline u32 pri_ringstation_sys_decode_config_ring_m(void) 68{ 69 return 0x7U << 0U; 70} 71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 pri_ringstation_sys_priv_error_adr_r(void) 76{ 77 return 0x00122120U; 78} 79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) 80{ 81 return 0x00122124U; 82} 83static inline u32 pri_ringstation_sys_priv_error_info_r(void) 84{ 85 return 0x00122128U; 86} 87static inline u32 pri_ringstation_sys_priv_error_info_subid_v(u32 r) 88{ 89 return (r >> 24U) & 0x3fU; 90} 91static inline u32 pri_ringstation_sys_priv_error_info_priv_level_v(u32 r) 92{ 93 return (r >> 20U) & 0x3U; 94} 95static inline u32 pri_ringstation_sys_priv_error_code_r(void) 96{ 97 return 0x0012212cU; 98} 99#endif
diff --git a/include/nvgpu/hw/gp10b/hw_proj_gp10b.h b/include/nvgpu/hw/gp10b/hw_proj_gp10b.h
deleted file mode 100644
index a885e93..0000000
--- a/include/nvgpu/hw/gp10b/hw_proj_gp10b.h
+++ /dev/null
@@ -1,179 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_proj_gp10b_h_ 57#define _hw_proj_gp10b_h_ 58 59static inline u32 proj_gpc_base_v(void) 60{ 61 return 0x00500000U; 62} 63static inline u32 proj_gpc_shared_base_v(void) 64{ 65 return 0x00418000U; 66} 67static inline u32 proj_gpc_stride_v(void) 68{ 69 return 0x00008000U; 70} 71static inline u32 proj_gpc_priv_stride_v(void) 72{ 73 return 0x00000800U; 74} 75static inline u32 proj_ltc_stride_v(void) 76{ 77 return 0x00002000U; 78} 79static inline u32 proj_lts_stride_v(void) 80{ 81 return 0x00000200U; 82} 83static inline u32 proj_fbpa_base_v(void) 84{ 85 return 0x00900000U; 86} 87static inline u32 proj_fbpa_shared_base_v(void) 88{ 89 return 0x009a0000U; 90} 91static inline u32 proj_fbpa_stride_v(void) 92{ 93 return 0x00004000U; 94} 95static inline u32 proj_ppc_in_gpc_base_v(void) 96{ 97 return 0x00003000U; 98} 99static inline u32 proj_ppc_in_gpc_shared_base_v(void) 100{ 101 return 0x00003e00U; 102} 103static inline u32 proj_ppc_in_gpc_stride_v(void) 104{ 105 return 0x00000200U; 106} 107static inline u32 proj_rop_base_v(void) 108{ 109 return 0x00410000U; 110} 111static inline u32 proj_rop_shared_base_v(void) 112{ 113 return 0x00408800U; 114} 115static inline u32 proj_rop_stride_v(void) 116{ 117 return 0x00000400U; 118} 119static inline u32 
proj_tpc_in_gpc_base_v(void) 120{ 121 return 0x00004000U; 122} 123static inline u32 proj_tpc_in_gpc_stride_v(void) 124{ 125 return 0x00000800U; 126} 127static inline u32 proj_tpc_in_gpc_shared_base_v(void) 128{ 129 return 0x00001800U; 130} 131static inline u32 proj_host_num_engines_v(void) 132{ 133 return 0x00000002U; 134} 135static inline u32 proj_host_num_pbdma_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) 140{ 141 return 0x00000002U; 142} 143static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) 144{ 145 return 0x00000001U; 146} 147static inline u32 proj_scal_litter_num_fbps_v(void) 148{ 149 return 0x00000001U; 150} 151static inline u32 proj_scal_litter_num_fbpas_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 proj_scal_litter_num_gpcs_v(void) 156{ 157 return 0x00000001U; 158} 159static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) 164{ 165 return 0x00000002U; 166} 167static inline u32 proj_scal_litter_num_zcull_banks_v(void) 168{ 169 return 0x00000004U; 170} 171static inline u32 proj_scal_max_gpcs_v(void) 172{ 173 return 0x00000020U; 174} 175static inline u32 proj_scal_max_tpc_per_gpc_v(void) 176{ 177 return 0x00000008U; 178} 179#endif
diff --git a/include/nvgpu/hw/gp10b/hw_pwr_gp10b.h b/include/nvgpu/hw/gp10b/hw_pwr_gp10b.h
deleted file mode 100644
index f067be7..0000000
--- a/include/nvgpu/hw/gp10b/hw_pwr_gp10b.h
+++ /dev/null
@@ -1,883 +0,0 @@ 1/* 2 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pwr_gp10b_h_ 57#define _hw_pwr_gp10b_h_ 58 59static inline u32 pwr_falcon_irqsset_r(void) 60{ 61 return 0x0010a000U; 62} 63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pwr_falcon_irqsclr_r(void) 68{ 69 return 0x0010a004U; 70} 71static inline u32 pwr_falcon_irqstat_r(void) 72{ 73 return 0x0010a008U; 74} 75static inline u32 pwr_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pwr_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pwr_falcon_irqmode_r(void) 88{ 89 return 0x0010a00cU; 90} 91static inline u32 pwr_falcon_irqmset_r(void) 92{ 93 return 0x0010a010U; 94} 95static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 pwr_falcon_irqmset_halt_f(u32 v) 112{ 113 return (v & 0x1U) << 4U; 114} 115static inline u32 
pwr_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 pwr_falcon_irqmclr_r(void) 128{ 129 return 0x0010a014U; 130} 131static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 pwr_falcon_irqmask_r(void) 168{ 169 return 0x0010a018U; 170} 171static inline u32 pwr_falcon_irqdest_r(void) 172{ 173 return 0x0010a01cU; 174} 175static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 
pwr_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 pwr_falcon_curctx_r(void) 248{ 249 return 0x0010a050U; 250} 251static inline u32 pwr_falcon_nxtctx_r(void) 252{ 253 return 0x0010a054U; 254} 255static inline u32 pwr_falcon_mailbox0_r(void) 256{ 257 return 0x0010a040U; 258} 259static inline u32 pwr_falcon_mailbox1_r(void) 260{ 261 return 0x0010a044U; 262} 263static inline u32 pwr_falcon_itfen_r(void) 264{ 265 return 0x0010a048U; 266} 267static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 pwr_falcon_idlestate_r(void) 272{ 273 return 0x0010a04cU; 274} 275static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 
pwr_falcon_os_r(void) 284{ 285 return 0x0010a080U; 286} 287static inline u32 pwr_falcon_engctl_r(void) 288{ 289 return 0x0010a0a4U; 290} 291static inline u32 pwr_falcon_cpuctl_r(void) 292{ 293 return 0x0010a100U; 294} 295static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) 300{ 301 return (v & 0x1U) << 4U; 302} 303static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) 304{ 305 return 0x1U << 4U; 306} 307static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) 308{ 309 return (r >> 4U) & 0x1U; 310} 311static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 312{ 313 return (v & 0x1U) << 6U; 314} 315static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void) 316{ 317 return 0x1U << 6U; 318} 319static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 320{ 321 return (r >> 6U) & 0x1U; 322} 323static inline u32 pwr_falcon_cpuctl_alias_r(void) 324{ 325 return 0x0010a130U; 326} 327static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) 328{ 329 return (v & 0x1U) << 1U; 330} 331static inline u32 pwr_pmu_scpctl_stat_r(void) 332{ 333 return 0x0010ac08U; 334} 335static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) 336{ 337 return (v & 0x1U) << 20U; 338} 339static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) 340{ 341 return 0x1U << 20U; 342} 343static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) 344{ 345 return (r >> 20U) & 0x1U; 346} 347static inline u32 pwr_falcon_imemc_r(u32 i) 348{ 349 return 0x0010a180U + i*16U; 350} 351static inline u32 pwr_falcon_imemc_offs_f(u32 v) 352{ 353 return (v & 0x3fU) << 2U; 354} 355static inline u32 pwr_falcon_imemc_blk_f(u32 v) 356{ 357 return (v & 0xffU) << 8U; 358} 359static inline u32 pwr_falcon_imemc_aincw_f(u32 v) 360{ 361 return (v & 0x1U) << 24U; 362} 363static inline u32 pwr_falcon_imemd_r(u32 i) 364{ 365 return 0x0010a184U + i*16U; 366} 367static inline u32 pwr_falcon_imemt_r(u32 i) 368{ 369 return 0x0010a188U + 
i*16U; 370} 371static inline u32 pwr_falcon_sctl_r(void) 372{ 373 return 0x0010a240U; 374} 375static inline u32 pwr_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 pwr_falcon_bootvec_r(void) 380{ 381 return 0x0010a104U; 382} 383static inline u32 pwr_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 pwr_falcon_dmactl_r(void) 388{ 389 return 0x0010a10cU; 390} 391static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 pwr_falcon_hwcfg_r(void) 400{ 401 return 0x0010a108U; 402} 403static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) 404{ 405 return (r >> 0U) & 0x1ffU; 406} 407static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) 408{ 409 return (r >> 9U) & 0x1ffU; 410} 411static inline u32 pwr_falcon_dmatrfbase_r(void) 412{ 413 return 0x0010a110U; 414} 415static inline u32 pwr_falcon_dmatrfbase1_r(void) 416{ 417 return 0x0010a128U; 418} 419static inline u32 pwr_falcon_dmatrfmoffs_r(void) 420{ 421 return 0x0010a114U; 422} 423static inline u32 pwr_falcon_dmatrfcmd_r(void) 424{ 425 return 0x0010a118U; 426} 427static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) 428{ 429 return (v & 0x1U) << 4U; 430} 431static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) 432{ 433 return (v & 0x1U) << 5U; 434} 435static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) 436{ 437 return (v & 0x7U) << 8U; 438} 439static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) 440{ 441 return (v & 0x7U) << 12U; 442} 443static inline u32 pwr_falcon_dmatrffboffs_r(void) 444{ 445 return 0x0010a11cU; 446} 447static inline u32 pwr_falcon_exterraddr_r(void) 448{ 449 return 0x0010a168U; 450} 451static inline u32 pwr_falcon_exterrstat_r(void) 452{ 453 return 0x0010a16cU; 454} 455static inline u32 pwr_falcon_exterrstat_valid_m(void) 456{ 457 return 0x1U << 31U; 458} 459static inline u32 
pwr_falcon_exterrstat_valid_v(u32 r) 460{ 461 return (r >> 31U) & 0x1U; 462} 463static inline u32 pwr_falcon_exterrstat_valid_true_v(void) 464{ 465 return 0x00000001U; 466} 467static inline u32 pwr_pmu_falcon_icd_cmd_r(void) 468{ 469 return 0x0010a200U; 470} 471static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) 472{ 473 return 4U; 474} 475static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) 476{ 477 return (v & 0xfU) << 0U; 478} 479static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) 480{ 481 return 0xfU << 0U; 482} 483static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) 484{ 485 return (r >> 0U) & 0xfU; 486} 487static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) 488{ 489 return 0x8U; 490} 491static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) 492{ 493 return 0xeU; 494} 495static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) 496{ 497 return (v & 0x1fU) << 8U; 498} 499static inline u32 pwr_pmu_falcon_icd_rdata_r(void) 500{ 501 return 0x0010a20cU; 502} 503static inline u32 pwr_falcon_dmemc_r(u32 i) 504{ 505 return 0x0010a1c0U + i*8U; 506} 507static inline u32 pwr_falcon_dmemc_offs_f(u32 v) 508{ 509 return (v & 0x3fU) << 2U; 510} 511static inline u32 pwr_falcon_dmemc_offs_m(void) 512{ 513 return 0x3fU << 2U; 514} 515static inline u32 pwr_falcon_dmemc_blk_f(u32 v) 516{ 517 return (v & 0xffU) << 8U; 518} 519static inline u32 pwr_falcon_dmemc_blk_m(void) 520{ 521 return 0xffU << 8U; 522} 523static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) 524{ 525 return (v & 0x1U) << 24U; 526} 527static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) 528{ 529 return (v & 0x1U) << 25U; 530} 531static inline u32 pwr_falcon_dmemd_r(u32 i) 532{ 533 return 0x0010a1c4U + i*8U; 534} 535static inline u32 pwr_pmu_new_instblk_r(void) 536{ 537 return 0x0010a480U; 538} 539static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) 540{ 541 return (v & 0xfffffffU) << 0U; 542} 543static inline u32 pwr_pmu_new_instblk_target_fb_f(void) 544{ 545 return 0x0U; 546} 547static inline u32 
pwr_pmu_new_instblk_target_sys_coh_f(void) 548{ 549 return 0x20000000U; 550} 551static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) 552{ 553 return 0x30000000U; 554} 555static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) 556{ 557 return (v & 0x1U) << 30U; 558} 559static inline u32 pwr_pmu_mutex_id_r(void) 560{ 561 return 0x0010a488U; 562} 563static inline u32 pwr_pmu_mutex_id_value_v(u32 r) 564{ 565 return (r >> 0U) & 0xffU; 566} 567static inline u32 pwr_pmu_mutex_id_value_init_v(void) 568{ 569 return 0x00000000U; 570} 571static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) 572{ 573 return 0x000000ffU; 574} 575static inline u32 pwr_pmu_mutex_id_release_r(void) 576{ 577 return 0x0010a48cU; 578} 579static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) 580{ 581 return (v & 0xffU) << 0U; 582} 583static inline u32 pwr_pmu_mutex_id_release_value_m(void) 584{ 585 return 0xffU << 0U; 586} 587static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) 588{ 589 return 0x00000000U; 590} 591static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) 592{ 593 return 0x0U; 594} 595static inline u32 pwr_pmu_mutex_r(u32 i) 596{ 597 return 0x0010a580U + i*4U; 598} 599static inline u32 pwr_pmu_mutex__size_1_v(void) 600{ 601 return 0x00000010U; 602} 603static inline u32 pwr_pmu_mutex_value_f(u32 v) 604{ 605 return (v & 0xffU) << 0U; 606} 607static inline u32 pwr_pmu_mutex_value_v(u32 r) 608{ 609 return (r >> 0U) & 0xffU; 610} 611static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) 612{ 613 return 0x0U; 614} 615static inline u32 pwr_pmu_queue_head_r(u32 i) 616{ 617 return 0x0010a4a0U + i*4U; 618} 619static inline u32 pwr_pmu_queue_head__size_1_v(void) 620{ 621 return 0x00000004U; 622} 623static inline u32 pwr_pmu_queue_head_address_f(u32 v) 624{ 625 return (v & 0xffffffffU) << 0U; 626} 627static inline u32 pwr_pmu_queue_head_address_v(u32 r) 628{ 629 return (r >> 0U) & 0xffffffffU; 630} 631static inline u32 pwr_pmu_queue_tail_r(u32 i) 632{ 633 return 
0x0010a4b0U + i*4U; 634} 635static inline u32 pwr_pmu_queue_tail__size_1_v(void) 636{ 637 return 0x00000004U; 638} 639static inline u32 pwr_pmu_queue_tail_address_f(u32 v) 640{ 641 return (v & 0xffffffffU) << 0U; 642} 643static inline u32 pwr_pmu_queue_tail_address_v(u32 r) 644{ 645 return (r >> 0U) & 0xffffffffU; 646} 647static inline u32 pwr_pmu_msgq_head_r(void) 648{ 649 return 0x0010a4c8U; 650} 651static inline u32 pwr_pmu_msgq_head_val_f(u32 v) 652{ 653 return (v & 0xffffffffU) << 0U; 654} 655static inline u32 pwr_pmu_msgq_head_val_v(u32 r) 656{ 657 return (r >> 0U) & 0xffffffffU; 658} 659static inline u32 pwr_pmu_msgq_tail_r(void) 660{ 661 return 0x0010a4ccU; 662} 663static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) 664{ 665 return (v & 0xffffffffU) << 0U; 666} 667static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) 668{ 669 return (r >> 0U) & 0xffffffffU; 670} 671static inline u32 pwr_pmu_idle_mask_r(u32 i) 672{ 673 return 0x0010a504U + i*16U; 674} 675static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) 676{ 677 return 0x1U; 678} 679static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) 680{ 681 return 0x200000U; 682} 683static inline u32 pwr_pmu_idle_mask_1_r(u32 i) 684{ 685 return 0x0010aa34U + i*8U; 686} 687static inline u32 pwr_pmu_idle_count_r(u32 i) 688{ 689 return 0x0010a508U + i*16U; 690} 691static inline u32 pwr_pmu_idle_count_value_f(u32 v) 692{ 693 return (v & 0x7fffffffU) << 0U; 694} 695static inline u32 pwr_pmu_idle_count_value_v(u32 r) 696{ 697 return (r >> 0U) & 0x7fffffffU; 698} 699static inline u32 pwr_pmu_idle_count_reset_f(u32 v) 700{ 701 return (v & 0x1U) << 31U; 702} 703static inline u32 pwr_pmu_idle_ctrl_r(u32 i) 704{ 705 return 0x0010a50cU + i*16U; 706} 707static inline u32 pwr_pmu_idle_ctrl_value_m(void) 708{ 709 return 0x3U << 0U; 710} 711static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) 712{ 713 return 0x2U; 714} 715static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) 716{ 717 return 0x3U; 718} 719static inline u32 
pwr_pmu_idle_ctrl_filter_m(void) 720{ 721 return 0x1U << 2U; 722} 723static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) 724{ 725 return 0x0U; 726} 727static inline u32 pwr_pmu_idle_threshold_r(u32 i) 728{ 729 return 0x0010a8a0U + i*4U; 730} 731static inline u32 pwr_pmu_idle_threshold_value_f(u32 v) 732{ 733 return (v & 0x7fffffffU) << 0U; 734} 735static inline u32 pwr_pmu_idle_intr_r(void) 736{ 737 return 0x0010a9e8U; 738} 739static inline u32 pwr_pmu_idle_intr_en_f(u32 v) 740{ 741 return (v & 0x1U) << 0U; 742} 743static inline u32 pwr_pmu_idle_intr_en_disabled_v(void) 744{ 745 return 0x00000000U; 746} 747static inline u32 pwr_pmu_idle_intr_en_enabled_v(void) 748{ 749 return 0x00000001U; 750} 751static inline u32 pwr_pmu_idle_intr_status_r(void) 752{ 753 return 0x0010a9ecU; 754} 755static inline u32 pwr_pmu_idle_intr_status_intr_f(u32 v) 756{ 757 return (v & 0x1U) << 0U; 758} 759static inline u32 pwr_pmu_idle_intr_status_intr_m(void) 760{ 761 return 0x1U << 0U; 762} 763static inline u32 pwr_pmu_idle_intr_status_intr_v(u32 r) 764{ 765 return (r >> 0U) & 0x1U; 766} 767static inline u32 pwr_pmu_idle_intr_status_intr_pending_v(void) 768{ 769 return 0x00000001U; 770} 771static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void) 772{ 773 return 0x00000001U; 774} 775static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) 776{ 777 return 0x0010a9f0U + i*8U; 778} 779static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) 780{ 781 return 0x0010a9f4U + i*8U; 782} 783static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) 784{ 785 return 0x0010aa30U + i*8U; 786} 787static inline u32 pwr_pmu_debug_r(u32 i) 788{ 789 return 0x0010a5c0U + i*4U; 790} 791static inline u32 pwr_pmu_debug__size_1_v(void) 792{ 793 return 0x00000004U; 794} 795static inline u32 pwr_pmu_mailbox_r(u32 i) 796{ 797 return 0x0010a450U + i*4U; 798} 799static inline u32 pwr_pmu_mailbox__size_1_v(void) 800{ 801 return 0x0000000cU; 802} 803static inline u32 pwr_pmu_bar0_addr_r(void) 804{ 805 return 0x0010a7a0U; 806} 
807static inline u32 pwr_pmu_bar0_data_r(void) 808{ 809 return 0x0010a7a4U; 810} 811static inline u32 pwr_pmu_bar0_ctl_r(void) 812{ 813 return 0x0010a7acU; 814} 815static inline u32 pwr_pmu_bar0_timeout_r(void) 816{ 817 return 0x0010a7a8U; 818} 819static inline u32 pwr_pmu_bar0_fecs_error_r(void) 820{ 821 return 0x0010a988U; 822} 823static inline u32 pwr_pmu_bar0_error_status_r(void) 824{ 825 return 0x0010a7b0U; 826} 827static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) 828{ 829 return 0x0010a6c0U + i*4U; 830} 831static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) 832{ 833 return 0x0010a6e8U + i*4U; 834} 835static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) 836{ 837 return 0x0010a710U + i*4U; 838} 839static inline u32 pwr_pmu_pg_intren_r(u32 i) 840{ 841 return 0x0010a760U + i*4U; 842} 843static inline u32 pwr_fbif_transcfg_r(u32 i) 844{ 845 return 0x0010ae00U + i*4U; 846} 847static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) 848{ 849 return 0x0U; 850} 851static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) 852{ 853 return 0x1U; 854} 855static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) 856{ 857 return 0x2U; 858} 859static inline u32 pwr_fbif_transcfg_mem_type_s(void) 860{ 861 return 1U; 862} 863static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) 864{ 865 return (v & 0x1U) << 2U; 866} 867static inline u32 pwr_fbif_transcfg_mem_type_m(void) 868{ 869 return 0x1U << 2U; 870} 871static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) 872{ 873 return (r >> 2U) & 0x1U; 874} 875static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) 876{ 877 return 0x0U; 878} 879static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) 880{ 881 return 0x4U; 882} 883#endif
diff --git a/include/nvgpu/hw/gp10b/hw_ram_gp10b.h b/include/nvgpu/hw/gp10b/hw_ram_gp10b.h
deleted file mode 100644
index cc83f52..0000000
--- a/include/nvgpu/hw/gp10b/hw_ram_gp10b.h
+++ /dev/null
@@ -1,519 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ram_gp10b_h_ 57#define _hw_ram_gp10b_h_ 58 59static inline u32 ram_in_ramfc_s(void) 60{ 61 return 4096U; 62} 63static inline u32 ram_in_ramfc_w(void) 64{ 65 return 0U; 66} 67static inline u32 ram_in_page_dir_base_target_f(u32 v) 68{ 69 return (v & 0x3U) << 0U; 70} 71static inline u32 ram_in_page_dir_base_target_w(void) 72{ 73 return 128U; 74} 75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 ram_in_page_dir_base_vol_w(void) 88{ 89 return 128U; 90} 91static inline u32 ram_in_page_dir_base_vol_true_f(void) 92{ 93 return 0x4U; 94} 95static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v) 96{ 97 return (v & 0x1U) << 4U; 98} 99static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void) 100{ 101 return 0x1U << 4U; 102} 103static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void) 104{ 105 return 128U; 106} 107static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void) 108{ 109 return 0x10U; 110} 111static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v) 112{ 113 
return (v & 0x1U) << 5U; 114} 115static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void) 116{ 117 return 0x1U << 5U; 118} 119static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void) 120{ 121 return 128U; 122} 123static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void) 124{ 125 return 0x20U; 126} 127static inline u32 ram_in_use_ver2_pt_format_f(u32 v) 128{ 129 return (v & 0x1U) << 10U; 130} 131static inline u32 ram_in_use_ver2_pt_format_m(void) 132{ 133 return 0x1U << 10U; 134} 135static inline u32 ram_in_use_ver2_pt_format_w(void) 136{ 137 return 128U; 138} 139static inline u32 ram_in_use_ver2_pt_format_true_f(void) 140{ 141 return 0x400U; 142} 143static inline u32 ram_in_use_ver2_pt_format_false_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 ram_in_big_page_size_f(u32 v) 148{ 149 return (v & 0x1U) << 11U; 150} 151static inline u32 ram_in_big_page_size_m(void) 152{ 153 return 0x1U << 11U; 154} 155static inline u32 ram_in_big_page_size_w(void) 156{ 157 return 128U; 158} 159static inline u32 ram_in_big_page_size_128kb_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 ram_in_big_page_size_64kb_f(void) 164{ 165 return 0x800U; 166} 167static inline u32 ram_in_page_dir_base_lo_f(u32 v) 168{ 169 return (v & 0xfffffU) << 12U; 170} 171static inline u32 ram_in_page_dir_base_lo_w(void) 172{ 173 return 128U; 174} 175static inline u32 ram_in_page_dir_base_hi_f(u32 v) 176{ 177 return (v & 0xffU) << 0U; 178} 179static inline u32 ram_in_page_dir_base_hi_w(void) 180{ 181 return 129U; 182} 183static inline u32 ram_in_adr_limit_lo_f(u32 v) 184{ 185 return (v & 0xfffffU) << 12U; 186} 187static inline u32 ram_in_adr_limit_lo_w(void) 188{ 189 return 130U; 190} 191static inline u32 ram_in_adr_limit_hi_f(u32 v) 192{ 193 return (v & 0xffffffffU) << 0U; 194} 195static inline u32 ram_in_adr_limit_hi_w(void) 196{ 197 return 131U; 198} 199static inline u32 ram_in_engine_cs_w(void) 200{ 201 return 132U; 202} 203static inline u32 
ram_in_engine_cs_wfi_v(void) 204{ 205 return 0x00000000U; 206} 207static inline u32 ram_in_engine_cs_wfi_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 ram_in_engine_cs_fg_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 ram_in_engine_cs_fg_f(void) 216{ 217 return 0x8U; 218} 219static inline u32 ram_in_gr_cs_w(void) 220{ 221 return 132U; 222} 223static inline u32 ram_in_gr_cs_wfi_f(void) 224{ 225 return 0x0U; 226} 227static inline u32 ram_in_gr_wfi_target_w(void) 228{ 229 return 132U; 230} 231static inline u32 ram_in_gr_wfi_mode_w(void) 232{ 233 return 132U; 234} 235static inline u32 ram_in_gr_wfi_mode_physical_v(void) 236{ 237 return 0x00000000U; 238} 239static inline u32 ram_in_gr_wfi_mode_physical_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 ram_in_gr_wfi_mode_virtual_v(void) 244{ 245 return 0x00000001U; 246} 247static inline u32 ram_in_gr_wfi_mode_virtual_f(void) 248{ 249 return 0x4U; 250} 251static inline u32 ram_in_gr_wfi_ptr_lo_f(u32 v) 252{ 253 return (v & 0xfffffU) << 12U; 254} 255static inline u32 ram_in_gr_wfi_ptr_lo_w(void) 256{ 257 return 132U; 258} 259static inline u32 ram_in_gr_wfi_ptr_hi_f(u32 v) 260{ 261 return (v & 0xffU) << 0U; 262} 263static inline u32 ram_in_gr_wfi_ptr_hi_w(void) 264{ 265 return 133U; 266} 267static inline u32 ram_in_base_shift_v(void) 268{ 269 return 0x0000000cU; 270} 271static inline u32 ram_in_alloc_size_v(void) 272{ 273 return 0x00001000U; 274} 275static inline u32 ram_fc_size_val_v(void) 276{ 277 return 0x00000200U; 278} 279static inline u32 ram_fc_gp_put_w(void) 280{ 281 return 0U; 282} 283static inline u32 ram_fc_userd_w(void) 284{ 285 return 2U; 286} 287static inline u32 ram_fc_userd_hi_w(void) 288{ 289 return 3U; 290} 291static inline u32 ram_fc_signature_w(void) 292{ 293 return 4U; 294} 295static inline u32 ram_fc_gp_get_w(void) 296{ 297 return 5U; 298} 299static inline u32 ram_fc_pb_get_w(void) 300{ 301 return 6U; 302} 303static inline u32 ram_fc_pb_get_hi_w(void) 304{ 305 return 7U; 
306} 307static inline u32 ram_fc_pb_top_level_get_w(void) 308{ 309 return 8U; 310} 311static inline u32 ram_fc_pb_top_level_get_hi_w(void) 312{ 313 return 9U; 314} 315static inline u32 ram_fc_acquire_w(void) 316{ 317 return 12U; 318} 319static inline u32 ram_fc_semaphorea_w(void) 320{ 321 return 14U; 322} 323static inline u32 ram_fc_semaphoreb_w(void) 324{ 325 return 15U; 326} 327static inline u32 ram_fc_semaphorec_w(void) 328{ 329 return 16U; 330} 331static inline u32 ram_fc_semaphored_w(void) 332{ 333 return 17U; 334} 335static inline u32 ram_fc_gp_base_w(void) 336{ 337 return 18U; 338} 339static inline u32 ram_fc_gp_base_hi_w(void) 340{ 341 return 19U; 342} 343static inline u32 ram_fc_gp_fetch_w(void) 344{ 345 return 20U; 346} 347static inline u32 ram_fc_pb_fetch_w(void) 348{ 349 return 21U; 350} 351static inline u32 ram_fc_pb_fetch_hi_w(void) 352{ 353 return 22U; 354} 355static inline u32 ram_fc_pb_put_w(void) 356{ 357 return 23U; 358} 359static inline u32 ram_fc_pb_put_hi_w(void) 360{ 361 return 24U; 362} 363static inline u32 ram_fc_pb_header_w(void) 364{ 365 return 33U; 366} 367static inline u32 ram_fc_pb_count_w(void) 368{ 369 return 34U; 370} 371static inline u32 ram_fc_subdevice_w(void) 372{ 373 return 37U; 374} 375static inline u32 ram_fc_formats_w(void) 376{ 377 return 39U; 378} 379static inline u32 ram_fc_allowed_syncpoints_w(void) 380{ 381 return 58U; 382} 383static inline u32 ram_fc_syncpointa_w(void) 384{ 385 return 41U; 386} 387static inline u32 ram_fc_syncpointb_w(void) 388{ 389 return 42U; 390} 391static inline u32 ram_fc_target_w(void) 392{ 393 return 43U; 394} 395static inline u32 ram_fc_hce_ctrl_w(void) 396{ 397 return 57U; 398} 399static inline u32 ram_fc_chid_w(void) 400{ 401 return 58U; 402} 403static inline u32 ram_fc_chid_id_f(u32 v) 404{ 405 return (v & 0xfffU) << 0U; 406} 407static inline u32 ram_fc_chid_id_w(void) 408{ 409 return 0U; 410} 411static inline u32 ram_fc_config_w(void) 412{ 413 return 61U; 414} 415static inline u32 
ram_fc_runlist_timeslice_w(void) 416{ 417 return 62U; 418} 419static inline u32 ram_userd_base_shift_v(void) 420{ 421 return 0x00000009U; 422} 423static inline u32 ram_userd_chan_size_v(void) 424{ 425 return 0x00000200U; 426} 427static inline u32 ram_userd_put_w(void) 428{ 429 return 16U; 430} 431static inline u32 ram_userd_get_w(void) 432{ 433 return 17U; 434} 435static inline u32 ram_userd_ref_w(void) 436{ 437 return 18U; 438} 439static inline u32 ram_userd_put_hi_w(void) 440{ 441 return 19U; 442} 443static inline u32 ram_userd_ref_threshold_w(void) 444{ 445 return 20U; 446} 447static inline u32 ram_userd_top_level_get_w(void) 448{ 449 return 22U; 450} 451static inline u32 ram_userd_top_level_get_hi_w(void) 452{ 453 return 23U; 454} 455static inline u32 ram_userd_get_hi_w(void) 456{ 457 return 24U; 458} 459static inline u32 ram_userd_gp_get_w(void) 460{ 461 return 34U; 462} 463static inline u32 ram_userd_gp_put_w(void) 464{ 465 return 35U; 466} 467static inline u32 ram_userd_gp_top_level_get_w(void) 468{ 469 return 22U; 470} 471static inline u32 ram_userd_gp_top_level_get_hi_w(void) 472{ 473 return 23U; 474} 475static inline u32 ram_rl_entry_size_v(void) 476{ 477 return 0x00000008U; 478} 479static inline u32 ram_rl_entry_chid_f(u32 v) 480{ 481 return (v & 0xfffU) << 0U; 482} 483static inline u32 ram_rl_entry_id_f(u32 v) 484{ 485 return (v & 0xfffU) << 0U; 486} 487static inline u32 ram_rl_entry_type_f(u32 v) 488{ 489 return (v & 0x1U) << 13U; 490} 491static inline u32 ram_rl_entry_type_chid_f(void) 492{ 493 return 0x0U; 494} 495static inline u32 ram_rl_entry_type_tsg_f(void) 496{ 497 return 0x2000U; 498} 499static inline u32 ram_rl_entry_timeslice_scale_f(u32 v) 500{ 501 return (v & 0xfU) << 14U; 502} 503static inline u32 ram_rl_entry_timeslice_scale_3_f(void) 504{ 505 return 0xc000U; 506} 507static inline u32 ram_rl_entry_timeslice_timeout_f(u32 v) 508{ 509 return (v & 0xffU) << 18U; 510} 511static inline u32 ram_rl_entry_timeslice_timeout_128_f(void) 512{ 513 
return 0x2000000U; 514} 515static inline u32 ram_rl_entry_tsg_length_f(u32 v) 516{ 517 return (v & 0x3fU) << 26U; 518} 519#endif
diff --git a/include/nvgpu/hw/gp10b/hw_therm_gp10b.h b/include/nvgpu/hw/gp10b/hw_therm_gp10b.h
deleted file mode 100644
index 49fb718..0000000
--- a/include/nvgpu/hw/gp10b/hw_therm_gp10b.h
+++ /dev/null
@@ -1,415 +0,0 @@ 1/* 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_therm_gp10b_h_ 57#define _hw_therm_gp10b_h_ 58 59static inline u32 therm_use_a_r(void) 60{ 61 return 0x00020798U; 62} 63static inline u32 therm_use_a_ext_therm_0_enable_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 therm_use_a_ext_therm_1_enable_f(void) 68{ 69 return 0x2U; 70} 71static inline u32 therm_use_a_ext_therm_2_enable_f(void) 72{ 73 return 0x4U; 74} 75static inline u32 therm_evt_ext_therm_0_r(void) 76{ 77 return 0x00020700U; 78} 79static inline u32 therm_evt_ext_therm_0_slow_factor_f(u32 v) 80{ 81 return (v & 0x3fU) << 24U; 82} 83static inline u32 therm_evt_ext_therm_0_slow_factor_init_v(void) 84{ 85 return 0x00000001U; 86} 87static inline u32 therm_evt_ext_therm_0_mode_f(u32 v) 88{ 89 return (v & 0x3U) << 30U; 90} 91static inline u32 therm_evt_ext_therm_0_mode_normal_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 therm_evt_ext_therm_0_mode_inverted_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 therm_evt_ext_therm_0_mode_forced_v(void) 100{ 101 return 0x00000002U; 102} 103static inline u32 therm_evt_ext_therm_0_mode_cleared_v(void) 104{ 105 return 0x00000003U; 106} 107static inline u32 therm_evt_ext_therm_1_r(void) 108{ 109 return 0x00020704U; 110} 111static inline u32 
therm_evt_ext_therm_1_slow_factor_f(u32 v) 112{ 113 return (v & 0x3fU) << 24U; 114} 115static inline u32 therm_evt_ext_therm_1_slow_factor_init_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 therm_evt_ext_therm_1_mode_f(u32 v) 120{ 121 return (v & 0x3U) << 30U; 122} 123static inline u32 therm_evt_ext_therm_1_mode_normal_v(void) 124{ 125 return 0x00000000U; 126} 127static inline u32 therm_evt_ext_therm_1_mode_inverted_v(void) 128{ 129 return 0x00000001U; 130} 131static inline u32 therm_evt_ext_therm_1_mode_forced_v(void) 132{ 133 return 0x00000002U; 134} 135static inline u32 therm_evt_ext_therm_1_mode_cleared_v(void) 136{ 137 return 0x00000003U; 138} 139static inline u32 therm_evt_ext_therm_2_r(void) 140{ 141 return 0x00020708U; 142} 143static inline u32 therm_evt_ext_therm_2_slow_factor_f(u32 v) 144{ 145 return (v & 0x3fU) << 24U; 146} 147static inline u32 therm_evt_ext_therm_2_slow_factor_init_v(void) 148{ 149 return 0x00000003U; 150} 151static inline u32 therm_evt_ext_therm_2_mode_f(u32 v) 152{ 153 return (v & 0x3U) << 30U; 154} 155static inline u32 therm_evt_ext_therm_2_mode_normal_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 therm_evt_ext_therm_2_mode_inverted_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 therm_evt_ext_therm_2_mode_forced_v(void) 164{ 165 return 0x00000002U; 166} 167static inline u32 therm_evt_ext_therm_2_mode_cleared_v(void) 168{ 169 return 0x00000003U; 170} 171static inline u32 therm_weight_1_r(void) 172{ 173 return 0x00020024U; 174} 175static inline u32 therm_config1_r(void) 176{ 177 return 0x00020050U; 178} 179static inline u32 therm_config2_r(void) 180{ 181 return 0x00020130U; 182} 183static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) 184{ 185 return (v & 0x1U) << 24U; 186} 187static inline u32 therm_config2_grad_enable_f(u32 v) 188{ 189 return (v & 0x1U) << 31U; 190} 191static inline u32 therm_gate_ctrl_r(u32 i) 192{ 193 return 0x00020200U + i*4U; 194} 195static inline u32 
therm_gate_ctrl_eng_clk_m(void) 196{ 197 return 0x3U << 0U; 198} 199static inline u32 therm_gate_ctrl_eng_clk_run_f(void) 200{ 201 return 0x0U; 202} 203static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) 204{ 205 return 0x1U; 206} 207static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) 208{ 209 return 0x2U; 210} 211static inline u32 therm_gate_ctrl_blk_clk_m(void) 212{ 213 return 0x3U << 2U; 214} 215static inline u32 therm_gate_ctrl_blk_clk_run_f(void) 216{ 217 return 0x0U; 218} 219static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) 220{ 221 return 0x4U; 222} 223static inline u32 therm_gate_ctrl_eng_pwr_m(void) 224{ 225 return 0x3U << 4U; 226} 227static inline u32 therm_gate_ctrl_eng_pwr_auto_f(void) 228{ 229 return 0x10U; 230} 231static inline u32 therm_gate_ctrl_eng_pwr_off_v(void) 232{ 233 return 0x00000002U; 234} 235static inline u32 therm_gate_ctrl_eng_pwr_off_f(void) 236{ 237 return 0x20U; 238} 239static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) 240{ 241 return (v & 0x1fU) << 8U; 242} 243static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) 244{ 245 return 0x1fU << 8U; 246} 247static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) 248{ 249 return (v & 0x7U) << 13U; 250} 251static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) 252{ 253 return 0x7U << 13U; 254} 255static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) 256{ 257 return (v & 0xfU) << 16U; 258} 259static inline u32 therm_gate_ctrl_eng_delay_before_m(void) 260{ 261 return 0xfU << 16U; 262} 263static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) 264{ 265 return (v & 0xfU) << 20U; 266} 267static inline u32 therm_gate_ctrl_eng_delay_after_m(void) 268{ 269 return 0xfU << 20U; 270} 271static inline u32 therm_fecs_idle_filter_r(void) 272{ 273 return 0x00020288U; 274} 275static inline u32 therm_fecs_idle_filter_value_m(void) 276{ 277 return 0xffffffffU << 0U; 278} 279static inline u32 therm_hubmmu_idle_filter_r(void) 280{ 281 return 0x0002028cU; 282} 
283static inline u32 therm_hubmmu_idle_filter_value_m(void) 284{ 285 return 0xffffffffU << 0U; 286} 287static inline u32 therm_clk_slowdown_r(u32 i) 288{ 289 return 0x00020160U + i*4U; 290} 291static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) 292{ 293 return (v & 0x3fU) << 16U; 294} 295static inline u32 therm_clk_slowdown_idle_factor_m(void) 296{ 297 return 0x3fU << 16U; 298} 299static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) 300{ 301 return (r >> 16U) & 0x3fU; 302} 303static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) 304{ 305 return 0x0U; 306} 307static inline u32 therm_grad_stepping_table_r(u32 i) 308{ 309 return 0x000202c8U + i*4U; 310} 311static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) 312{ 313 return (v & 0x3fU) << 0U; 314} 315static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) 316{ 317 return 0x3fU << 0U; 318} 319static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) 320{ 321 return 0x1U; 322} 323static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) 324{ 325 return 0x2U; 326} 327static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) 328{ 329 return 0x6U; 330} 331static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) 332{ 333 return 0xeU; 334} 335static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) 336{ 337 return (v & 0x3fU) << 6U; 338} 339static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) 340{ 341 return 0x3fU << 6U; 342} 343static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) 344{ 345 return (v & 0x3fU) << 12U; 346} 347static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) 348{ 349 return 0x3fU << 12U; 350} 351static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v) 352{ 353 return (v & 0x3fU) << 18U; 354} 355static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) 356{ 357 return 0x3fU << 18U; 358} 
359static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v) 360{ 361 return (v & 0x3fU) << 24U; 362} 363static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) 364{ 365 return 0x3fU << 24U; 366} 367static inline u32 therm_grad_stepping0_r(void) 368{ 369 return 0x000202c0U; 370} 371static inline u32 therm_grad_stepping0_feature_s(void) 372{ 373 return 1U; 374} 375static inline u32 therm_grad_stepping0_feature_f(u32 v) 376{ 377 return (v & 0x1U) << 0U; 378} 379static inline u32 therm_grad_stepping0_feature_m(void) 380{ 381 return 0x1U << 0U; 382} 383static inline u32 therm_grad_stepping0_feature_v(u32 r) 384{ 385 return (r >> 0U) & 0x1U; 386} 387static inline u32 therm_grad_stepping0_feature_enable_f(void) 388{ 389 return 0x1U; 390} 391static inline u32 therm_grad_stepping1_r(void) 392{ 393 return 0x000202c4U; 394} 395static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) 396{ 397 return (v & 0x1ffffU) << 0U; 398} 399static inline u32 therm_clk_timing_r(u32 i) 400{ 401 return 0x000203c0U + i*4U; 402} 403static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) 404{ 405 return (v & 0x1U) << 16U; 406} 407static inline u32 therm_clk_timing_grad_slowdown_m(void) 408{ 409 return 0x1U << 16U; 410} 411static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) 412{ 413 return 0x10000U; 414} 415#endif
diff --git a/include/nvgpu/hw/gp10b/hw_timer_gp10b.h b/include/nvgpu/hw/gp10b/hw_timer_gp10b.h
deleted file mode 100644
index 54facfc..0000000
--- a/include/nvgpu/hw/gp10b/hw_timer_gp10b.h
+++ /dev/null
@@ -1,127 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_timer_gp10b_h_ 57#define _hw_timer_gp10b_h_ 58 59static inline u32 timer_pri_timeout_r(void) 60{ 61 return 0x00009080U; 62} 63static inline u32 timer_pri_timeout_period_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 timer_pri_timeout_period_m(void) 68{ 69 return 0xffffffU << 0U; 70} 71static inline u32 timer_pri_timeout_period_v(u32 r) 72{ 73 return (r >> 0U) & 0xffffffU; 74} 75static inline u32 timer_pri_timeout_en_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 timer_pri_timeout_en_m(void) 80{ 81 return 0x1U << 31U; 82} 83static inline u32 timer_pri_timeout_en_v(u32 r) 84{ 85 return (r >> 31U) & 0x1U; 86} 87static inline u32 timer_pri_timeout_en_en_enabled_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 timer_pri_timeout_en_en_disabled_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 timer_pri_timeout_save_0_r(void) 96{ 97 return 0x00009084U; 98} 99static inline u32 timer_pri_timeout_save_0_fecs_tgt_v(u32 r) 100{ 101 return (r >> 31U) & 0x1U; 102} 103static inline u32 timer_pri_timeout_save_0_addr_v(u32 r) 104{ 105 return (r >> 2U) & 0x3fffffU; 106} 107static inline u32 timer_pri_timeout_save_0_write_v(u32 r) 108{ 109 return (r >> 1U) & 0x1U; 110} 111static inline u32 
timer_pri_timeout_save_1_r(void) 112{ 113 return 0x00009088U; 114} 115static inline u32 timer_pri_timeout_fecs_errcode_r(void) 116{ 117 return 0x0000908cU; 118} 119static inline u32 timer_time_0_r(void) 120{ 121 return 0x00009400U; 122} 123static inline u32 timer_time_1_r(void) 124{ 125 return 0x00009410U; 126} 127#endif
diff --git a/include/nvgpu/hw/gp10b/hw_top_gp10b.h b/include/nvgpu/hw/gp10b/hw_top_gp10b.h
deleted file mode 100644
index a7b7c2b..0000000
--- a/include/nvgpu/hw/gp10b/hw_top_gp10b.h
+++ /dev/null
@@ -1,231 +0,0 @@ 1/* 2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_top_gp10b_h_ 57#define _hw_top_gp10b_h_ 58 59static inline u32 top_num_gpcs_r(void) 60{ 61 return 0x00022430U; 62} 63static inline u32 top_num_gpcs_value_v(u32 r) 64{ 65 return (r >> 0U) & 0x1fU; 66} 67static inline u32 top_tpc_per_gpc_r(void) 68{ 69 return 0x00022434U; 70} 71static inline u32 top_tpc_per_gpc_value_v(u32 r) 72{ 73 return (r >> 0U) & 0x1fU; 74} 75static inline u32 top_num_fbps_r(void) 76{ 77 return 0x00022438U; 78} 79static inline u32 top_num_fbps_value_v(u32 r) 80{ 81 return (r >> 0U) & 0x1fU; 82} 83static inline u32 top_ltc_per_fbp_r(void) 84{ 85 return 0x00022450U; 86} 87static inline u32 top_ltc_per_fbp_value_v(u32 r) 88{ 89 return (r >> 0U) & 0x1fU; 90} 91static inline u32 top_slices_per_ltc_r(void) 92{ 93 return 0x0002245cU; 94} 95static inline u32 top_slices_per_ltc_value_v(u32 r) 96{ 97 return (r >> 0U) & 0x1fU; 98} 99static inline u32 top_num_ltcs_r(void) 100{ 101 return 0x00022454U; 102} 103static inline u32 top_device_info_r(u32 i) 104{ 105 return 0x00022700U + i*4U; 106} 107static inline u32 top_device_info__size_1_v(void) 108{ 109 return 0x00000040U; 110} 111static inline u32 top_device_info_chain_v(u32 r) 112{ 113 return (r >> 31U) & 0x1U; 114} 115static inline u32 top_device_info_chain_enable_v(void) 
116{ 117 return 0x00000001U; 118} 119static inline u32 top_device_info_engine_enum_v(u32 r) 120{ 121 return (r >> 26U) & 0xfU; 122} 123static inline u32 top_device_info_runlist_enum_v(u32 r) 124{ 125 return (r >> 21U) & 0xfU; 126} 127static inline u32 top_device_info_intr_enum_v(u32 r) 128{ 129 return (r >> 15U) & 0x1fU; 130} 131static inline u32 top_device_info_reset_enum_v(u32 r) 132{ 133 return (r >> 9U) & 0x1fU; 134} 135static inline u32 top_device_info_type_enum_v(u32 r) 136{ 137 return (r >> 2U) & 0x1fffffffU; 138} 139static inline u32 top_device_info_type_enum_graphics_v(void) 140{ 141 return 0x00000000U; 142} 143static inline u32 top_device_info_type_enum_graphics_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 top_device_info_type_enum_copy2_v(void) 148{ 149 return 0x00000003U; 150} 151static inline u32 top_device_info_type_enum_copy2_f(void) 152{ 153 return 0xcU; 154} 155static inline u32 top_device_info_type_enum_lce_v(void) 156{ 157 return 0x00000013U; 158} 159static inline u32 top_device_info_type_enum_lce_f(void) 160{ 161 return 0x4cU; 162} 163static inline u32 top_device_info_engine_v(u32 r) 164{ 165 return (r >> 5U) & 0x1U; 166} 167static inline u32 top_device_info_runlist_v(u32 r) 168{ 169 return (r >> 4U) & 0x1U; 170} 171static inline u32 top_device_info_intr_v(u32 r) 172{ 173 return (r >> 3U) & 0x1U; 174} 175static inline u32 top_device_info_reset_v(u32 r) 176{ 177 return (r >> 2U) & 0x1U; 178} 179static inline u32 top_device_info_entry_v(u32 r) 180{ 181 return (r >> 0U) & 0x3U; 182} 183static inline u32 top_device_info_entry_not_valid_v(void) 184{ 185 return 0x00000000U; 186} 187static inline u32 top_device_info_entry_enum_v(void) 188{ 189 return 0x00000002U; 190} 191static inline u32 top_device_info_entry_engine_type_v(void) 192{ 193 return 0x00000003U; 194} 195static inline u32 top_device_info_entry_data_v(void) 196{ 197 return 0x00000001U; 198} 199static inline u32 top_device_info_data_type_v(u32 r) 200{ 201 return (r >> 30U) & 0x1U; 
202} 203static inline u32 top_device_info_data_type_enum2_v(void) 204{ 205 return 0x00000000U; 206} 207static inline u32 top_device_info_data_inst_id_v(u32 r) 208{ 209 return (r >> 26U) & 0xfU; 210} 211static inline u32 top_device_info_data_pri_base_v(u32 r) 212{ 213 return (r >> 12U) & 0xfffU; 214} 215static inline u32 top_device_info_data_pri_base_align_v(void) 216{ 217 return 0x0000000cU; 218} 219static inline u32 top_device_info_data_fault_id_enum_v(u32 r) 220{ 221 return (r >> 3U) & 0x1fU; 222} 223static inline u32 top_device_info_data_fault_id_v(u32 r) 224{ 225 return (r >> 2U) & 0x1U; 226} 227static inline u32 top_device_info_data_fault_id_valid_v(void) 228{ 229 return 0x00000001U; 230} 231#endif
diff --git a/include/nvgpu/hw/gv100/hw_bus_gv100.h b/include/nvgpu/hw/gv100/hw_bus_gv100.h
deleted file mode 100644
index 7771f1e..0000000
--- a/include/nvgpu/hw/gv100/hw_bus_gv100.h
+++ /dev/null
@@ -1,227 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_bus_gv100_h_ 57#define _hw_bus_gv100_h_ 58 59static inline u32 bus_sw_scratch_r(u32 i) 60{ 61 return 0x00001580U + i*4U; 62} 63static inline u32 bus_bar0_window_r(void) 64{ 65 return 0x00001700U; 66} 67static inline u32 bus_bar0_window_base_f(u32 v) 68{ 69 return (v & 0xffffffU) << 0U; 70} 71static inline u32 bus_bar0_window_target_vid_mem_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) 76{ 77 return 0x2000000U; 78} 79static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) 80{ 81 return 0x3000000U; 82} 83static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) 84{ 85 return 0x00000010U; 86} 87static inline u32 bus_bar1_block_r(void) 88{ 89 return 0x00001704U; 90} 91static inline u32 bus_bar1_block_ptr_f(u32 v) 92{ 93 return (v & 0xfffffffU) << 0U; 94} 95static inline u32 bus_bar1_block_target_vid_mem_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) 100{ 101 return 0x20000000U; 102} 103static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) 104{ 105 return 0x30000000U; 106} 107static inline u32 bus_bar1_block_mode_virtual_f(void) 108{ 109 return 0x80000000U; 110} 111static inline u32 
bus_bar2_block_r(void) 112{ 113 return 0x00001714U; 114} 115static inline u32 bus_bar2_block_ptr_f(u32 v) 116{ 117 return (v & 0xfffffffU) << 0U; 118} 119static inline u32 bus_bar2_block_target_vid_mem_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) 124{ 125 return 0x20000000U; 126} 127static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void) 128{ 129 return 0x30000000U; 130} 131static inline u32 bus_bar2_block_mode_virtual_f(void) 132{ 133 return 0x80000000U; 134} 135static inline u32 bus_bar1_block_ptr_shift_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 bus_bar2_block_ptr_shift_v(void) 140{ 141 return 0x0000000cU; 142} 143static inline u32 bus_bind_status_r(void) 144{ 145 return 0x00001710U; 146} 147static inline u32 bus_bind_status_bar1_pending_v(u32 r) 148{ 149 return (r >> 0U) & 0x1U; 150} 151static inline u32 bus_bind_status_bar1_pending_empty_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 bus_bind_status_bar1_pending_busy_f(void) 156{ 157 return 0x1U; 158} 159static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) 160{ 161 return (r >> 1U) & 0x1U; 162} 163static inline u32 bus_bind_status_bar1_outstanding_false_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 bus_bind_status_bar1_outstanding_true_f(void) 168{ 169 return 0x2U; 170} 171static inline u32 bus_bind_status_bar2_pending_v(u32 r) 172{ 173 return (r >> 2U) & 0x1U; 174} 175static inline u32 bus_bind_status_bar2_pending_empty_f(void) 176{ 177 return 0x0U; 178} 179static inline u32 bus_bind_status_bar2_pending_busy_f(void) 180{ 181 return 0x4U; 182} 183static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) 184{ 185 return (r >> 3U) & 0x1U; 186} 187static inline u32 bus_bind_status_bar2_outstanding_false_f(void) 188{ 189 return 0x0U; 190} 191static inline u32 bus_bind_status_bar2_outstanding_true_f(void) 192{ 193 return 0x8U; 194} 195static inline u32 bus_intr_0_r(void) 196{ 197 return 0x00001100U; 198} 
199static inline u32 bus_intr_0_pri_squash_m(void) 200{ 201 return 0x1U << 1U; 202} 203static inline u32 bus_intr_0_pri_fecserr_m(void) 204{ 205 return 0x1U << 2U; 206} 207static inline u32 bus_intr_0_pri_timeout_m(void) 208{ 209 return 0x1U << 3U; 210} 211static inline u32 bus_intr_en_0_r(void) 212{ 213 return 0x00001140U; 214} 215static inline u32 bus_intr_en_0_pri_squash_m(void) 216{ 217 return 0x1U << 1U; 218} 219static inline u32 bus_intr_en_0_pri_fecserr_m(void) 220{ 221 return 0x1U << 2U; 222} 223static inline u32 bus_intr_en_0_pri_timeout_m(void) 224{ 225 return 0x1U << 3U; 226} 227#endif
diff --git a/include/nvgpu/hw/gv100/hw_ccsr_gv100.h b/include/nvgpu/hw/gv100/hw_ccsr_gv100.h
deleted file mode 100644
index b147803..0000000
--- a/include/nvgpu/hw/gv100/hw_ccsr_gv100.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ccsr_gv100_h_ 57#define _hw_ccsr_gv100_h_ 58 59static inline u32 ccsr_channel_inst_r(u32 i) 60{ 61 return 0x00800000U + i*8U; 62} 63static inline u32 ccsr_channel_inst__size_1_v(void) 64{ 65 return 0x00001000U; 66} 67static inline u32 ccsr_channel_inst_ptr_f(u32 v) 68{ 69 return (v & 0xfffffffU) << 0U; 70} 71static inline u32 ccsr_channel_inst_target_vid_mem_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) 76{ 77 return 0x20000000U; 78} 79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) 80{ 81 return 0x30000000U; 82} 83static inline u32 ccsr_channel_inst_bind_false_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 ccsr_channel_inst_bind_true_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 ccsr_channel_r(u32 i) 92{ 93 return 0x00800004U + i*8U; 94} 95static inline u32 ccsr_channel__size_1_v(void) 96{ 97 return 0x00001000U; 98} 99static inline u32 ccsr_channel_enable_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 ccsr_channel_enable_set_f(u32 v) 104{ 105 return (v & 0x1U) << 10U; 106} 107static inline u32 ccsr_channel_enable_set_true_f(void) 108{ 109 return 0x400U; 110} 111static inline u32 ccsr_channel_enable_clr_true_f(void) 112{ 113 
return 0x800U; 114} 115static inline u32 ccsr_channel_status_v(u32 r) 116{ 117 return (r >> 24U) & 0xfU; 118} 119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) 120{ 121 return 0x00000002U; 122} 123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) 124{ 125 return 0x00000004U; 126} 127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) 128{ 129 return 0x0000000aU; 130} 131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) 132{ 133 return 0x0000000bU; 134} 135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) 140{ 141 return 0x0000000dU; 142} 143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) 144{ 145 return 0x0000000eU; 146} 147static inline u32 ccsr_channel_next_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 ccsr_channel_next_true_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 ccsr_channel_force_ctx_reload_true_f(void) 156{ 157 return 0x100U; 158} 159static inline u32 ccsr_channel_pbdma_faulted_f(u32 v) 160{ 161 return (v & 0x1U) << 22U; 162} 163static inline u32 ccsr_channel_pbdma_faulted_reset_f(void) 164{ 165 return 0x400000U; 166} 167static inline u32 ccsr_channel_eng_faulted_f(u32 v) 168{ 169 return (v & 0x1U) << 23U; 170} 171static inline u32 ccsr_channel_eng_faulted_v(u32 r) 172{ 173 return (r >> 23U) & 0x1U; 174} 175static inline u32 ccsr_channel_eng_faulted_reset_f(void) 176{ 177 return 0x800000U; 178} 179static inline u32 ccsr_channel_eng_faulted_true_v(void) 180{ 181 return 0x00000001U; 182} 183static inline u32 ccsr_channel_busy_v(u32 r) 184{ 185 return (r >> 28U) & 0x1U; 186} 187#endif
diff --git a/include/nvgpu/hw/gv100/hw_ce_gv100.h b/include/nvgpu/hw/gv100/hw_ce_gv100.h
deleted file mode 100644
index 18b5fc6..0000000
--- a/include/nvgpu/hw/gv100/hw_ce_gv100.h
+++ /dev/null
@@ -1,107 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ce_gv100_h_ 57#define _hw_ce_gv100_h_ 58 59static inline u32 ce_intr_status_r(u32 i) 60{ 61 return 0x00104410U + i*128U; 62} 63static inline u32 ce_intr_status_blockpipe_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 ce_intr_status_blockpipe_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 ce_intr_status_nonblockpipe_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 ce_intr_status_nonblockpipe_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 ce_intr_status_launcherr_pending_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 ce_intr_status_launcherr_reset_f(void) 84{ 85 return 0x4U; 86} 87static inline u32 ce_intr_status_invalid_config_pending_f(void) 88{ 89 return 0x8U; 90} 91static inline u32 ce_intr_status_invalid_config_reset_f(void) 92{ 93 return 0x8U; 94} 95static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void) 96{ 97 return 0x10U; 98} 99static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void) 100{ 101 return 0x10U; 102} 103static inline u32 ce_pce_map_r(void) 104{ 105 return 0x00104028U; 106} 107#endif
diff --git a/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h b/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h
deleted file mode 100644
index b7f3df2..0000000
--- a/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h
+++ /dev/null
@@ -1,459 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ctxsw_prog_gv100_h_ 57#define _hw_ctxsw_prog_gv100_h_ 58 59static inline u32 ctxsw_prog_fecs_header_v(void) 60{ 61 return 0x00000100U; 62} 63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ctxsw_prog_main_image_ctl_o(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v) 72{ 73 return (v & 0x3fU) << 0U; 74} 75static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void) 76{ 77 return 0x00000000U; 78} 79static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void) 80{ 81 return 0x00000008U; 82} 83static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void) 84{ 85 return 0x00000010U; 86} 87static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void) 88{ 89 return 0x00000011U; 90} 91static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void) 92{ 93 return 0x00000012U; 94} 95static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void) 96{ 97 return 0x00000020U; 98} 99static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void) 100{ 101 return 0x00000021U; 102} 103static inline u32 ctxsw_prog_main_image_patch_count_o(void) 104{ 105 return 0x00000010U; 106} 107static inline u32 
ctxsw_prog_main_image_context_id_o(void) 108{ 109 return 0x000000f0U; 110} 111static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) 112{ 113 return 0x00000014U; 114} 115static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) 116{ 117 return 0x00000018U; 118} 119static inline u32 ctxsw_prog_main_image_zcull_o(void) 120{ 121 return 0x0000001cU; 122} 123static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) 124{ 125 return 0x00000001U; 126} 127static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) 128{ 129 return 0x00000002U; 130} 131static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) 132{ 133 return 0x00000020U; 134} 135static inline u32 ctxsw_prog_main_image_pm_o(void) 136{ 137 return 0x00000028U; 138} 139static inline u32 ctxsw_prog_main_image_pm_mode_m(void) 140{ 141 return 0x7U << 0U; 142} 143static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f(void) 148{ 149 return 0x2U; 150} 151static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) 152{ 153 return 0x7U << 3U; 154} 155static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) 156{ 157 return 0x8U; 158} 159static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) 164{ 165 return 0x0000002cU; 166} 167static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) 168{ 169 return 0x000000f4U; 170} 171static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) 172{ 173 return 0x000000d0U; 174} 175static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) 176{ 177 return 0x000000d4U; 178} 179static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) 180{ 181 return 0x000000d8U; 182} 183static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void) 184{ 185 return 0x000000dcU; 186} 187static inline u32 
ctxsw_prog_main_image_num_restore_ops_o(void) 188{ 189 return 0x000000f8U; 190} 191static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void) 192{ 193 return 0x00000060U; 194} 195static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v) 196{ 197 return (v & 0x1ffffU) << 0U; 198} 199static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void) 200{ 201 return 0x00000094U; 202} 203static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void) 204{ 205 return 0x00000064U; 206} 207static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v) 208{ 209 return (v & 0x1ffffU) << 0U; 210} 211static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) 212{ 213 return 0x00000068U; 214} 215static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v) 216{ 217 return (v & 0xffffffffU) << 0U; 218} 219static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void) 220{ 221 return 0x00000070U; 222} 223static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v) 224{ 225 return (v & 0x1ffffU) << 0U; 226} 227static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void) 228{ 229 return 0x00000074U; 230} 231static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v) 232{ 233 return (v & 0xffffffffU) << 0U; 234} 235static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void) 236{ 237 return 0x00000078U; 238} 239static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v) 240{ 241 return (v & 0x1ffffU) << 0U; 242} 243static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void) 244{ 245 return 0x0000007cU; 246} 247static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v) 248{ 249 return (v & 0xffffffffU) << 0U; 250} 251static inline u32 ctxsw_prog_main_image_magic_value_o(void) 252{ 253 return 0x000000fcU; 254} 255static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) 256{ 257 return 0x600dc0deU; 258} 259static inline u32 
ctxsw_prog_local_priv_register_ctl_o(void) 260{ 261 return 0x0000000cU; 262} 263static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) 264{ 265 return (r >> 0U) & 0xffffU; 266} 267static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void) 268{ 269 return 0x000000b8U; 270} 271static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v) 272{ 273 return (v & 0xffffffffU) << 0U; 274} 275static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void) 276{ 277 return 0x000000bcU; 278} 279static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v) 280{ 281 return (v & 0x1ffffU) << 0U; 282} 283static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void) 284{ 285 return 0x000000c0U; 286} 287static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v) 288{ 289 return (v & 0xffffffffU) << 0U; 290} 291static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void) 292{ 293 return 0x000000c4U; 294} 295static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v) 296{ 297 return (v & 0x1ffffU) << 0U; 298} 299static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void) 300{ 301 return 0x000000c8U; 302} 303static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v) 304{ 305 return (v & 0xffffffffU) << 0U; 306} 307static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void) 308{ 309 return 0x000000ccU; 310} 311static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v) 312{ 313 return (v & 0x1ffffU) << 0U; 314} 315static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void) 316{ 317 return 0x000000e0U; 318} 319static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v) 320{ 321 return (v & 0xffffffffU) << 0U; 322} 323static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void) 324{ 325 return 0x000000e4U; 326} 327static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v) 328{ 329 return (v & 
0x1ffffU) << 0U; 330} 331static inline u32 ctxsw_prog_local_image_ppc_info_o(void) 332{ 333 return 0x000000f4U; 334} 335static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) 336{ 337 return (r >> 0U) & 0xffffU; 338} 339static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) 340{ 341 return (r >> 16U) & 0xffffU; 342} 343static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) 344{ 345 return 0x000000f8U; 346} 347static inline u32 ctxsw_prog_local_magic_value_o(void) 348{ 349 return 0x000000fcU; 350} 351static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) 352{ 353 return 0xad0becabU; 354} 355static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) 356{ 357 return 0x000000ecU; 358} 359static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) 360{ 361 return (r >> 0U) & 0xffffU; 362} 363static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) 364{ 365 return (r >> 16U) & 0xffU; 366} 367static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) 368{ 369 return 0x00000100U; 370} 371static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) 372{ 373 return 0x00000004U; 374} 375static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) 376{ 377 return 0x00000000U; 378} 379static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) 380{ 381 return 0x00000002U; 382} 383static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) 384{ 385 return 0x000000a0U; 386} 387static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) 388{ 389 return 2U; 390} 391static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) 392{ 393 return (v & 0x3U) << 0U; 394} 395static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) 396{ 397 return 0x3U << 0U; 398} 399static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) 400{ 401 return (r >> 0U) & 0x3U; 402} 403static inline 
u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) 404{ 405 return 0x0U; 406} 407static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) 408{ 409 return 0x2U; 410} 411static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) 412{ 413 return 0x000000a4U; 414} 415static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) 416{ 417 return 0x000000a8U; 418} 419static inline u32 ctxsw_prog_main_image_misc_options_o(void) 420{ 421 return 0x0000003cU; 422} 423static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) 424{ 425 return 0x1U << 3U; 426} 427static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) 428{ 429 return 0x0U; 430} 431static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) 432{ 433 return 0x00000080U; 434} 435static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) 436{ 437 return (v & 0x3U) << 0U; 438} 439static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) 440{ 441 return 0x1U; 442} 443static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) 444{ 445 return 0x00000084U; 446} 447static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) 448{ 449 return (v & 0x3U) << 0U; 450} 451static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) 452{ 453 return 0x1U; 454} 455static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) 456{ 457 return 0x2U; 458} 459#endif
diff --git a/include/nvgpu/hw/gv100/hw_falcon_gv100.h b/include/nvgpu/hw/gv100/hw_falcon_gv100.h
deleted file mode 100644
index 3492d68..0000000
--- a/include/nvgpu/hw/gv100/hw_falcon_gv100.h
+++ /dev/null
@@ -1,603 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_falcon_gv100_h_ 57#define _hw_falcon_gv100_h_ 58 59static inline u32 falcon_falcon_irqsset_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 falcon_falcon_irqsclr_r(void) 68{ 69 return 0x00000004U; 70} 71static inline u32 falcon_falcon_irqstat_r(void) 72{ 73 return 0x00000008U; 74} 75static inline u32 falcon_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 falcon_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 falcon_falcon_irqmode_r(void) 88{ 89 return 0x0000000cU; 90} 91static inline u32 falcon_falcon_irqmset_r(void) 92{ 93 return 0x00000010U; 94} 95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 falcon_falcon_irqmset_halt_f(u32 v) 112{ 113 
return (v & 0x1U) << 4U; 114} 115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 falcon_falcon_irqmclr_r(void) 128{ 129 return 0x00000014U; 130} 131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 falcon_falcon_irqmask_r(void) 168{ 169 return 0x00000018U; 170} 171static inline u32 falcon_falcon_irqdest_r(void) 172{ 173 return 0x0000001cU; 174} 175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 
falcon_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 falcon_falcon_curctx_r(void) 248{ 249 return 0x00000050U; 250} 251static inline u32 falcon_falcon_nxtctx_r(void) 252{ 253 return 0x00000054U; 254} 255static inline u32 falcon_falcon_mailbox0_r(void) 256{ 257 return 0x00000040U; 258} 259static inline u32 falcon_falcon_mailbox1_r(void) 260{ 261 return 0x00000044U; 262} 263static inline u32 falcon_falcon_itfen_r(void) 264{ 265 return 0x00000048U; 266} 267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 falcon_falcon_idlestate_r(void) 272{ 273 return 0x0000004cU; 274} 275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) 
276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 falcon_falcon_os_r(void) 284{ 285 return 0x00000080U; 286} 287static inline u32 falcon_falcon_engctl_r(void) 288{ 289 return 0x000000a4U; 290} 291static inline u32 falcon_falcon_cpuctl_r(void) 292{ 293 return 0x00000100U; 294} 295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) 300{ 301 return (v & 0x1U) << 2U; 302} 303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) 304{ 305 return (v & 0x1U) << 3U; 306} 307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) 308{ 309 return (v & 0x1U) << 4U; 310} 311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) 312{ 313 return 0x1U << 4U; 314} 315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) 316{ 317 return (r >> 4U) & 0x1U; 318} 319static inline u32 falcon_falcon_cpuctl_stopped_m(void) 320{ 321 return 0x1U << 5U; 322} 323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 324{ 325 return (v & 0x1U) << 6U; 326} 327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void) 328{ 329 return 0x1U << 6U; 330} 331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 332{ 333 return (r >> 6U) & 0x1U; 334} 335static inline u32 falcon_falcon_cpuctl_alias_r(void) 336{ 337 return 0x00000130U; 338} 339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) 340{ 341 return (v & 0x1U) << 1U; 342} 343static inline u32 falcon_falcon_imemc_r(u32 i) 344{ 345 return 0x00000180U + i*16U; 346} 347static inline u32 falcon_falcon_imemc_offs_f(u32 v) 348{ 349 return (v & 0x3fU) << 2U; 350} 351static inline u32 falcon_falcon_imemc_blk_f(u32 v) 352{ 353 return (v & 0xffU) << 8U; 354} 355static inline u32 falcon_falcon_imemc_aincw_f(u32 v) 356{ 357 return (v & 0x1U) << 24U; 358} 359static inline u32 
falcon_falcon_imemc_secure_f(u32 v) 360{ 361 return (v & 0x1U) << 28U; 362} 363static inline u32 falcon_falcon_imemd_r(u32 i) 364{ 365 return 0x00000184U + i*16U; 366} 367static inline u32 falcon_falcon_imemt_r(u32 i) 368{ 369 return 0x00000188U + i*16U; 370} 371static inline u32 falcon_falcon_sctl_r(void) 372{ 373 return 0x00000240U; 374} 375static inline u32 falcon_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 falcon_falcon_bootvec_r(void) 380{ 381 return 0x00000104U; 382} 383static inline u32 falcon_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 falcon_falcon_dmactl_r(void) 388{ 389 return 0x0000010cU; 390} 391static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) 400{ 401 return (v & 0x1U) << 0U; 402} 403static inline u32 falcon_falcon_hwcfg_r(void) 404{ 405 return 0x00000108U; 406} 407static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) 408{ 409 return (r >> 0U) & 0x1ffU; 410} 411static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) 412{ 413 return (r >> 9U) & 0x1ffU; 414} 415static inline u32 falcon_falcon_dmatrfbase_r(void) 416{ 417 return 0x00000110U; 418} 419static inline u32 falcon_falcon_dmatrfbase1_r(void) 420{ 421 return 0x00000128U; 422} 423static inline u32 falcon_falcon_dmatrfmoffs_r(void) 424{ 425 return 0x00000114U; 426} 427static inline u32 falcon_falcon_dmatrfcmd_r(void) 428{ 429 return 0x00000118U; 430} 431static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) 432{ 433 return (v & 0x1U) << 4U; 434} 435static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) 436{ 437 return (v & 0x1U) << 5U; 438} 439static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) 440{ 441 return (v & 0x7U) << 8U; 442} 443static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) 444{ 
445 return (v & 0x7U) << 12U; 446} 447static inline u32 falcon_falcon_dmatrffboffs_r(void) 448{ 449 return 0x0000011cU; 450} 451static inline u32 falcon_falcon_imctl_debug_r(void) 452{ 453 return 0x0000015cU; 454} 455static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) 456{ 457 return (v & 0xffffffU) << 0U; 458} 459static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) 460{ 461 return (v & 0x7U) << 24U; 462} 463static inline u32 falcon_falcon_imstat_r(void) 464{ 465 return 0x00000144U; 466} 467static inline u32 falcon_falcon_traceidx_r(void) 468{ 469 return 0x00000148U; 470} 471static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) 472{ 473 return (r >> 16U) & 0xffU; 474} 475static inline u32 falcon_falcon_traceidx_idx_f(u32 v) 476{ 477 return (v & 0xffU) << 0U; 478} 479static inline u32 falcon_falcon_tracepc_r(void) 480{ 481 return 0x0000014cU; 482} 483static inline u32 falcon_falcon_tracepc_pc_v(u32 r) 484{ 485 return (r >> 0U) & 0xffffffU; 486} 487static inline u32 falcon_falcon_exterraddr_r(void) 488{ 489 return 0x00000168U; 490} 491static inline u32 falcon_falcon_exterrstat_r(void) 492{ 493 return 0x0000016cU; 494} 495static inline u32 falcon_falcon_exterrstat_valid_m(void) 496{ 497 return 0x1U << 31U; 498} 499static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) 500{ 501 return (r >> 31U) & 0x1U; 502} 503static inline u32 falcon_falcon_exterrstat_valid_true_v(void) 504{ 505 return 0x00000001U; 506} 507static inline u32 falcon_falcon_icd_cmd_r(void) 508{ 509 return 0x00000200U; 510} 511static inline u32 falcon_falcon_icd_cmd_opc_s(void) 512{ 513 return 4U; 514} 515static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 516{ 517 return (v & 0xfU) << 0U; 518} 519static inline u32 falcon_falcon_icd_cmd_opc_m(void) 520{ 521 return 0xfU << 0U; 522} 523static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) 524{ 525 return (r >> 0U) & 0xfU; 526} 527static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) 528{ 529 return 0x8U; 530} 531static inline u32 
falcon_falcon_icd_cmd_opc_rstat_f(void) 532{ 533 return 0xeU; 534} 535static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) 536{ 537 return (v & 0x1fU) << 8U; 538} 539static inline u32 falcon_falcon_icd_rdata_r(void) 540{ 541 return 0x0000020cU; 542} 543static inline u32 falcon_falcon_dmemc_r(u32 i) 544{ 545 return 0x000001c0U + i*8U; 546} 547static inline u32 falcon_falcon_dmemc_offs_f(u32 v) 548{ 549 return (v & 0x3fU) << 2U; 550} 551static inline u32 falcon_falcon_dmemc_offs_m(void) 552{ 553 return 0x3fU << 2U; 554} 555static inline u32 falcon_falcon_dmemc_blk_f(u32 v) 556{ 557 return (v & 0xffU) << 8U; 558} 559static inline u32 falcon_falcon_dmemc_blk_m(void) 560{ 561 return 0xffU << 8U; 562} 563static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) 564{ 565 return (v & 0x1U) << 24U; 566} 567static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) 568{ 569 return (v & 0x1U) << 25U; 570} 571static inline u32 falcon_falcon_dmemd_r(u32 i) 572{ 573 return 0x000001c4U + i*8U; 574} 575static inline u32 falcon_falcon_debug1_r(void) 576{ 577 return 0x00000090U; 578} 579static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) 580{ 581 return 1U; 582} 583static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) 584{ 585 return (v & 0x1U) << 16U; 586} 587static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) 588{ 589 return 0x1U << 16U; 590} 591static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) 592{ 593 return (r >> 16U) & 0x1U; 594} 595static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 falcon_falcon_debuginfo_r(void) 600{ 601 return 0x00000094U; 602} 603#endif
diff --git a/include/nvgpu/hw/gv100/hw_fb_gv100.h b/include/nvgpu/hw/gv100/hw_fb_gv100.h
deleted file mode 100644
index ac248b5..0000000
--- a/include/nvgpu/hw/gv100/hw_fb_gv100.h
+++ /dev/null
@@ -1,1923 +0,0 @@ 1/* 2 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fb_gv100_h_ 57#define _hw_fb_gv100_h_ 58 59static inline u32 fb_fbhub_num_active_ltcs_r(void) 60{ 61 return 0x00100800U; 62} 63static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_f(u32 v) 64{ 65 return (v & 0xffU) << 16U; 66} 67static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_m(void) 68{ 69 return 0xffU << 16U; 70} 71static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_v(u32 r) 72{ 73 return (r >> 16U) & 0xffU; 74} 75static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_f(u32 v, u32 i) 76{ 77 return (v & 0x1U) << (16U + i*1U); 78} 79static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_m(u32 i) 80{ 81 return 0x1U << (16U + i*1U); 82} 83static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_v(u32 r, u32 i) 84{ 85 return (r >> (16U + i*1U)) & 0x1U; 86} 87static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer___size_1_v(void) 88{ 89 return 0x00000008U; 90} 91static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer___size_1_f(u32 i) 92{ 93 return 0x0U << (32U + i*1U); 94} 95static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_enabled_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_enabled_f(u32 i) 100{ 101 return 0x1U << (32U + i*1U); 102} 103static 
inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_disabled_v(void) 104{ 105 return 0x00000000U; 106} 107static inline u32 fb_fbhub_num_active_ltcs_use_nvlink_peer_disabled_f(u32 i) 108{ 109 return 0x0U << (32U + i*1U); 110} 111static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_f(u32 v) 112{ 113 return (v & 0x1U) << 25U; 114} 115static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_m(void) 116{ 117 return 0x1U << 25U; 118} 119static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_v(u32 r) 120{ 121 return (r >> 25U) & 0x1U; 122} 123static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_use_read_v(void) 124{ 125 return 0x00000000U; 126} 127static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_use_read_f(void) 128{ 129 return 0x0U; 130} 131static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_v(void) 132{ 133 return 0x00000001U; 134} 135static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_f(void) 136{ 137 return 0x2000000U; 138} 139static inline u32 fb_mmu_ctrl_r(void) 140{ 141 return 0x00100c80U; 142} 143static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) 144{ 145 return (r >> 15U) & 0x1U; 146} 147static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) 152{ 153 return (r >> 16U) & 0xffU; 154} 155static inline u32 fb_mmu_ctrl_atomic_capability_mode_f(u32 v) 156{ 157 return (v & 0x3U) << 24U; 158} 159static inline u32 fb_mmu_ctrl_atomic_capability_mode_m(void) 160{ 161 return 0x3U << 24U; 162} 163static inline u32 fb_mmu_ctrl_atomic_capability_mode_v(u32 r) 164{ 165 return (r >> 24U) & 0x3U; 166} 167static inline u32 fb_mmu_ctrl_atomic_capability_mode_l2_v(void) 168{ 169 return 0x00000000U; 170} 171static inline u32 fb_mmu_ctrl_atomic_capability_mode_l2_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 fb_mmu_ctrl_atomic_capability_mode_atomic_v(void) 176{ 177 return 0x00000001U; 178} 179static 
inline u32 fb_mmu_ctrl_atomic_capability_mode_atomic_f(void) 180{ 181 return 0x1000000U; 182} 183static inline u32 fb_mmu_ctrl_atomic_capability_mode_rmw_v(void) 184{ 185 return 0x00000002U; 186} 187static inline u32 fb_mmu_ctrl_atomic_capability_mode_rmw_f(void) 188{ 189 return 0x2000000U; 190} 191static inline u32 fb_mmu_ctrl_atomic_capability_mode_power_v(void) 192{ 193 return 0x00000003U; 194} 195static inline u32 fb_mmu_ctrl_atomic_capability_mode_power_f(void) 196{ 197 return 0x3000000U; 198} 199static inline u32 fb_hsmmu_pri_mmu_ctrl_r(void) 200{ 201 return 0x001fac80U; 202} 203static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_f(u32 v) 204{ 205 return (v & 0x3U) << 24U; 206} 207static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_m(void) 208{ 209 return 0x3U << 24U; 210} 211static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_v(u32 r) 212{ 213 return (r >> 24U) & 0x3U; 214} 215static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_l2_v(void) 216{ 217 return 0x00000000U; 218} 219static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_l2_f(void) 220{ 221 return 0x0U; 222} 223static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_atomic_v(void) 224{ 225 return 0x00000001U; 226} 227static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_atomic_f(void) 228{ 229 return 0x1000000U; 230} 231static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_rmw_v(void) 232{ 233 return 0x00000002U; 234} 235static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_rmw_f(void) 236{ 237 return 0x2000000U; 238} 239static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_power_v(void) 240{ 241 return 0x00000003U; 242} 243static inline u32 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_power_f(void) 244{ 245 return 0x3000000U; 246} 247static inline u32 fb_hsmmu_pri_mmu_debug_ctrl_r(void) 248{ 249 return 0x001facc4U; 250} 251static inline u32 fb_hsmmu_pri_mmu_debug_ctrl_debug_v(u32 r) 252{ 253 return (r >> 
16U) & 0x1U; 254} 255static inline u32 fb_hsmmu_pri_mmu_debug_ctrl_debug_m(void) 256{ 257 return 0x1U << 16U; 258} 259static inline u32 fb_hsmmu_pri_mmu_debug_ctrl_debug_enabled_f(void) 260{ 261 return 0x10000U; 262} 263static inline u32 fb_hsmmu_pri_mmu_debug_ctrl_debug_disabled_f(void) 264{ 265 return 0x0U; 266} 267static inline u32 fb_hshub_num_active_ltcs_r(void) 268{ 269 return 0x001fbc20U; 270} 271static inline u32 fb_hshub_num_active_ltcs_use_nvlink_f(u32 v) 272{ 273 return (v & 0xffU) << 16U; 274} 275static inline u32 fb_hshub_num_active_ltcs_use_nvlink_m(void) 276{ 277 return 0xffU << 16U; 278} 279static inline u32 fb_hshub_num_active_ltcs_use_nvlink_v(u32 r) 280{ 281 return (r >> 16U) & 0xffU; 282} 283static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_f(u32 v, u32 i) 284{ 285 return (v & 0x1U) << (16U + i*1U); 286} 287static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_m(u32 i) 288{ 289 return 0x1U << (16U + i*1U); 290} 291static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_v(u32 r, u32 i) 292{ 293 return (r >> (16U + i*1U)) & 0x1U; 294} 295static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer___size_1_v(void) 296{ 297 return 0x00000008U; 298} 299static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer___size_1_f(u32 i) 300{ 301 return 0x0U << (32U + i*1U); 302} 303static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_enabled_v(void) 304{ 305 return 0x00000001U; 306} 307static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_enabled_f(u32 i) 308{ 309 return 0x1U << (32U + i*1U); 310} 311static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_disabled_v(void) 312{ 313 return 0x00000000U; 314} 315static inline u32 fb_hshub_num_active_ltcs_use_nvlink_peer_disabled_f(u32 i) 316{ 317 return 0x0U << (32U + i*1U); 318} 319static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_f(u32 v) 320{ 321 return (v & 0x1U) << 25U; 322} 323static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_m(void) 324{ 325 return 
0x1U << 25U; 326} 327static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_v(u32 r) 328{ 329 return (r >> 25U) & 0x1U; 330} 331static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_read_v(void) 332{ 333 return 0x00000000U; 334} 335static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_read_f(void) 336{ 337 return 0x0U; 338} 339static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_v(void) 340{ 341 return 0x00000001U; 342} 343static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_f(void) 344{ 345 return 0x2000000U; 346} 347static inline u32 fb_priv_mmu_phy_secure_r(void) 348{ 349 return 0x00100ce4U; 350} 351static inline u32 fb_mmu_invalidate_pdb_r(void) 352{ 353 return 0x00100cb8U; 354} 355static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) 356{ 357 return 0x0U; 358} 359static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) 360{ 361 return 0x2U; 362} 363static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) 364{ 365 return (v & 0xfffffffU) << 4U; 366} 367static inline u32 fb_mmu_invalidate_r(void) 368{ 369 return 0x00100cbcU; 370} 371static inline u32 fb_mmu_invalidate_all_va_true_f(void) 372{ 373 return 0x1U; 374} 375static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) 376{ 377 return 0x2U; 378} 379static inline u32 fb_mmu_invalidate_hubtlb_only_s(void) 380{ 381 return 1U; 382} 383static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v) 384{ 385 return (v & 0x1U) << 2U; 386} 387static inline u32 fb_mmu_invalidate_hubtlb_only_m(void) 388{ 389 return 0x1U << 2U; 390} 391static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r) 392{ 393 return (r >> 2U) & 0x1U; 394} 395static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void) 396{ 397 return 0x4U; 398} 399static inline u32 fb_mmu_invalidate_replay_s(void) 400{ 401 return 3U; 402} 403static inline u32 fb_mmu_invalidate_replay_f(u32 v) 404{ 405 return (v & 0x7U) << 3U; 406} 407static inline u32 fb_mmu_invalidate_replay_m(void) 
408{ 409 return 0x7U << 3U; 410} 411static inline u32 fb_mmu_invalidate_replay_v(u32 r) 412{ 413 return (r >> 3U) & 0x7U; 414} 415static inline u32 fb_mmu_invalidate_replay_none_f(void) 416{ 417 return 0x0U; 418} 419static inline u32 fb_mmu_invalidate_replay_start_f(void) 420{ 421 return 0x8U; 422} 423static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void) 424{ 425 return 0x10U; 426} 427static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void) 428{ 429 return 0x20U; 430} 431static inline u32 fb_mmu_invalidate_sys_membar_s(void) 432{ 433 return 1U; 434} 435static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v) 436{ 437 return (v & 0x1U) << 6U; 438} 439static inline u32 fb_mmu_invalidate_sys_membar_m(void) 440{ 441 return 0x1U << 6U; 442} 443static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r) 444{ 445 return (r >> 6U) & 0x1U; 446} 447static inline u32 fb_mmu_invalidate_sys_membar_true_f(void) 448{ 449 return 0x40U; 450} 451static inline u32 fb_mmu_invalidate_ack_s(void) 452{ 453 return 2U; 454} 455static inline u32 fb_mmu_invalidate_ack_f(u32 v) 456{ 457 return (v & 0x3U) << 7U; 458} 459static inline u32 fb_mmu_invalidate_ack_m(void) 460{ 461 return 0x3U << 7U; 462} 463static inline u32 fb_mmu_invalidate_ack_v(u32 r) 464{ 465 return (r >> 7U) & 0x3U; 466} 467static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void) 468{ 469 return 0x0U; 470} 471static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void) 472{ 473 return 0x100U; 474} 475static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void) 476{ 477 return 0x80U; 478} 479static inline u32 fb_mmu_invalidate_cancel_client_id_s(void) 480{ 481 return 6U; 482} 483static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v) 484{ 485 return (v & 0x3fU) << 9U; 486} 487static inline u32 fb_mmu_invalidate_cancel_client_id_m(void) 488{ 489 return 0x3fU << 9U; 490} 491static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r) 492{ 493 return (r >> 9U) & 0x3fU; 494} 495static inline u32 
fb_mmu_invalidate_cancel_gpc_id_s(void) 496{ 497 return 5U; 498} 499static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v) 500{ 501 return (v & 0x1fU) << 15U; 502} 503static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void) 504{ 505 return 0x1fU << 15U; 506} 507static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r) 508{ 509 return (r >> 15U) & 0x1fU; 510} 511static inline u32 fb_mmu_invalidate_cancel_client_type_s(void) 512{ 513 return 1U; 514} 515static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v) 516{ 517 return (v & 0x1U) << 20U; 518} 519static inline u32 fb_mmu_invalidate_cancel_client_type_m(void) 520{ 521 return 0x1U << 20U; 522} 523static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r) 524{ 525 return (r >> 20U) & 0x1U; 526} 527static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void) 528{ 529 return 0x0U; 530} 531static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void) 532{ 533 return 0x100000U; 534} 535static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void) 536{ 537 return 3U; 538} 539static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v) 540{ 541 return (v & 0x7U) << 24U; 542} 543static inline u32 fb_mmu_invalidate_cancel_cache_level_m(void) 544{ 545 return 0x7U << 24U; 546} 547static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r) 548{ 549 return (r >> 24U) & 0x7U; 550} 551static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void) 552{ 553 return 0x0U; 554} 555static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void) 556{ 557 return 0x1000000U; 558} 559static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void) 560{ 561 return 0x2000000U; 562} 563static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void) 564{ 565 return 0x3000000U; 566} 567static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void) 568{ 569 return 0x4000000U; 570} 571static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void) 572{ 573 
return 0x5000000U; 574} 575static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void) 576{ 577 return 0x6000000U; 578} 579static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void) 580{ 581 return 0x7000000U; 582} 583static inline u32 fb_mmu_invalidate_trigger_s(void) 584{ 585 return 1U; 586} 587static inline u32 fb_mmu_invalidate_trigger_f(u32 v) 588{ 589 return (v & 0x1U) << 31U; 590} 591static inline u32 fb_mmu_invalidate_trigger_m(void) 592{ 593 return 0x1U << 31U; 594} 595static inline u32 fb_mmu_invalidate_trigger_v(u32 r) 596{ 597 return (r >> 31U) & 0x1U; 598} 599static inline u32 fb_mmu_invalidate_trigger_true_f(void) 600{ 601 return 0x80000000U; 602} 603static inline u32 fb_mmu_debug_wr_r(void) 604{ 605 return 0x00100cc8U; 606} 607static inline u32 fb_mmu_debug_wr_aperture_s(void) 608{ 609 return 2U; 610} 611static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) 612{ 613 return (v & 0x3U) << 0U; 614} 615static inline u32 fb_mmu_debug_wr_aperture_m(void) 616{ 617 return 0x3U << 0U; 618} 619static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) 620{ 621 return (r >> 0U) & 0x3U; 622} 623static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void) 624{ 625 return 0x0U; 626} 627static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) 628{ 629 return 0x2U; 630} 631static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) 632{ 633 return 0x3U; 634} 635static inline u32 fb_mmu_debug_wr_vol_false_f(void) 636{ 637 return 0x0U; 638} 639static inline u32 fb_mmu_debug_wr_vol_true_v(void) 640{ 641 return 0x00000001U; 642} 643static inline u32 fb_mmu_debug_wr_vol_true_f(void) 644{ 645 return 0x4U; 646} 647static inline u32 fb_mmu_debug_wr_addr_f(u32 v) 648{ 649 return (v & 0xfffffffU) << 4U; 650} 651static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) 652{ 653 return 0x0000000cU; 654} 655static inline u32 fb_mmu_debug_rd_r(void) 656{ 657 return 0x00100cccU; 658} 659static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) 660{ 661 
return 0x0U; 662} 663static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) 664{ 665 return 0x2U; 666} 667static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) 668{ 669 return 0x3U; 670} 671static inline u32 fb_mmu_debug_rd_vol_false_f(void) 672{ 673 return 0x0U; 674} 675static inline u32 fb_mmu_debug_rd_addr_f(u32 v) 676{ 677 return (v & 0xfffffffU) << 4U; 678} 679static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) 680{ 681 return 0x0000000cU; 682} 683static inline u32 fb_mmu_debug_ctrl_r(void) 684{ 685 return 0x00100cc4U; 686} 687static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) 688{ 689 return (r >> 16U) & 0x1U; 690} 691static inline u32 fb_mmu_debug_ctrl_debug_m(void) 692{ 693 return 0x1U << 16U; 694} 695static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) 696{ 697 return 0x00000001U; 698} 699static inline u32 fb_mmu_debug_ctrl_debug_enabled_f(void) 700{ 701 return 0x10000U; 702} 703static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) 704{ 705 return 0x00000000U; 706} 707static inline u32 fb_mmu_debug_ctrl_debug_disabled_f(void) 708{ 709 return 0x0U; 710} 711static inline u32 fb_niso_cfg1_r(void) 712{ 713 return 0x00100c14U; 714} 715static inline u32 fb_niso_cfg1_sysmem_nvlink_f(u32 v) 716{ 717 return (v & 0x1U) << 17U; 718} 719static inline u32 fb_niso_cfg1_sysmem_nvlink_m(void) 720{ 721 return 0x1U << 17U; 722} 723static inline u32 fb_niso_cfg1_sysmem_nvlink_v(u32 r) 724{ 725 return (r >> 17U) & 0x1U; 726} 727static inline u32 fb_niso_cfg1_sysmem_nvlink_enabled_v(void) 728{ 729 return 0x00000001U; 730} 731static inline u32 fb_niso_cfg1_sysmem_nvlink_enabled_f(void) 732{ 733 return 0x20000U; 734} 735static inline u32 fb_niso_flush_sysmem_addr_r(void) 736{ 737 return 0x00100c10U; 738} 739static inline u32 fb_niso_intr_r(void) 740{ 741 return 0x00100a20U; 742} 743static inline u32 fb_niso_intr_hub_access_counter_notify_m(void) 744{ 745 return 0x1U << 0U; 746} 747static inline u32 
fb_niso_intr_hub_access_counter_notify_pending_f(void) 748{ 749 return 0x1U; 750} 751static inline u32 fb_niso_intr_hub_access_counter_error_m(void) 752{ 753 return 0x1U << 1U; 754} 755static inline u32 fb_niso_intr_hub_access_counter_error_pending_f(void) 756{ 757 return 0x2U; 758} 759static inline u32 fb_niso_intr_mmu_replayable_fault_notify_m(void) 760{ 761 return 0x1U << 27U; 762} 763static inline u32 fb_niso_intr_mmu_replayable_fault_notify_pending_f(void) 764{ 765 return 0x8000000U; 766} 767static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_m(void) 768{ 769 return 0x1U << 28U; 770} 771static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_pending_f(void) 772{ 773 return 0x10000000U; 774} 775static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_m(void) 776{ 777 return 0x1U << 29U; 778} 779static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f(void) 780{ 781 return 0x20000000U; 782} 783static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_m(void) 784{ 785 return 0x1U << 30U; 786} 787static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f(void) 788{ 789 return 0x40000000U; 790} 791static inline u32 fb_niso_intr_mmu_other_fault_notify_m(void) 792{ 793 return 0x1U << 31U; 794} 795static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void) 796{ 797 return 0x80000000U; 798} 799static inline u32 fb_niso_intr_en_r(u32 i) 800{ 801 return 0x00100a24U + i*4U; 802} 803static inline u32 fb_niso_intr_en__size_1_v(void) 804{ 805 return 0x00000002U; 806} 807static inline u32 fb_niso_intr_en_hub_access_counter_notify_f(u32 v) 808{ 809 return (v & 0x1U) << 0U; 810} 811static inline u32 fb_niso_intr_en_hub_access_counter_notify_enabled_f(void) 812{ 813 return 0x1U; 814} 815static inline u32 fb_niso_intr_en_hub_access_counter_error_f(u32 v) 816{ 817 return (v & 0x1U) << 1U; 818} 819static inline u32 fb_niso_intr_en_hub_access_counter_error_enabled_f(void) 820{ 821 return 0x2U; 822} 823static inline u32 
fb_niso_intr_en_mmu_replayable_fault_notify_f(u32 v) 824{ 825 return (v & 0x1U) << 27U; 826} 827static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_enabled_f(void) 828{ 829 return 0x8000000U; 830} 831static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_f(u32 v) 832{ 833 return (v & 0x1U) << 28U; 834} 835static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_enabled_f(void) 836{ 837 return 0x10000000U; 838} 839static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_f(u32 v) 840{ 841 return (v & 0x1U) << 29U; 842} 843static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_enabled_f(void) 844{ 845 return 0x20000000U; 846} 847static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_f(u32 v) 848{ 849 return (v & 0x1U) << 30U; 850} 851static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_enabled_f(void) 852{ 853 return 0x40000000U; 854} 855static inline u32 fb_niso_intr_en_mmu_other_fault_notify_f(u32 v) 856{ 857 return (v & 0x1U) << 31U; 858} 859static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void) 860{ 861 return 0x80000000U; 862} 863static inline u32 fb_niso_intr_en_set_r(u32 i) 864{ 865 return 0x00100a2cU + i*4U; 866} 867static inline u32 fb_niso_intr_en_set__size_1_v(void) 868{ 869 return 0x00000002U; 870} 871static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_m(void) 872{ 873 return 0x1U << 0U; 874} 875static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_set_f(void) 876{ 877 return 0x1U; 878} 879static inline u32 fb_niso_intr_en_set_hub_access_counter_error_m(void) 880{ 881 return 0x1U << 1U; 882} 883static inline u32 fb_niso_intr_en_set_hub_access_counter_error_set_f(void) 884{ 885 return 0x2U; 886} 887static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_m(void) 888{ 889 return 0x1U << 27U; 890} 891static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_set_f(void) 892{ 893 return 0x8000000U; 894} 895static inline u32 
fb_niso_intr_en_set_mmu_replayable_fault_overflow_m(void) 896{ 897 return 0x1U << 28U; 898} 899static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_set_f(void) 900{ 901 return 0x10000000U; 902} 903static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m(void) 904{ 905 return 0x1U << 29U; 906} 907static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_set_f(void) 908{ 909 return 0x20000000U; 910} 911static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m(void) 912{ 913 return 0x1U << 30U; 914} 915static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_set_f(void) 916{ 917 return 0x40000000U; 918} 919static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_m(void) 920{ 921 return 0x1U << 31U; 922} 923static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void) 924{ 925 return 0x80000000U; 926} 927static inline u32 fb_niso_intr_en_clr_r(u32 i) 928{ 929 return 0x00100a34U + i*4U; 930} 931static inline u32 fb_niso_intr_en_clr__size_1_v(void) 932{ 933 return 0x00000002U; 934} 935static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_m(void) 936{ 937 return 0x1U << 0U; 938} 939static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_set_f(void) 940{ 941 return 0x1U; 942} 943static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_m(void) 944{ 945 return 0x1U << 1U; 946} 947static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_set_f(void) 948{ 949 return 0x2U; 950} 951static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m(void) 952{ 953 return 0x1U << 27U; 954} 955static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_set_f(void) 956{ 957 return 0x8000000U; 958} 959static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m(void) 960{ 961 return 0x1U << 28U; 962} 963static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_set_f(void) 964{ 965 return 0x10000000U; 966} 967static inline u32 
fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m(void) 968{ 969 return 0x1U << 29U; 970} 971static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_set_f(void) 972{ 973 return 0x20000000U; 974} 975static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m(void) 976{ 977 return 0x1U << 30U; 978} 979static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_set_f(void) 980{ 981 return 0x40000000U; 982} 983static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_m(void) 984{ 985 return 0x1U << 31U; 986} 987static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_set_f(void) 988{ 989 return 0x80000000U; 990} 991static inline u32 fb_niso_intr_en_clr_mmu_non_replay_fault_buffer_v(void) 992{ 993 return 0x00000000U; 994} 995static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void) 996{ 997 return 0x00000001U; 998} 999static inline u32 fb_mmu_fault_buffer_lo_r(u32 i) 1000{ 1001 return 0x00100e24U + i*20U; 1002} 1003static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void) 1004{ 1005 return 0x00000002U; 1006} 1007static inline u32 fb_mmu_fault_buffer_lo_addr_mode_f(u32 v) 1008{ 1009 return (v & 0x1U) << 0U; 1010} 1011static inline u32 fb_mmu_fault_buffer_lo_addr_mode_v(u32 r) 1012{ 1013 return (r >> 0U) & 0x1U; 1014} 1015static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_v(void) 1016{ 1017 return 0x00000000U; 1018} 1019static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_f(void) 1020{ 1021 return 0x0U; 1022} 1023static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_v(void) 1024{ 1025 return 0x00000001U; 1026} 1027static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_f(void) 1028{ 1029 return 0x1U; 1030} 1031static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_f(u32 v) 1032{ 1033 return (v & 0x3U) << 1U; 1034} 1035static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_v(u32 r) 1036{ 1037 return (r >> 1U) & 0x3U; 1038} 1039static inline u32 
fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_v(void) 1040{ 1041 return 0x00000002U; 1042} 1043static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_f(void) 1044{ 1045 return 0x4U; 1046} 1047static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_v(void) 1048{ 1049 return 0x00000003U; 1050} 1051static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_f(void) 1052{ 1053 return 0x6U; 1054} 1055static inline u32 fb_mmu_fault_buffer_lo_phys_vol_f(u32 v) 1056{ 1057 return (v & 0x1U) << 3U; 1058} 1059static inline u32 fb_mmu_fault_buffer_lo_phys_vol_v(u32 r) 1060{ 1061 return (r >> 3U) & 0x1U; 1062} 1063static inline u32 fb_mmu_fault_buffer_lo_addr_f(u32 v) 1064{ 1065 return (v & 0xfffffU) << 12U; 1066} 1067static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r) 1068{ 1069 return (r >> 12U) & 0xfffffU; 1070} 1071static inline u32 fb_mmu_fault_buffer_hi_r(u32 i) 1072{ 1073 return 0x00100e28U + i*20U; 1074} 1075static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void) 1076{ 1077 return 0x00000002U; 1078} 1079static inline u32 fb_mmu_fault_buffer_hi_addr_f(u32 v) 1080{ 1081 return (v & 0xffffffffU) << 0U; 1082} 1083static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r) 1084{ 1085 return (r >> 0U) & 0xffffffffU; 1086} 1087static inline u32 fb_mmu_fault_buffer_get_r(u32 i) 1088{ 1089 return 0x00100e2cU + i*20U; 1090} 1091static inline u32 fb_mmu_fault_buffer_get__size_1_v(void) 1092{ 1093 return 0x00000002U; 1094} 1095static inline u32 fb_mmu_fault_buffer_get_ptr_f(u32 v) 1096{ 1097 return (v & 0xfffffU) << 0U; 1098} 1099static inline u32 fb_mmu_fault_buffer_get_ptr_m(void) 1100{ 1101 return 0xfffffU << 0U; 1102} 1103static inline u32 fb_mmu_fault_buffer_get_ptr_v(u32 r) 1104{ 1105 return (r >> 0U) & 0xfffffU; 1106} 1107static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_f(u32 v) 1108{ 1109 return (v & 0x1U) << 30U; 1110} 1111static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_m(void) 1112{ 1113 return 0x1U << 30U; 1114} 1115static inline 
u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_v(void) 1116{ 1117 return 0x00000001U; 1118} 1119static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f(void) 1120{ 1121 return 0x40000000U; 1122} 1123static inline u32 fb_mmu_fault_buffer_get_overflow_f(u32 v) 1124{ 1125 return (v & 0x1U) << 31U; 1126} 1127static inline u32 fb_mmu_fault_buffer_get_overflow_m(void) 1128{ 1129 return 0x1U << 31U; 1130} 1131static inline u32 fb_mmu_fault_buffer_get_overflow_clear_v(void) 1132{ 1133 return 0x00000001U; 1134} 1135static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void) 1136{ 1137 return 0x80000000U; 1138} 1139static inline u32 fb_mmu_fault_buffer_put_r(u32 i) 1140{ 1141 return 0x00100e30U + i*20U; 1142} 1143static inline u32 fb_mmu_fault_buffer_put__size_1_v(void) 1144{ 1145 return 0x00000002U; 1146} 1147static inline u32 fb_mmu_fault_buffer_put_ptr_f(u32 v) 1148{ 1149 return (v & 0xfffffU) << 0U; 1150} 1151static inline u32 fb_mmu_fault_buffer_put_ptr_v(u32 r) 1152{ 1153 return (r >> 0U) & 0xfffffU; 1154} 1155static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_f(u32 v) 1156{ 1157 return (v & 0x1U) << 30U; 1158} 1159static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_v(u32 r) 1160{ 1161 return (r >> 30U) & 0x1U; 1162} 1163static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_v(void) 1164{ 1165 return 0x00000001U; 1166} 1167static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_f(void) 1168{ 1169 return 0x40000000U; 1170} 1171static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_v(void) 1172{ 1173 return 0x00000000U; 1174} 1175static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_f(void) 1176{ 1177 return 0x0U; 1178} 1179static inline u32 fb_mmu_fault_buffer_put_overflow_f(u32 v) 1180{ 1181 return (v & 0x1U) << 31U; 1182} 1183static inline u32 fb_mmu_fault_buffer_put_overflow_v(u32 r) 1184{ 1185 return (r >> 31U) & 0x1U; 1186} 1187static inline u32 fb_mmu_fault_buffer_put_overflow_yes_v(void) 1188{ 1189 
return 0x00000001U; 1190} 1191static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void) 1192{ 1193 return 0x80000000U; 1194} 1195static inline u32 fb_mmu_fault_buffer_size_r(u32 i) 1196{ 1197 return 0x00100e34U + i*20U; 1198} 1199static inline u32 fb_mmu_fault_buffer_size__size_1_v(void) 1200{ 1201 return 0x00000002U; 1202} 1203static inline u32 fb_mmu_fault_buffer_size_val_f(u32 v) 1204{ 1205 return (v & 0xfffffU) << 0U; 1206} 1207static inline u32 fb_mmu_fault_buffer_size_val_v(u32 r) 1208{ 1209 return (r >> 0U) & 0xfffffU; 1210} 1211static inline u32 fb_mmu_fault_buffer_size_overflow_intr_f(u32 v) 1212{ 1213 return (v & 0x1U) << 29U; 1214} 1215static inline u32 fb_mmu_fault_buffer_size_overflow_intr_v(u32 r) 1216{ 1217 return (r >> 29U) & 0x1U; 1218} 1219static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_v(void) 1220{ 1221 return 0x00000001U; 1222} 1223static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_f(void) 1224{ 1225 return 0x20000000U; 1226} 1227static inline u32 fb_mmu_fault_buffer_size_set_default_f(u32 v) 1228{ 1229 return (v & 0x1U) << 30U; 1230} 1231static inline u32 fb_mmu_fault_buffer_size_set_default_v(u32 r) 1232{ 1233 return (r >> 30U) & 0x1U; 1234} 1235static inline u32 fb_mmu_fault_buffer_size_set_default_yes_v(void) 1236{ 1237 return 0x00000001U; 1238} 1239static inline u32 fb_mmu_fault_buffer_size_set_default_yes_f(void) 1240{ 1241 return 0x40000000U; 1242} 1243static inline u32 fb_mmu_fault_buffer_size_enable_f(u32 v) 1244{ 1245 return (v & 0x1U) << 31U; 1246} 1247static inline u32 fb_mmu_fault_buffer_size_enable_m(void) 1248{ 1249 return 0x1U << 31U; 1250} 1251static inline u32 fb_mmu_fault_buffer_size_enable_v(u32 r) 1252{ 1253 return (r >> 31U) & 0x1U; 1254} 1255static inline u32 fb_mmu_fault_buffer_size_enable_true_v(void) 1256{ 1257 return 0x00000001U; 1258} 1259static inline u32 fb_mmu_fault_buffer_size_enable_true_f(void) 1260{ 1261 return 0x80000000U; 1262} 1263static inline u32 
fb_mmu_fault_addr_lo_r(void) 1264{ 1265 return 0x00100e4cU; 1266} 1267static inline u32 fb_mmu_fault_addr_lo_phys_aperture_f(u32 v) 1268{ 1269 return (v & 0x3U) << 0U; 1270} 1271static inline u32 fb_mmu_fault_addr_lo_phys_aperture_v(u32 r) 1272{ 1273 return (r >> 0U) & 0x3U; 1274} 1275static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_v(void) 1276{ 1277 return 0x00000002U; 1278} 1279static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_f(void) 1280{ 1281 return 0x2U; 1282} 1283static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_v(void) 1284{ 1285 return 0x00000003U; 1286} 1287static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_f(void) 1288{ 1289 return 0x3U; 1290} 1291static inline u32 fb_mmu_fault_addr_lo_addr_f(u32 v) 1292{ 1293 return (v & 0xfffffU) << 12U; 1294} 1295static inline u32 fb_mmu_fault_addr_lo_addr_v(u32 r) 1296{ 1297 return (r >> 12U) & 0xfffffU; 1298} 1299static inline u32 fb_mmu_fault_addr_hi_r(void) 1300{ 1301 return 0x00100e50U; 1302} 1303static inline u32 fb_mmu_fault_addr_hi_addr_f(u32 v) 1304{ 1305 return (v & 0xffffffffU) << 0U; 1306} 1307static inline u32 fb_mmu_fault_addr_hi_addr_v(u32 r) 1308{ 1309 return (r >> 0U) & 0xffffffffU; 1310} 1311static inline u32 fb_mmu_fault_inst_lo_r(void) 1312{ 1313 return 0x00100e54U; 1314} 1315static inline u32 fb_mmu_fault_inst_lo_engine_id_v(u32 r) 1316{ 1317 return (r >> 0U) & 0x1ffU; 1318} 1319static inline u32 fb_mmu_fault_inst_lo_aperture_v(u32 r) 1320{ 1321 return (r >> 10U) & 0x3U; 1322} 1323static inline u32 fb_mmu_fault_inst_lo_aperture_sys_coh_v(void) 1324{ 1325 return 0x00000002U; 1326} 1327static inline u32 fb_mmu_fault_inst_lo_aperture_sys_nocoh_v(void) 1328{ 1329 return 0x00000003U; 1330} 1331static inline u32 fb_mmu_fault_inst_lo_addr_f(u32 v) 1332{ 1333 return (v & 0xfffffU) << 12U; 1334} 1335static inline u32 fb_mmu_fault_inst_lo_addr_v(u32 r) 1336{ 1337 return (r >> 12U) & 0xfffffU; 1338} 1339static inline u32 fb_mmu_fault_inst_hi_r(void) 1340{ 1341 
return 0x00100e58U; 1342} 1343static inline u32 fb_mmu_fault_inst_hi_addr_v(u32 r) 1344{ 1345 return (r >> 0U) & 0xffffffffU; 1346} 1347static inline u32 fb_mmu_fault_info_r(void) 1348{ 1349 return 0x00100e5cU; 1350} 1351static inline u32 fb_mmu_fault_info_fault_type_v(u32 r) 1352{ 1353 return (r >> 0U) & 0x1fU; 1354} 1355static inline u32 fb_mmu_fault_info_replayable_fault_v(u32 r) 1356{ 1357 return (r >> 7U) & 0x1U; 1358} 1359static inline u32 fb_mmu_fault_info_client_v(u32 r) 1360{ 1361 return (r >> 8U) & 0x7fU; 1362} 1363static inline u32 fb_mmu_fault_info_access_type_v(u32 r) 1364{ 1365 return (r >> 16U) & 0xfU; 1366} 1367static inline u32 fb_mmu_fault_info_client_type_v(u32 r) 1368{ 1369 return (r >> 20U) & 0x1U; 1370} 1371static inline u32 fb_mmu_fault_info_gpc_id_v(u32 r) 1372{ 1373 return (r >> 24U) & 0x1fU; 1374} 1375static inline u32 fb_mmu_fault_info_protected_mode_v(u32 r) 1376{ 1377 return (r >> 29U) & 0x1U; 1378} 1379static inline u32 fb_mmu_fault_info_replayable_fault_en_v(u32 r) 1380{ 1381 return (r >> 30U) & 0x1U; 1382} 1383static inline u32 fb_mmu_fault_info_valid_v(u32 r) 1384{ 1385 return (r >> 31U) & 0x1U; 1386} 1387static inline u32 fb_mmu_fault_status_r(void) 1388{ 1389 return 0x00100e60U; 1390} 1391static inline u32 fb_mmu_fault_status_dropped_bar1_phys_m(void) 1392{ 1393 return 0x1U << 0U; 1394} 1395static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_v(void) 1396{ 1397 return 0x00000001U; 1398} 1399static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_f(void) 1400{ 1401 return 0x1U; 1402} 1403static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_v(void) 1404{ 1405 return 0x00000001U; 1406} 1407static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_f(void) 1408{ 1409 return 0x1U; 1410} 1411static inline u32 fb_mmu_fault_status_dropped_bar1_virt_m(void) 1412{ 1413 return 0x1U << 1U; 1414} 1415static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_v(void) 1416{ 1417 return 0x00000001U; 1418} 1419static inline 
u32 fb_mmu_fault_status_dropped_bar1_virt_set_f(void) 1420{ 1421 return 0x2U; 1422} 1423static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_v(void) 1424{ 1425 return 0x00000001U; 1426} 1427static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_f(void) 1428{ 1429 return 0x2U; 1430} 1431static inline u32 fb_mmu_fault_status_dropped_bar2_phys_m(void) 1432{ 1433 return 0x1U << 2U; 1434} 1435static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_v(void) 1436{ 1437 return 0x00000001U; 1438} 1439static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_f(void) 1440{ 1441 return 0x4U; 1442} 1443static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_v(void) 1444{ 1445 return 0x00000001U; 1446} 1447static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_f(void) 1448{ 1449 return 0x4U; 1450} 1451static inline u32 fb_mmu_fault_status_dropped_bar2_virt_m(void) 1452{ 1453 return 0x1U << 3U; 1454} 1455static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_v(void) 1456{ 1457 return 0x00000001U; 1458} 1459static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_f(void) 1460{ 1461 return 0x8U; 1462} 1463static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_v(void) 1464{ 1465 return 0x00000001U; 1466} 1467static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_f(void) 1468{ 1469 return 0x8U; 1470} 1471static inline u32 fb_mmu_fault_status_dropped_ifb_phys_m(void) 1472{ 1473 return 0x1U << 4U; 1474} 1475static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_v(void) 1476{ 1477 return 0x00000001U; 1478} 1479static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_f(void) 1480{ 1481 return 0x10U; 1482} 1483static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_v(void) 1484{ 1485 return 0x00000001U; 1486} 1487static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_f(void) 1488{ 1489 return 0x10U; 1490} 1491static inline u32 fb_mmu_fault_status_dropped_ifb_virt_m(void) 1492{ 1493 return 0x1U << 5U; 1494} 1495static 
inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_v(void) 1496{ 1497 return 0x00000001U; 1498} 1499static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_f(void) 1500{ 1501 return 0x20U; 1502} 1503static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_v(void) 1504{ 1505 return 0x00000001U; 1506} 1507static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_f(void) 1508{ 1509 return 0x20U; 1510} 1511static inline u32 fb_mmu_fault_status_dropped_other_phys_m(void) 1512{ 1513 return 0x1U << 6U; 1514} 1515static inline u32 fb_mmu_fault_status_dropped_other_phys_set_v(void) 1516{ 1517 return 0x00000001U; 1518} 1519static inline u32 fb_mmu_fault_status_dropped_other_phys_set_f(void) 1520{ 1521 return 0x40U; 1522} 1523static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_v(void) 1524{ 1525 return 0x00000001U; 1526} 1527static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_f(void) 1528{ 1529 return 0x40U; 1530} 1531static inline u32 fb_mmu_fault_status_dropped_other_virt_m(void) 1532{ 1533 return 0x1U << 7U; 1534} 1535static inline u32 fb_mmu_fault_status_dropped_other_virt_set_v(void) 1536{ 1537 return 0x00000001U; 1538} 1539static inline u32 fb_mmu_fault_status_dropped_other_virt_set_f(void) 1540{ 1541 return 0x80U; 1542} 1543static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_v(void) 1544{ 1545 return 0x00000001U; 1546} 1547static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_f(void) 1548{ 1549 return 0x80U; 1550} 1551static inline u32 fb_mmu_fault_status_replayable_m(void) 1552{ 1553 return 0x1U << 8U; 1554} 1555static inline u32 fb_mmu_fault_status_replayable_set_v(void) 1556{ 1557 return 0x00000001U; 1558} 1559static inline u32 fb_mmu_fault_status_replayable_set_f(void) 1560{ 1561 return 0x100U; 1562} 1563static inline u32 fb_mmu_fault_status_replayable_reset_f(void) 1564{ 1565 return 0x0U; 1566} 1567static inline u32 fb_mmu_fault_status_non_replayable_m(void) 1568{ 1569 return 0x1U << 9U; 1570} 1571static inline 
u32 fb_mmu_fault_status_non_replayable_set_v(void) 1572{ 1573 return 0x00000001U; 1574} 1575static inline u32 fb_mmu_fault_status_non_replayable_set_f(void) 1576{ 1577 return 0x200U; 1578} 1579static inline u32 fb_mmu_fault_status_non_replayable_reset_f(void) 1580{ 1581 return 0x0U; 1582} 1583static inline u32 fb_mmu_fault_status_replayable_error_m(void) 1584{ 1585 return 0x1U << 10U; 1586} 1587static inline u32 fb_mmu_fault_status_replayable_error_set_v(void) 1588{ 1589 return 0x00000001U; 1590} 1591static inline u32 fb_mmu_fault_status_replayable_error_set_f(void) 1592{ 1593 return 0x400U; 1594} 1595static inline u32 fb_mmu_fault_status_replayable_error_reset_f(void) 1596{ 1597 return 0x0U; 1598} 1599static inline u32 fb_mmu_fault_status_non_replayable_error_m(void) 1600{ 1601 return 0x1U << 11U; 1602} 1603static inline u32 fb_mmu_fault_status_non_replayable_error_set_v(void) 1604{ 1605 return 0x00000001U; 1606} 1607static inline u32 fb_mmu_fault_status_non_replayable_error_set_f(void) 1608{ 1609 return 0x800U; 1610} 1611static inline u32 fb_mmu_fault_status_non_replayable_error_reset_f(void) 1612{ 1613 return 0x0U; 1614} 1615static inline u32 fb_mmu_fault_status_replayable_overflow_m(void) 1616{ 1617 return 0x1U << 12U; 1618} 1619static inline u32 fb_mmu_fault_status_replayable_overflow_set_v(void) 1620{ 1621 return 0x00000001U; 1622} 1623static inline u32 fb_mmu_fault_status_replayable_overflow_set_f(void) 1624{ 1625 return 0x1000U; 1626} 1627static inline u32 fb_mmu_fault_status_replayable_overflow_reset_f(void) 1628{ 1629 return 0x0U; 1630} 1631static inline u32 fb_mmu_fault_status_non_replayable_overflow_m(void) 1632{ 1633 return 0x1U << 13U; 1634} 1635static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_v(void) 1636{ 1637 return 0x00000001U; 1638} 1639static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_f(void) 1640{ 1641 return 0x2000U; 1642} 1643static inline u32 fb_mmu_fault_status_non_replayable_overflow_reset_f(void) 1644{ 
1645 return 0x0U; 1646} 1647static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_m(void) 1648{ 1649 return 0x1U << 14U; 1650} 1651static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_v(void) 1652{ 1653 return 0x00000001U; 1654} 1655static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_f(void) 1656{ 1657 return 0x4000U; 1658} 1659static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_m(void) 1660{ 1661 return 0x1U << 15U; 1662} 1663static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_v(void) 1664{ 1665 return 0x00000001U; 1666} 1667static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_f(void) 1668{ 1669 return 0x8000U; 1670} 1671static inline u32 fb_mmu_fault_status_busy_m(void) 1672{ 1673 return 0x1U << 30U; 1674} 1675static inline u32 fb_mmu_fault_status_busy_true_v(void) 1676{ 1677 return 0x00000001U; 1678} 1679static inline u32 fb_mmu_fault_status_busy_true_f(void) 1680{ 1681 return 0x40000000U; 1682} 1683static inline u32 fb_mmu_fault_status_valid_m(void) 1684{ 1685 return 0x1U << 31U; 1686} 1687static inline u32 fb_mmu_fault_status_valid_set_v(void) 1688{ 1689 return 0x00000001U; 1690} 1691static inline u32 fb_mmu_fault_status_valid_set_f(void) 1692{ 1693 return 0x80000000U; 1694} 1695static inline u32 fb_mmu_fault_status_valid_clear_v(void) 1696{ 1697 return 0x00000001U; 1698} 1699static inline u32 fb_mmu_fault_status_valid_clear_f(void) 1700{ 1701 return 0x80000000U; 1702} 1703static inline u32 fb_mmu_local_memory_range_r(void) 1704{ 1705 return 0x00100ce0U; 1706} 1707static inline u32 fb_mmu_local_memory_range_lower_scale_v(u32 r) 1708{ 1709 return (r >> 0U) & 0xfU; 1710} 1711static inline u32 fb_mmu_local_memory_range_lower_mag_v(u32 r) 1712{ 1713 return (r >> 4U) & 0x3fU; 1714} 1715static inline u32 fb_mmu_local_memory_range_ecc_mode_v(u32 r) 1716{ 1717 return (r >> 30U) & 0x1U; 1718} 1719static inline u32 fb_niso_scrub_status_r(void) 1720{ 1721 return 
0x00100b20U; 1722} 1723static inline u32 fb_niso_scrub_status_flag_v(u32 r) 1724{ 1725 return (r >> 0U) & 0x1U; 1726} 1727static inline u32 fb_mmu_priv_level_mask_r(void) 1728{ 1729 return 0x00100cdcU; 1730} 1731static inline u32 fb_mmu_priv_level_mask_write_violation_f(u32 v) 1732{ 1733 return (v & 0x1U) << 7U; 1734} 1735static inline u32 fb_mmu_priv_level_mask_write_violation_m(void) 1736{ 1737 return 0x1U << 7U; 1738} 1739static inline u32 fb_mmu_priv_level_mask_write_violation_v(u32 r) 1740{ 1741 return (r >> 7U) & 0x1U; 1742} 1743static inline u32 fb_hshub_config0_r(void) 1744{ 1745 return 0x001fbc00U; 1746} 1747static inline u32 fb_hshub_config0_sysmem_nvlink_mask_f(u32 v) 1748{ 1749 return (v & 0xffffU) << 0U; 1750} 1751static inline u32 fb_hshub_config0_sysmem_nvlink_mask_m(void) 1752{ 1753 return 0xffffU << 0U; 1754} 1755static inline u32 fb_hshub_config0_sysmem_nvlink_mask_v(u32 r) 1756{ 1757 return (r >> 0U) & 0xffffU; 1758} 1759static inline u32 fb_hshub_config0_peer_pcie_mask_f(u32 v) 1760{ 1761 return (v & 0xffffU) << 16U; 1762} 1763static inline u32 fb_hshub_config0_peer_pcie_mask_v(u32 r) 1764{ 1765 return (r >> 16U) & 0xffffU; 1766} 1767static inline u32 fb_hshub_config1_r(void) 1768{ 1769 return 0x001fbc04U; 1770} 1771static inline u32 fb_hshub_config1_peer_0_nvlink_mask_f(u32 v) 1772{ 1773 return (v & 0xffU) << 0U; 1774} 1775static inline u32 fb_hshub_config1_peer_0_nvlink_mask_v(u32 r) 1776{ 1777 return (r >> 0U) & 0xffU; 1778} 1779static inline u32 fb_hshub_config1_peer_1_nvlink_mask_f(u32 v) 1780{ 1781 return (v & 0xffU) << 8U; 1782} 1783static inline u32 fb_hshub_config1_peer_1_nvlink_mask_v(u32 r) 1784{ 1785 return (r >> 8U) & 0xffU; 1786} 1787static inline u32 fb_hshub_config1_peer_2_nvlink_mask_f(u32 v) 1788{ 1789 return (v & 0xffU) << 16U; 1790} 1791static inline u32 fb_hshub_config1_peer_2_nvlink_mask_v(u32 r) 1792{ 1793 return (r >> 16U) & 0xffU; 1794} 1795static inline u32 fb_hshub_config1_peer_3_nvlink_mask_f(u32 v) 1796{ 1797 return 
(v & 0xffU) << 24U; 1798} 1799static inline u32 fb_hshub_config1_peer_3_nvlink_mask_v(u32 r) 1800{ 1801 return (r >> 24U) & 0xffU; 1802} 1803static inline u32 fb_hshub_config2_r(void) 1804{ 1805 return 0x001fbc08U; 1806} 1807static inline u32 fb_hshub_config2_peer_4_nvlink_mask_f(u32 v) 1808{ 1809 return (v & 0xffU) << 0U; 1810} 1811static inline u32 fb_hshub_config2_peer_4_nvlink_mask_v(u32 r) 1812{ 1813 return (r >> 0U) & 0xffU; 1814} 1815static inline u32 fb_hshub_config2_peer_5_nvlink_mask_f(u32 v) 1816{ 1817 return (v & 0xffU) << 8U; 1818} 1819static inline u32 fb_hshub_config2_peer_5_nvlink_mask_v(u32 r) 1820{ 1821 return (r >> 8U) & 0xffU; 1822} 1823static inline u32 fb_hshub_config2_peer_6_nvlink_mask_f(u32 v) 1824{ 1825 return (v & 0xffU) << 16U; 1826} 1827static inline u32 fb_hshub_config2_peer_6_nvlink_mask_v(u32 r) 1828{ 1829 return (r >> 16U) & 0xffU; 1830} 1831static inline u32 fb_hshub_config2_peer_7_nvlink_mask_f(u32 v) 1832{ 1833 return (v & 0xffU) << 24U; 1834} 1835static inline u32 fb_hshub_config2_peer_7_nvlink_mask_v(u32 r) 1836{ 1837 return (r >> 24U) & 0xffU; 1838} 1839static inline u32 fb_hshub_config6_r(void) 1840{ 1841 return 0x001fbc18U; 1842} 1843static inline u32 fb_hshub_config7_r(void) 1844{ 1845 return 0x001fbc1cU; 1846} 1847static inline u32 fb_hshub_config7_nvlink_logical_0_physical_portmap_f(u32 v) 1848{ 1849 return (v & 0xfU) << 0U; 1850} 1851static inline u32 fb_hshub_config7_nvlink_logical_0_physical_portmap_v(u32 r) 1852{ 1853 return (r >> 0U) & 0xfU; 1854} 1855static inline u32 fb_hshub_config7_nvlink_logical_1_physical_portmap_f(u32 v) 1856{ 1857 return (v & 0xfU) << 4U; 1858} 1859static inline u32 fb_hshub_config7_nvlink_logical_1_physical_portmap_v(u32 r) 1860{ 1861 return (r >> 4U) & 0xfU; 1862} 1863static inline u32 fb_hshub_config7_nvlink_logical_2_physical_portmap_f(u32 v) 1864{ 1865 return (v & 0xfU) << 8U; 1866} 1867static inline u32 fb_hshub_config7_nvlink_logical_2_physical_portmap_v(u32 r) 1868{ 1869 return (r >> 
8U) & 0xfU; 1870} 1871static inline u32 fb_hshub_config7_nvlink_logical_3_physical_portmap_f(u32 v) 1872{ 1873 return (v & 0xfU) << 12U; 1874} 1875static inline u32 fb_hshub_config7_nvlink_logical_3_physical_portmap_v(u32 r) 1876{ 1877 return (r >> 12U) & 0xfU; 1878} 1879static inline u32 fb_hshub_config7_nvlink_logical_4_physical_portmap_f(u32 v) 1880{ 1881 return (v & 0xfU) << 16U; 1882} 1883static inline u32 fb_hshub_config7_nvlink_logical_4_physical_portmap_v(u32 r) 1884{ 1885 return (r >> 16U) & 0xfU; 1886} 1887static inline u32 fb_hshub_config7_nvlink_logical_5_physical_portmap_f(u32 v) 1888{ 1889 return (v & 0xfU) << 20U; 1890} 1891static inline u32 fb_hshub_config7_nvlink_logical_5_physical_portmap_v(u32 r) 1892{ 1893 return (r >> 20U) & 0xfU; 1894} 1895static inline u32 fb_hshub_config7_nvlink_logical_6_physical_portmap_f(u32 v) 1896{ 1897 return (v & 0xfU) << 24U; 1898} 1899static inline u32 fb_hshub_config7_nvlink_logical_6_physical_portmap_v(u32 r) 1900{ 1901 return (r >> 24U) & 0xfU; 1902} 1903static inline u32 fb_hshub_config7_nvlink_logical_7_physical_portmap_f(u32 v) 1904{ 1905 return (v & 0xfU) << 28U; 1906} 1907static inline u32 fb_hshub_config7_nvlink_logical_7_physical_portmap_v(u32 r) 1908{ 1909 return (r >> 28U) & 0xfU; 1910} 1911static inline u32 fb_hshub_nvl_cfg_priv_level_mask_r(void) 1912{ 1913 return 0x001fbc50U; 1914} 1915static inline u32 fb_hshub_nvl_cfg_priv_level_mask_write_protection_f(u32 v) 1916{ 1917 return (v & 0x7U) << 4U; 1918} 1919static inline u32 fb_hshub_nvl_cfg_priv_level_mask_write_protection_v(u32 r) 1920{ 1921 return (r >> 4U) & 0x7U; 1922} 1923#endif
diff --git a/include/nvgpu/hw/gv100/hw_fifo_gv100.h b/include/nvgpu/hw/gv100/hw_fifo_gv100.h
deleted file mode 100644
index 4e9b590..0000000
--- a/include/nvgpu/hw/gv100/hw_fifo_gv100.h
+++ /dev/null
@@ -1,531 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fifo_gv100_h_ 57#define _hw_fifo_gv100_h_ 58 59static inline u32 fifo_bar1_base_r(void) 60{ 61 return 0x00002254U; 62} 63static inline u32 fifo_bar1_base_ptr_f(u32 v) 64{ 65 return (v & 0xfffffffU) << 0U; 66} 67static inline u32 fifo_bar1_base_ptr_align_shift_v(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 fifo_bar1_base_valid_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fifo_bar1_base_valid_true_f(void) 76{ 77 return 0x10000000U; 78} 79static inline u32 fifo_userd_writeback_r(void) 80{ 81 return 0x0000225cU; 82} 83static inline u32 fifo_userd_writeback_timer_f(u32 v) 84{ 85 return (v & 0xffU) << 0U; 86} 87static inline u32 fifo_userd_writeback_timer_disabled_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 fifo_userd_writeback_timer_shorter_v(void) 92{ 93 return 0x00000003U; 94} 95static inline u32 fifo_userd_writeback_timer_100us_v(void) 96{ 97 return 0x00000064U; 98} 99static inline u32 fifo_userd_writeback_timescale_f(u32 v) 100{ 101 return (v & 0xfU) << 12U; 102} 103static inline u32 fifo_userd_writeback_timescale_0_v(void) 104{ 105 return 0x00000000U; 106} 107static inline u32 fifo_runlist_base_r(void) 108{ 109 return 0x00002270U; 110} 111static inline u32 fifo_runlist_base_ptr_f(u32 v) 112{ 
113 return (v & 0xfffffffU) << 0U; 114} 115static inline u32 fifo_runlist_base_target_vid_mem_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) 124{ 125 return 0x30000000U; 126} 127static inline u32 fifo_runlist_r(void) 128{ 129 return 0x00002274U; 130} 131static inline u32 fifo_runlist_engine_f(u32 v) 132{ 133 return (v & 0xfU) << 20U; 134} 135static inline u32 fifo_eng_runlist_base_r(u32 i) 136{ 137 return 0x00002280U + i*8U; 138} 139static inline u32 fifo_eng_runlist_base__size_1_v(void) 140{ 141 return 0x0000000dU; 142} 143static inline u32 fifo_eng_runlist_r(u32 i) 144{ 145 return 0x00002284U + i*8U; 146} 147static inline u32 fifo_eng_runlist__size_1_v(void) 148{ 149 return 0x0000000dU; 150} 151static inline u32 fifo_eng_runlist_length_f(u32 v) 152{ 153 return (v & 0xffffU) << 0U; 154} 155static inline u32 fifo_eng_runlist_length_max_v(void) 156{ 157 return 0x0000ffffU; 158} 159static inline u32 fifo_eng_runlist_pending_true_f(void) 160{ 161 return 0x100000U; 162} 163static inline u32 fifo_pb_timeslice_r(u32 i) 164{ 165 return 0x00002350U + i*4U; 166} 167static inline u32 fifo_pb_timeslice_timeout_16_f(void) 168{ 169 return 0x10U; 170} 171static inline u32 fifo_pb_timeslice_timescale_0_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 fifo_pb_timeslice_enable_true_f(void) 176{ 177 return 0x10000000U; 178} 179static inline u32 fifo_pbdma_map_r(u32 i) 180{ 181 return 0x00002390U + i*4U; 182} 183static inline u32 fifo_intr_0_r(void) 184{ 185 return 0x00002100U; 186} 187static inline u32 fifo_intr_0_bind_error_pending_f(void) 188{ 189 return 0x1U; 190} 191static inline u32 fifo_intr_0_bind_error_reset_f(void) 192{ 193 return 0x1U; 194} 195static inline u32 fifo_intr_0_sched_error_pending_f(void) 196{ 197 return 0x100U; 198} 199static inline u32 fifo_intr_0_sched_error_reset_f(void) 200{ 201 return 0x100U; 
202} 203static inline u32 fifo_intr_0_chsw_error_pending_f(void) 204{ 205 return 0x10000U; 206} 207static inline u32 fifo_intr_0_chsw_error_reset_f(void) 208{ 209 return 0x10000U; 210} 211static inline u32 fifo_intr_0_memop_timeout_pending_f(void) 212{ 213 return 0x800000U; 214} 215static inline u32 fifo_intr_0_memop_timeout_reset_f(void) 216{ 217 return 0x800000U; 218} 219static inline u32 fifo_intr_0_lb_error_pending_f(void) 220{ 221 return 0x1000000U; 222} 223static inline u32 fifo_intr_0_lb_error_reset_f(void) 224{ 225 return 0x1000000U; 226} 227static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) 228{ 229 return 0x20000000U; 230} 231static inline u32 fifo_intr_0_runlist_event_pending_f(void) 232{ 233 return 0x40000000U; 234} 235static inline u32 fifo_intr_0_channel_intr_pending_f(void) 236{ 237 return 0x80000000U; 238} 239static inline u32 fifo_intr_en_0_r(void) 240{ 241 return 0x00002140U; 242} 243static inline u32 fifo_intr_en_0_sched_error_f(u32 v) 244{ 245 return (v & 0x1U) << 8U; 246} 247static inline u32 fifo_intr_en_0_sched_error_m(void) 248{ 249 return 0x1U << 8U; 250} 251static inline u32 fifo_intr_en_1_r(void) 252{ 253 return 0x00002528U; 254} 255static inline u32 fifo_intr_bind_error_r(void) 256{ 257 return 0x0000252cU; 258} 259static inline u32 fifo_intr_sched_error_r(void) 260{ 261 return 0x0000254cU; 262} 263static inline u32 fifo_intr_sched_error_code_f(u32 v) 264{ 265 return (v & 0xffU) << 0U; 266} 267static inline u32 fifo_intr_chsw_error_r(void) 268{ 269 return 0x0000256cU; 270} 271static inline u32 fifo_intr_pbdma_id_r(void) 272{ 273 return 0x000025a0U; 274} 275static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) 276{ 277 return (v & 0x1U) << (0U + i*1U); 278} 279static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) 280{ 281 return (r >> (0U + i*1U)) & 0x1U; 282} 283static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) 284{ 285 return 0x0000000eU; 286} 287static inline u32 fifo_intr_runlist_r(void) 288{ 289 return 
0x00002a00U; 290} 291static inline u32 fifo_fb_timeout_r(void) 292{ 293 return 0x00002a04U; 294} 295static inline u32 fifo_fb_timeout_period_m(void) 296{ 297 return 0x3fffffffU << 0U; 298} 299static inline u32 fifo_fb_timeout_period_max_f(void) 300{ 301 return 0x3fffffffU; 302} 303static inline u32 fifo_fb_timeout_period_init_f(void) 304{ 305 return 0x3c00U; 306} 307static inline u32 fifo_sched_disable_r(void) 308{ 309 return 0x00002630U; 310} 311static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) 312{ 313 return (v & 0x1U) << (0U + i*1U); 314} 315static inline u32 fifo_sched_disable_runlist_m(u32 i) 316{ 317 return 0x1U << (0U + i*1U); 318} 319static inline u32 fifo_sched_disable_true_v(void) 320{ 321 return 0x00000001U; 322} 323static inline u32 fifo_runlist_preempt_r(void) 324{ 325 return 0x00002638U; 326} 327static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i) 328{ 329 return (v & 0x1U) << (0U + i*1U); 330} 331static inline u32 fifo_runlist_preempt_runlist_m(u32 i) 332{ 333 return 0x1U << (0U + i*1U); 334} 335static inline u32 fifo_runlist_preempt_runlist_pending_v(void) 336{ 337 return 0x00000001U; 338} 339static inline u32 fifo_preempt_r(void) 340{ 341 return 0x00002634U; 342} 343static inline u32 fifo_preempt_pending_true_f(void) 344{ 345 return 0x100000U; 346} 347static inline u32 fifo_preempt_type_channel_f(void) 348{ 349 return 0x0U; 350} 351static inline u32 fifo_preempt_type_tsg_f(void) 352{ 353 return 0x1000000U; 354} 355static inline u32 fifo_preempt_chid_f(u32 v) 356{ 357 return (v & 0xfffU) << 0U; 358} 359static inline u32 fifo_preempt_id_f(u32 v) 360{ 361 return (v & 0xfffU) << 0U; 362} 363static inline u32 fifo_engine_status_r(u32 i) 364{ 365 return 0x00002640U + i*8U; 366} 367static inline u32 fifo_engine_status__size_1_v(void) 368{ 369 return 0x0000000fU; 370} 371static inline u32 fifo_engine_status_id_v(u32 r) 372{ 373 return (r >> 0U) & 0xfffU; 374} 375static inline u32 fifo_engine_status_id_type_v(u32 r) 376{ 377 return (r 
>> 12U) & 0x1U; 378} 379static inline u32 fifo_engine_status_id_type_chid_v(void) 380{ 381 return 0x00000000U; 382} 383static inline u32 fifo_engine_status_id_type_tsgid_v(void) 384{ 385 return 0x00000001U; 386} 387static inline u32 fifo_engine_status_ctx_status_v(u32 r) 388{ 389 return (r >> 13U) & 0x7U; 390} 391static inline u32 fifo_engine_status_ctx_status_valid_v(void) 392{ 393 return 0x00000001U; 394} 395static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) 396{ 397 return 0x00000005U; 398} 399static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) 400{ 401 return 0x00000006U; 402} 403static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) 404{ 405 return 0x00000007U; 406} 407static inline u32 fifo_engine_status_next_id_v(u32 r) 408{ 409 return (r >> 16U) & 0xfffU; 410} 411static inline u32 fifo_engine_status_next_id_type_v(u32 r) 412{ 413 return (r >> 28U) & 0x1U; 414} 415static inline u32 fifo_engine_status_next_id_type_chid_v(void) 416{ 417 return 0x00000000U; 418} 419static inline u32 fifo_engine_status_eng_reload_v(u32 r) 420{ 421 return (r >> 29U) & 0x1U; 422} 423static inline u32 fifo_engine_status_faulted_v(u32 r) 424{ 425 return (r >> 30U) & 0x1U; 426} 427static inline u32 fifo_engine_status_faulted_true_v(void) 428{ 429 return 0x00000001U; 430} 431static inline u32 fifo_engine_status_engine_v(u32 r) 432{ 433 return (r >> 31U) & 0x1U; 434} 435static inline u32 fifo_engine_status_engine_idle_v(void) 436{ 437 return 0x00000000U; 438} 439static inline u32 fifo_engine_status_engine_busy_v(void) 440{ 441 return 0x00000001U; 442} 443static inline u32 fifo_engine_status_ctxsw_v(u32 r) 444{ 445 return (r >> 15U) & 0x1U; 446} 447static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) 448{ 449 return 0x00000001U; 450} 451static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) 452{ 453 return 0x8000U; 454} 455static inline u32 fifo_pbdma_status_r(u32 i) 456{ 457 return 0x00003080U + i*4U; 458} 459static inline 
u32 fifo_pbdma_status__size_1_v(void) 460{ 461 return 0x0000000eU; 462} 463static inline u32 fifo_pbdma_status_id_v(u32 r) 464{ 465 return (r >> 0U) & 0xfffU; 466} 467static inline u32 fifo_pbdma_status_id_type_v(u32 r) 468{ 469 return (r >> 12U) & 0x1U; 470} 471static inline u32 fifo_pbdma_status_id_type_chid_v(void) 472{ 473 return 0x00000000U; 474} 475static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) 476{ 477 return 0x00000001U; 478} 479static inline u32 fifo_pbdma_status_chan_status_v(u32 r) 480{ 481 return (r >> 13U) & 0x7U; 482} 483static inline u32 fifo_pbdma_status_chan_status_valid_v(void) 484{ 485 return 0x00000001U; 486} 487static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) 488{ 489 return 0x00000005U; 490} 491static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) 492{ 493 return 0x00000006U; 494} 495static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) 496{ 497 return 0x00000007U; 498} 499static inline u32 fifo_pbdma_status_next_id_v(u32 r) 500{ 501 return (r >> 16U) & 0xfffU; 502} 503static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) 504{ 505 return (r >> 28U) & 0x1U; 506} 507static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) 508{ 509 return 0x00000000U; 510} 511static inline u32 fifo_pbdma_status_chsw_v(u32 r) 512{ 513 return (r >> 15U) & 0x1U; 514} 515static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) 516{ 517 return 0x00000001U; 518} 519static inline u32 fifo_cfg0_r(void) 520{ 521 return 0x00002004U; 522} 523static inline u32 fifo_cfg0_num_pbdma_v(u32 r) 524{ 525 return (r >> 0U) & 0xffU; 526} 527static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r) 528{ 529 return (r >> 16U) & 0xffU; 530} 531#endif
diff --git a/include/nvgpu/hw/gv100/hw_flush_gv100.h b/include/nvgpu/hw/gv100/hw_flush_gv100.h
deleted file mode 100644
index b604562..0000000
--- a/include/nvgpu/hw/gv100/hw_flush_gv100.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_flush_gv100_h_ 57#define _hw_flush_gv100_h_ 58 59static inline u32 flush_l2_system_invalidate_r(void) 60{ 61 return 0x00070004U; 62} 63static inline u32 flush_l2_system_invalidate_pending_v(u32 r) 64{ 65 return (r >> 0U) & 0x1U; 66} 67static inline u32 flush_l2_system_invalidate_pending_busy_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 flush_l2_system_invalidate_pending_busy_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) 76{ 77 return (r >> 1U) & 0x1U; 78} 79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 flush_l2_flush_dirty_r(void) 84{ 85 return 0x00070010U; 86} 87static inline u32 flush_l2_flush_dirty_pending_v(u32 r) 88{ 89 return (r >> 0U) & 0x1U; 90} 91static inline u32 flush_l2_flush_dirty_pending_empty_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 flush_l2_flush_dirty_pending_empty_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 flush_l2_flush_dirty_pending_busy_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 flush_l2_flush_dirty_pending_busy_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) 108{ 109 return 
(r >> 1U) & 0x1U; 110} 111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 flush_l2_clean_comptags_r(void) 124{ 125 return 0x0007000cU; 126} 127static inline u32 flush_l2_clean_comptags_pending_v(u32 r) 128{ 129 return (r >> 0U) & 0x1U; 130} 131static inline u32 flush_l2_clean_comptags_pending_empty_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 flush_l2_clean_comptags_pending_empty_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 flush_l2_clean_comptags_pending_busy_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 flush_l2_clean_comptags_pending_busy_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 flush_fb_flush_r(void) 164{ 165 return 0x00070000U; 166} 167static inline u32 flush_fb_flush_pending_v(u32 r) 168{ 169 return (r >> 0U) & 0x1U; 170} 171static inline u32 flush_fb_flush_pending_busy_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 flush_fb_flush_pending_busy_f(void) 176{ 177 return 0x1U; 178} 179static inline u32 flush_fb_flush_outstanding_v(u32 r) 180{ 181 return (r >> 1U) & 0x1U; 182} 183static inline u32 flush_fb_flush_outstanding_true_v(void) 184{ 185 return 0x00000001U; 186} 187#endif
diff --git a/include/nvgpu/hw/gv100/hw_fuse_gv100.h b/include/nvgpu/hw/gv100/hw_fuse_gv100.h
deleted file mode 100644
index 48194ea..0000000
--- a/include/nvgpu/hw/gv100/hw_fuse_gv100.h
+++ /dev/null
@@ -1,147 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fuse_gv100_h_ 57#define _hw_fuse_gv100_h_ 58 59static inline u32 fuse_status_opt_gpc_r(void) 60{ 61 return 0x00021c1cU; 62} 63static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) 64{ 65 return 0x00021c38U + i*4U; 66} 67static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) 68{ 69 return 0x00021838U + i*4U; 70} 71static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) 72{ 73 return 0x00021944U; 74} 75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) 76{ 77 return (v & 0xffU) << 0U; 78} 79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) 80{ 81 return 0xffU << 0U; 82} 83static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) 84{ 85 return (r >> 0U) & 0xffU; 86} 87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) 88{ 89 return 0x00021948U; 90} 91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) 92{ 93 return (v & 0x1U) << 0U; 94} 95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) 96{ 97 return 0x1U << 0U; 98} 99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 
fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 fuse_status_opt_fbio_r(void) 112{ 113 return 0x00021c14U; 114} 115static inline u32 fuse_status_opt_fbio_data_f(u32 v) 116{ 117 return (v & 0xffffU) << 0U; 118} 119static inline u32 fuse_status_opt_fbio_data_m(void) 120{ 121 return 0xffffU << 0U; 122} 123static inline u32 fuse_status_opt_fbio_data_v(u32 r) 124{ 125 return (r >> 0U) & 0xffffU; 126} 127static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i) 128{ 129 return 0x00021d70U + i*4U; 130} 131static inline u32 fuse_status_opt_fbp_r(void) 132{ 133 return 0x00021d38U; 134} 135static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) 136{ 137 return (r >> (0U + i*1U)) & 0x1U; 138} 139static inline u32 fuse_opt_ecc_en_r(void) 140{ 141 return 0x00021228U; 142} 143static inline u32 fuse_opt_feature_fuses_override_disable_r(void) 144{ 145 return 0x000213f0U; 146} 147#endif
diff --git a/include/nvgpu/hw/gv100/hw_gmmu_gv100.h b/include/nvgpu/hw/gv100/hw_gmmu_gv100.h
deleted file mode 100644
index 8cccfa9..0000000
--- a/include/nvgpu/hw/gv100/hw_gmmu_gv100.h
+++ /dev/null
@@ -1,355 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gmmu_gv100_h_ 57#define _hw_gmmu_gv100_h_ 58 59static inline u32 gmmu_new_pde_is_pte_w(void) 60{ 61 return 0U; 62} 63static inline u32 gmmu_new_pde_is_pte_false_f(void) 64{ 65 return 0x0U; 66} 67static inline u32 gmmu_new_pde_aperture_w(void) 68{ 69 return 0U; 70} 71static inline u32 gmmu_new_pde_aperture_invalid_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 gmmu_new_pde_aperture_video_memory_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void) 84{ 85 return 0x6U; 86} 87static inline u32 gmmu_new_pde_address_sys_f(u32 v) 88{ 89 return (v & 0xffffffU) << 8U; 90} 91static inline u32 gmmu_new_pde_address_sys_w(void) 92{ 93 return 0U; 94} 95static inline u32 gmmu_new_pde_vol_w(void) 96{ 97 return 0U; 98} 99static inline u32 gmmu_new_pde_vol_true_f(void) 100{ 101 return 0x8U; 102} 103static inline u32 gmmu_new_pde_vol_false_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 gmmu_new_pde_address_shift_v(void) 108{ 109 return 0x0000000cU; 110} 111static inline u32 gmmu_new_pde__size_v(void) 112{ 113 return 0x00000008U; 114} 115static inline u32 gmmu_new_dual_pde_is_pte_w(void) 116{ 117 return 0U; 118} 
119static inline u32 gmmu_new_dual_pde_is_pte_false_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 gmmu_new_dual_pde_aperture_big_w(void) 124{ 125 return 0U; 126} 127static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void) 128{ 129 return 0x0U; 130} 131static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void) 132{ 133 return 0x2U; 134} 135static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void) 136{ 137 return 0x4U; 138} 139static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void) 140{ 141 return 0x6U; 142} 143static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v) 144{ 145 return (v & 0xfffffffU) << 4U; 146} 147static inline u32 gmmu_new_dual_pde_address_big_sys_w(void) 148{ 149 return 0U; 150} 151static inline u32 gmmu_new_dual_pde_aperture_small_w(void) 152{ 153 return 2U; 154} 155static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void) 164{ 165 return 0x4U; 166} 167static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void) 168{ 169 return 0x6U; 170} 171static inline u32 gmmu_new_dual_pde_vol_small_w(void) 172{ 173 return 2U; 174} 175static inline u32 gmmu_new_dual_pde_vol_small_true_f(void) 176{ 177 return 0x8U; 178} 179static inline u32 gmmu_new_dual_pde_vol_small_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 gmmu_new_dual_pde_vol_big_w(void) 184{ 185 return 0U; 186} 187static inline u32 gmmu_new_dual_pde_vol_big_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 gmmu_new_dual_pde_vol_big_false_f(void) 192{ 193 return 0x0U; 194} 195static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v) 196{ 197 return (v & 0xffffffU) << 8U; 198} 199static inline u32 gmmu_new_dual_pde_address_small_sys_w(void) 200{ 201 return 2U; 202} 203static inline u32 
gmmu_new_dual_pde_address_shift_v(void) 204{ 205 return 0x0000000cU; 206} 207static inline u32 gmmu_new_dual_pde_address_big_shift_v(void) 208{ 209 return 0x00000008U; 210} 211static inline u32 gmmu_new_dual_pde__size_v(void) 212{ 213 return 0x00000010U; 214} 215static inline u32 gmmu_new_pte__size_v(void) 216{ 217 return 0x00000008U; 218} 219static inline u32 gmmu_new_pte_valid_w(void) 220{ 221 return 0U; 222} 223static inline u32 gmmu_new_pte_valid_true_f(void) 224{ 225 return 0x1U; 226} 227static inline u32 gmmu_new_pte_valid_false_f(void) 228{ 229 return 0x0U; 230} 231static inline u32 gmmu_new_pte_privilege_w(void) 232{ 233 return 0U; 234} 235static inline u32 gmmu_new_pte_privilege_true_f(void) 236{ 237 return 0x20U; 238} 239static inline u32 gmmu_new_pte_privilege_false_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gmmu_new_pte_address_sys_f(u32 v) 244{ 245 return (v & 0xffffffU) << 8U; 246} 247static inline u32 gmmu_new_pte_address_sys_w(void) 248{ 249 return 0U; 250} 251static inline u32 gmmu_new_pte_address_vid_f(u32 v) 252{ 253 return (v & 0xffffffU) << 8U; 254} 255static inline u32 gmmu_new_pte_address_vid_w(void) 256{ 257 return 0U; 258} 259static inline u32 gmmu_new_pte_vol_w(void) 260{ 261 return 0U; 262} 263static inline u32 gmmu_new_pte_vol_true_f(void) 264{ 265 return 0x8U; 266} 267static inline u32 gmmu_new_pte_vol_false_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 gmmu_new_pte_aperture_w(void) 272{ 273 return 0U; 274} 275static inline u32 gmmu_new_pte_aperture_video_memory_f(void) 276{ 277 return 0x0U; 278} 279static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void) 280{ 281 return 0x4U; 282} 283static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void) 284{ 285 return 0x6U; 286} 287static inline u32 gmmu_new_pte_read_only_w(void) 288{ 289 return 0U; 290} 291static inline u32 gmmu_new_pte_read_only_true_f(void) 292{ 293 return 0x40U; 294} 295static inline u32 gmmu_new_pte_comptagline_f(u32 v) 296{ 297 return (v & 
0x3ffffU) << 4U; 298} 299static inline u32 gmmu_new_pte_comptagline_w(void) 300{ 301 return 1U; 302} 303static inline u32 gmmu_new_pte_kind_f(u32 v) 304{ 305 return (v & 0xffU) << 24U; 306} 307static inline u32 gmmu_new_pte_kind_w(void) 308{ 309 return 1U; 310} 311static inline u32 gmmu_new_pte_address_shift_v(void) 312{ 313 return 0x0000000cU; 314} 315static inline u32 gmmu_pte_kind_f(u32 v) 316{ 317 return (v & 0xffU) << 4U; 318} 319static inline u32 gmmu_pte_kind_w(void) 320{ 321 return 1U; 322} 323static inline u32 gmmu_pte_kind_invalid_v(void) 324{ 325 return 0x000000ffU; 326} 327static inline u32 gmmu_pte_kind_pitch_v(void) 328{ 329 return 0x00000000U; 330} 331static inline u32 gmmu_fault_client_type_gpc_v(void) 332{ 333 return 0x00000000U; 334} 335static inline u32 gmmu_fault_client_type_hub_v(void) 336{ 337 return 0x00000001U; 338} 339static inline u32 gmmu_fault_type_unbound_inst_block_v(void) 340{ 341 return 0x00000004U; 342} 343static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void) 344{ 345 return 0x00000005U; 346} 347static inline u32 gmmu_fault_mmu_eng_id_physical_v(void) 348{ 349 return 0x0000001fU; 350} 351static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void) 352{ 353 return 0x0000000fU; 354} 355#endif
diff --git a/include/nvgpu/hw/gv100/hw_gr_gv100.h b/include/nvgpu/hw/gv100/hw_gr_gv100.h
deleted file mode 100644
index 3955a63..0000000
--- a/include/nvgpu/hw/gv100/hw_gr_gv100.h
+++ /dev/null
@@ -1,4123 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gr_gv100_h_ 57#define _hw_gr_gv100_h_ 58 59static inline u32 gr_intr_r(void) 60{ 61 return 0x00400100U; 62} 63static inline u32 gr_intr_notify_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 gr_intr_notify_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gr_intr_semaphore_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gr_intr_semaphore_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gr_intr_illegal_method_pending_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 gr_intr_illegal_method_reset_f(void) 84{ 85 return 0x10U; 86} 87static inline u32 gr_intr_illegal_notify_pending_f(void) 88{ 89 return 0x40U; 90} 91static inline u32 gr_intr_illegal_notify_reset_f(void) 92{ 93 return 0x40U; 94} 95static inline u32 gr_intr_firmware_method_f(u32 v) 96{ 97 return (v & 0x1U) << 8U; 98} 99static inline u32 gr_intr_firmware_method_pending_f(void) 100{ 101 return 0x100U; 102} 103static inline u32 gr_intr_firmware_method_reset_f(void) 104{ 105 return 0x100U; 106} 107static inline u32 gr_intr_illegal_class_pending_f(void) 108{ 109 return 0x20U; 110} 111static inline u32 gr_intr_illegal_class_reset_f(void) 112{ 113 return 0x20U; 114} 115static inline u32 gr_intr_fecs_error_pending_f(void) 116{ 117 
return 0x80000U; 118} 119static inline u32 gr_intr_fecs_error_reset_f(void) 120{ 121 return 0x80000U; 122} 123static inline u32 gr_intr_class_error_pending_f(void) 124{ 125 return 0x100000U; 126} 127static inline u32 gr_intr_class_error_reset_f(void) 128{ 129 return 0x100000U; 130} 131static inline u32 gr_intr_exception_pending_f(void) 132{ 133 return 0x200000U; 134} 135static inline u32 gr_intr_exception_reset_f(void) 136{ 137 return 0x200000U; 138} 139static inline u32 gr_fecs_intr_r(void) 140{ 141 return 0x00400144U; 142} 143static inline u32 gr_class_error_r(void) 144{ 145 return 0x00400110U; 146} 147static inline u32 gr_class_error_code_v(u32 r) 148{ 149 return (r >> 0U) & 0xffffU; 150} 151static inline u32 gr_intr_nonstall_r(void) 152{ 153 return 0x00400120U; 154} 155static inline u32 gr_intr_nonstall_trap_pending_f(void) 156{ 157 return 0x2U; 158} 159static inline u32 gr_intr_en_r(void) 160{ 161 return 0x0040013cU; 162} 163static inline u32 gr_exception_r(void) 164{ 165 return 0x00400108U; 166} 167static inline u32 gr_exception_fe_m(void) 168{ 169 return 0x1U << 0U; 170} 171static inline u32 gr_exception_gpc_m(void) 172{ 173 return 0x1U << 24U; 174} 175static inline u32 gr_exception_memfmt_m(void) 176{ 177 return 0x1U << 1U; 178} 179static inline u32 gr_exception_ds_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 gr_exception_sked_m(void) 184{ 185 return 0x1U << 8U; 186} 187static inline u32 gr_exception_pd_m(void) 188{ 189 return 0x1U << 2U; 190} 191static inline u32 gr_exception_scc_m(void) 192{ 193 return 0x1U << 3U; 194} 195static inline u32 gr_exception_ssync_m(void) 196{ 197 return 0x1U << 5U; 198} 199static inline u32 gr_exception_mme_m(void) 200{ 201 return 0x1U << 7U; 202} 203static inline u32 gr_exception1_r(void) 204{ 205 return 0x00400118U; 206} 207static inline u32 gr_exception1_gpc_0_pending_f(void) 208{ 209 return 0x1U; 210} 211static inline u32 gr_exception2_r(void) 212{ 213 return 0x0040011cU; 214} 215static inline u32 
gr_exception_en_r(void) 216{ 217 return 0x00400138U; 218} 219static inline u32 gr_exception_en_fe_m(void) 220{ 221 return 0x1U << 0U; 222} 223static inline u32 gr_exception_en_fe_enabled_f(void) 224{ 225 return 0x1U; 226} 227static inline u32 gr_exception_en_gpc_m(void) 228{ 229 return 0x1U << 24U; 230} 231static inline u32 gr_exception_en_gpc_enabled_f(void) 232{ 233 return 0x1000000U; 234} 235static inline u32 gr_exception_en_memfmt_m(void) 236{ 237 return 0x1U << 1U; 238} 239static inline u32 gr_exception_en_memfmt_enabled_f(void) 240{ 241 return 0x2U; 242} 243static inline u32 gr_exception_en_ds_m(void) 244{ 245 return 0x1U << 4U; 246} 247static inline u32 gr_exception_en_ds_enabled_f(void) 248{ 249 return 0x10U; 250} 251static inline u32 gr_exception_en_pd_m(void) 252{ 253 return 0x1U << 2U; 254} 255static inline u32 gr_exception_en_pd_enabled_f(void) 256{ 257 return 0x4U; 258} 259static inline u32 gr_exception_en_scc_m(void) 260{ 261 return 0x1U << 3U; 262} 263static inline u32 gr_exception_en_scc_enabled_f(void) 264{ 265 return 0x8U; 266} 267static inline u32 gr_exception_en_ssync_m(void) 268{ 269 return 0x1U << 5U; 270} 271static inline u32 gr_exception_en_ssync_enabled_f(void) 272{ 273 return 0x20U; 274} 275static inline u32 gr_exception_en_mme_m(void) 276{ 277 return 0x1U << 7U; 278} 279static inline u32 gr_exception_en_mme_enabled_f(void) 280{ 281 return 0x80U; 282} 283static inline u32 gr_exception_en_sked_m(void) 284{ 285 return 0x1U << 8U; 286} 287static inline u32 gr_exception_en_sked_enabled_f(void) 288{ 289 return 0x100U; 290} 291static inline u32 gr_exception1_en_r(void) 292{ 293 return 0x00400130U; 294} 295static inline u32 gr_exception2_en_r(void) 296{ 297 return 0x00400134U; 298} 299static inline u32 gr_gpfifo_ctl_r(void) 300{ 301 return 0x00400500U; 302} 303static inline u32 gr_gpfifo_ctl_access_f(u32 v) 304{ 305 return (v & 0x1U) << 0U; 306} 307static inline u32 gr_gpfifo_ctl_access_disabled_f(void) 308{ 309 return 0x0U; 310} 311static inline 
u32 gr_gpfifo_ctl_access_enabled_f(void) 312{ 313 return 0x1U; 314} 315static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) 316{ 317 return (v & 0x1U) << 16U; 318} 319static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) 320{ 321 return 0x00000001U; 322} 323static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) 324{ 325 return 0x10000U; 326} 327static inline u32 gr_gpfifo_status_r(void) 328{ 329 return 0x00400504U; 330} 331static inline u32 gr_trapped_addr_r(void) 332{ 333 return 0x00400704U; 334} 335static inline u32 gr_trapped_addr_mthd_v(u32 r) 336{ 337 return (r >> 2U) & 0xfffU; 338} 339static inline u32 gr_trapped_addr_subch_v(u32 r) 340{ 341 return (r >> 16U) & 0x7U; 342} 343static inline u32 gr_trapped_addr_mme_generated_v(u32 r) 344{ 345 return (r >> 20U) & 0x1U; 346} 347static inline u32 gr_trapped_addr_datahigh_v(u32 r) 348{ 349 return (r >> 24U) & 0x1U; 350} 351static inline u32 gr_trapped_addr_priv_v(u32 r) 352{ 353 return (r >> 28U) & 0x1U; 354} 355static inline u32 gr_trapped_addr_status_v(u32 r) 356{ 357 return (r >> 31U) & 0x1U; 358} 359static inline u32 gr_trapped_data_lo_r(void) 360{ 361 return 0x00400708U; 362} 363static inline u32 gr_trapped_data_hi_r(void) 364{ 365 return 0x0040070cU; 366} 367static inline u32 gr_trapped_data_mme_r(void) 368{ 369 return 0x00400710U; 370} 371static inline u32 gr_trapped_data_mme_pc_v(u32 r) 372{ 373 return (r >> 0U) & 0xfffU; 374} 375static inline u32 gr_status_r(void) 376{ 377 return 0x00400700U; 378} 379static inline u32 gr_status_fe_method_upper_v(u32 r) 380{ 381 return (r >> 1U) & 0x1U; 382} 383static inline u32 gr_status_fe_method_lower_v(u32 r) 384{ 385 return (r >> 2U) & 0x1U; 386} 387static inline u32 gr_status_fe_method_lower_idle_v(void) 388{ 389 return 0x00000000U; 390} 391static inline u32 gr_status_fe_gi_v(u32 r) 392{ 393 return (r >> 21U) & 0x1U; 394} 395static inline u32 gr_status_mask_r(void) 396{ 397 return 0x00400610U; 398} 399static inline u32 gr_status_1_r(void) 400{ 
401 return 0x00400604U; 402} 403static inline u32 gr_status_2_r(void) 404{ 405 return 0x00400608U; 406} 407static inline u32 gr_engine_status_r(void) 408{ 409 return 0x0040060cU; 410} 411static inline u32 gr_engine_status_value_busy_f(void) 412{ 413 return 0x1U; 414} 415static inline u32 gr_pri_be0_becs_be_exception_r(void) 416{ 417 return 0x00410204U; 418} 419static inline u32 gr_pri_be0_becs_be_exception_en_r(void) 420{ 421 return 0x00410208U; 422} 423static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) 424{ 425 return 0x00502c90U; 426} 427static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) 428{ 429 return 0x00502c94U; 430} 431static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) 432{ 433 return 0x00504508U; 434} 435static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 436{ 437 return 0x0050450cU; 438} 439static inline u32 gr_activity_0_r(void) 440{ 441 return 0x00400380U; 442} 443static inline u32 gr_activity_1_r(void) 444{ 445 return 0x00400384U; 446} 447static inline u32 gr_activity_2_r(void) 448{ 449 return 0x00400388U; 450} 451static inline u32 gr_activity_4_r(void) 452{ 453 return 0x00400390U; 454} 455static inline u32 gr_activity_4_gpc0_s(void) 456{ 457 return 3U; 458} 459static inline u32 gr_activity_4_gpc0_f(u32 v) 460{ 461 return (v & 0x7U) << 0U; 462} 463static inline u32 gr_activity_4_gpc0_m(void) 464{ 465 return 0x7U << 0U; 466} 467static inline u32 gr_activity_4_gpc0_v(u32 r) 468{ 469 return (r >> 0U) & 0x7U; 470} 471static inline u32 gr_activity_4_gpc0_empty_v(void) 472{ 473 return 0x00000000U; 474} 475static inline u32 gr_activity_4_gpc0_preempted_v(void) 476{ 477 return 0x00000004U; 478} 479static inline u32 gr_pri_gpc0_gcc_dbg_r(void) 480{ 481 return 0x00501000U; 482} 483static inline u32 gr_pri_gpcs_gcc_dbg_r(void) 484{ 485 return 0x00419000U; 486} 487static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) 488{ 489 return 0x1U << 1U; 490} 491static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) 492{ 493 
return 0x0050433cU; 494} 495static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) 496{ 497 return 0x00419b3cU; 498} 499static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) 500{ 501 return 0x1U << 0U; 502} 503static inline u32 gr_pri_sked_activity_r(void) 504{ 505 return 0x00407054U; 506} 507static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) 508{ 509 return 0x00502c80U; 510} 511static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) 512{ 513 return 0x00502c84U; 514} 515static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) 516{ 517 return 0x00502c88U; 518} 519static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) 520{ 521 return 0x00502c8cU; 522} 523static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) 524{ 525 return 0x00504500U; 526} 527static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) 528{ 529 return 0x00504d00U; 530} 531static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) 532{ 533 return 0x00501d00U; 534} 535static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) 536{ 537 return 0x0041ac80U; 538} 539static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) 540{ 541 return 0x0041ac84U; 542} 543static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) 544{ 545 return 0x0041ac88U; 546} 547static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) 548{ 549 return 0x0041ac8cU; 550} 551static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) 552{ 553 return 0x0041c500U; 554} 555static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) 556{ 557 return 0x0041cd00U; 558} 559static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) 560{ 561 return 0x00419d00U; 562} 563static inline u32 gr_pri_be0_becs_be_activity0_r(void) 564{ 565 return 0x00410200U; 566} 567static inline u32 gr_pri_be1_becs_be_activity0_r(void) 568{ 569 return 0x00410600U; 570} 571static inline u32 gr_pri_bes_becs_be_activity0_r(void) 572{ 573 return 0x00408a00U; 574} 575static inline u32 
gr_pri_ds_mpipe_status_r(void) 576{ 577 return 0x00405858U; 578} 579static inline u32 gr_pri_fe_go_idle_info_r(void) 580{ 581 return 0x00404194U; 582} 583static inline u32 gr_pri_fe_chip_def_info_r(void) 584{ 585 return 0x00404030U; 586} 587static inline u32 gr_pri_fe_chip_def_info_max_veid_count_v(u32 r) 588{ 589 return (r >> 0U) & 0xfffU; 590} 591static inline u32 gr_pri_fe_chip_def_info_max_veid_count_init_v(void) 592{ 593 return 0x00000040U; 594} 595static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) 596{ 597 return 0x00504238U; 598} 599static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void) 600{ 601 return 0x00504358U; 602} 603static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m(void) 604{ 605 return 0x1U << 0U; 606} 607static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m(void) 608{ 609 return 0x1U << 1U; 610} 611static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m(void) 612{ 613 return 0x1U << 2U; 614} 615static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m(void) 616{ 617 return 0x1U << 3U; 618} 619static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m(void) 620{ 621 return 0x1U << 4U; 622} 623static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m(void) 624{ 625 return 0x1U << 5U; 626} 627static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m(void) 628{ 629 return 0x1U << 6U; 630} 631static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m(void) 632{ 633 return 0x1U << 7U; 634} 635static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m(void) 636{ 637 return 0x1U << 8U; 638} 639static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m(void) 640{ 641 return 0x1U << 9U; 642} 643static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m(void) 644{ 645 return 0x1U << 10U; 646} 647static inline u32 
gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m(void) 648{ 649 return 0x1U << 11U; 650} 651static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m(void) 652{ 653 return 0x1U << 12U; 654} 655static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m(void) 656{ 657 return 0x1U << 13U; 658} 659static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m(void) 660{ 661 return 0x1U << 14U; 662} 663static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m(void) 664{ 665 return 0x1U << 15U; 666} 667static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 668{ 669 return (r >> 24U) & 0x1U; 670} 671static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 672{ 673 return (r >> 26U) & 0x1U; 674} 675static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f(void) 676{ 677 return 0x40000000U; 678} 679static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r(void) 680{ 681 return 0x0050435cU; 682} 683static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s(void) 684{ 685 return 16U; 686} 687static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(u32 r) 688{ 689 return (r >> 0U) & 0xffffU; 690} 691static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r(void) 692{ 693 return 0x00504360U; 694} 695static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s(void) 696{ 697 return 16U; 698} 699static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(u32 r) 700{ 701 return (r >> 0U) & 0xffffU; 702} 703static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r(void) 704{ 705 return 0x0050436cU; 706} 707static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m(void) 708{ 709 return 0x1U << 0U; 710} 711static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m(void) 712{ 713 
return 0x1U << 1U; 714} 715static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m(void) 716{ 717 return 0x1U << 2U; 718} 719static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m(void) 720{ 721 return 0x1U << 3U; 722} 723static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 724{ 725 return (r >> 8U) & 0x1U; 726} 727static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 728{ 729 return (r >> 10U) & 0x1U; 730} 731static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f(void) 732{ 733 return 0x40000000U; 734} 735static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r(void) 736{ 737 return 0x00504370U; 738} 739static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s(void) 740{ 741 return 16U; 742} 743static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(u32 r) 744{ 745 return (r >> 0U) & 0xffffU; 746} 747static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r(void) 748{ 749 return 0x00504374U; 750} 751static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s(void) 752{ 753 return 16U; 754} 755static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(u32 r) 756{ 757 return (r >> 0U) & 0xffffU; 758} 759static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r(void) 760{ 761 return 0x00504638U; 762} 763static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m(void) 764{ 765 return 0x1U << 0U; 766} 767static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m(void) 768{ 769 return 0x1U << 1U; 770} 771static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m(void) 772{ 773 return 0x1U << 2U; 774} 775static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m(void) 776{ 777 return 0x1U << 3U; 778} 779static inline 
u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m(void) 780{ 781 return 0x1U << 4U; 782} 783static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m(void) 784{ 785 return 0x1U << 5U; 786} 787static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m(void) 788{ 789 return 0x1U << 6U; 790} 791static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m(void) 792{ 793 return 0x1U << 7U; 794} 795static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 796{ 797 return (r >> 16U) & 0x1U; 798} 799static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 800{ 801 return (r >> 18U) & 0x1U; 802} 803static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f(void) 804{ 805 return 0x40000000U; 806} 807static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r(void) 808{ 809 return 0x0050463cU; 810} 811static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s(void) 812{ 813 return 16U; 814} 815static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(u32 r) 816{ 817 return (r >> 0U) & 0xffffU; 818} 819static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r(void) 820{ 821 return 0x00504640U; 822} 823static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s(void) 824{ 825 return 16U; 826} 827static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(u32 r) 828{ 829 return (r >> 0U) & 0xffffU; 830} 831static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void) 832{ 833 return 0x005042c4U; 834} 835static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void) 836{ 837 return 0x0U; 838} 839static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void) 840{ 841 return 0x1U; 842} 843static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void) 844{ 845 return 0x2U; 846} 847static inline u32 
gr_gpc0_tpc0_mpc_hww_esr_r(void) 848{ 849 return 0x00504430U; 850} 851static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void) 852{ 853 return 0x40000000U; 854} 855static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void) 856{ 857 return 0x00504434U; 858} 859static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r) 860{ 861 return (r >> 0U) & 0x3fU; 862} 863static inline u32 gr_pri_be0_crop_status1_r(void) 864{ 865 return 0x00410134U; 866} 867static inline u32 gr_pri_bes_crop_status1_r(void) 868{ 869 return 0x00408934U; 870} 871static inline u32 gr_pri_be0_zrop_status_r(void) 872{ 873 return 0x00410048U; 874} 875static inline u32 gr_pri_be0_zrop_status2_r(void) 876{ 877 return 0x0041004cU; 878} 879static inline u32 gr_pri_bes_zrop_status_r(void) 880{ 881 return 0x00408848U; 882} 883static inline u32 gr_pri_bes_zrop_status2_r(void) 884{ 885 return 0x0040884cU; 886} 887static inline u32 gr_pipe_bundle_address_r(void) 888{ 889 return 0x00400200U; 890} 891static inline u32 gr_pipe_bundle_address_value_v(u32 r) 892{ 893 return (r >> 0U) & 0xffffU; 894} 895static inline u32 gr_pipe_bundle_address_veid_f(u32 v) 896{ 897 return (v & 0x3fU) << 20U; 898} 899static inline u32 gr_pipe_bundle_address_veid_w(void) 900{ 901 return 0U; 902} 903static inline u32 gr_pipe_bundle_data_r(void) 904{ 905 return 0x00400204U; 906} 907static inline u32 gr_pipe_bundle_config_r(void) 908{ 909 return 0x00400208U; 910} 911static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) 912{ 913 return 0x0U; 914} 915static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) 916{ 917 return 0x80000000U; 918} 919static inline u32 gr_fe_hww_esr_r(void) 920{ 921 return 0x00404000U; 922} 923static inline u32 gr_fe_hww_esr_reset_active_f(void) 924{ 925 return 0x40000000U; 926} 927static inline u32 gr_fe_hww_esr_en_enable_f(void) 928{ 929 return 0x80000000U; 930} 931static inline u32 gr_fe_hww_esr_info_r(void) 932{ 933 return 0x004041b0U; 934} 935static inline u32 
gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(void) 936{ 937 return 0x00419eacU; 938} 939static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r(void) 940{ 941 return 0x0050472cU; 942} 943static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) 944{ 945 return 0x4U; 946} 947static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_int_report_f(void) 948{ 949 return 0x10U; 950} 951static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_pause_report_f(void) 952{ 953 return 0x20U; 954} 955static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_single_step_complete_report_f(void) 956{ 957 return 0x40U; 958} 959static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_error_in_trap_report_f(void) 960{ 961 return 0x100U; 962} 963static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_r(void) 964{ 965 return 0x00419eb4U; 966} 967static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_r(void) 968{ 969 return 0x00504734U; 970} 971static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_m(void) 972{ 973 return 0x1U << 4U; 974} 975static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f(void) 976{ 977 return 0x10U; 978} 979static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_m(void) 980{ 981 return 0x1U << 5U; 982} 983static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(void) 984{ 985 return 0x20U; 986} 987static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_m(void) 988{ 989 return 0x1U << 6U; 990} 991static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f(void) 992{ 993 return 0x40U; 994} 995static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_m(void) 996{ 997 return 0x1U << 2U; 998} 999static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f(void) 1000{ 1001 return 0x4U; 1002} 1003static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_m(void) 1004{ 1005 return 0x1U << 8U; 1006} 
1007static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_pending_f(void) 1008{ 1009 return 0x100U; 1010} 1011static inline u32 gr_fe_go_idle_timeout_r(void) 1012{ 1013 return 0x00404154U; 1014} 1015static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) 1016{ 1017 return (v & 0xffffffffU) << 0U; 1018} 1019static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) 1020{ 1021 return 0x0U; 1022} 1023static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) 1024{ 1025 return 0x1800U; 1026} 1027static inline u32 gr_fe_object_table_r(u32 i) 1028{ 1029 return 0x00404200U + i*4U; 1030} 1031static inline u32 gr_fe_object_table_nvclass_v(u32 r) 1032{ 1033 return (r >> 0U) & 0xffffU; 1034} 1035static inline u32 gr_fe_tpc_fs_r(u32 i) 1036{ 1037 return 0x0040a200U + i*4U; 1038} 1039static inline u32 gr_pri_mme_shadow_raw_index_r(void) 1040{ 1041 return 0x00404488U; 1042} 1043static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) 1044{ 1045 return 0x80000000U; 1046} 1047static inline u32 gr_pri_mme_shadow_raw_data_r(void) 1048{ 1049 return 0x0040448cU; 1050} 1051static inline u32 gr_mme_hww_esr_r(void) 1052{ 1053 return 0x00404490U; 1054} 1055static inline u32 gr_mme_hww_esr_reset_active_f(void) 1056{ 1057 return 0x40000000U; 1058} 1059static inline u32 gr_mme_hww_esr_en_enable_f(void) 1060{ 1061 return 0x80000000U; 1062} 1063static inline u32 gr_mme_hww_esr_info_r(void) 1064{ 1065 return 0x00404494U; 1066} 1067static inline u32 gr_memfmt_hww_esr_r(void) 1068{ 1069 return 0x00404600U; 1070} 1071static inline u32 gr_memfmt_hww_esr_reset_active_f(void) 1072{ 1073 return 0x40000000U; 1074} 1075static inline u32 gr_memfmt_hww_esr_en_enable_f(void) 1076{ 1077 return 0x80000000U; 1078} 1079static inline u32 gr_fecs_cpuctl_r(void) 1080{ 1081 return 0x00409100U; 1082} 1083static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) 1084{ 1085 return (v & 0x1U) << 1U; 1086} 1087static inline u32 gr_fecs_cpuctl_alias_r(void) 1088{ 1089 return 0x00409130U; 1090} 
1091static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) 1092{ 1093 return (v & 0x1U) << 1U; 1094} 1095static inline u32 gr_fecs_dmactl_r(void) 1096{ 1097 return 0x0040910cU; 1098} 1099static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) 1100{ 1101 return (v & 0x1U) << 0U; 1102} 1103static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) 1104{ 1105 return 0x1U << 1U; 1106} 1107static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) 1108{ 1109 return 0x1U << 2U; 1110} 1111static inline u32 gr_fecs_os_r(void) 1112{ 1113 return 0x00409080U; 1114} 1115static inline u32 gr_fecs_idlestate_r(void) 1116{ 1117 return 0x0040904cU; 1118} 1119static inline u32 gr_fecs_mailbox0_r(void) 1120{ 1121 return 0x00409040U; 1122} 1123static inline u32 gr_fecs_mailbox1_r(void) 1124{ 1125 return 0x00409044U; 1126} 1127static inline u32 gr_fecs_irqstat_r(void) 1128{ 1129 return 0x00409008U; 1130} 1131static inline u32 gr_fecs_irqmode_r(void) 1132{ 1133 return 0x0040900cU; 1134} 1135static inline u32 gr_fecs_irqmask_r(void) 1136{ 1137 return 0x00409018U; 1138} 1139static inline u32 gr_fecs_irqdest_r(void) 1140{ 1141 return 0x0040901cU; 1142} 1143static inline u32 gr_fecs_curctx_r(void) 1144{ 1145 return 0x00409050U; 1146} 1147static inline u32 gr_fecs_nxtctx_r(void) 1148{ 1149 return 0x00409054U; 1150} 1151static inline u32 gr_fecs_engctl_r(void) 1152{ 1153 return 0x004090a4U; 1154} 1155static inline u32 gr_fecs_debug1_r(void) 1156{ 1157 return 0x00409090U; 1158} 1159static inline u32 gr_fecs_debuginfo_r(void) 1160{ 1161 return 0x00409094U; 1162} 1163static inline u32 gr_fecs_icd_cmd_r(void) 1164{ 1165 return 0x00409200U; 1166} 1167static inline u32 gr_fecs_icd_cmd_opc_s(void) 1168{ 1169 return 4U; 1170} 1171static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) 1172{ 1173 return (v & 0xfU) << 0U; 1174} 1175static inline u32 gr_fecs_icd_cmd_opc_m(void) 1176{ 1177 return 0xfU << 0U; 1178} 1179static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) 1180{ 1181 return (r >> 0U) & 0xfU; 1182} 1183static 
inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) { return 0x8U; }
static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) { return 0xeU; }
static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) { return (v & 0x1fU) << 8U; }
static inline u32 gr_fecs_icd_rdata_r(void) { return 0x0040920cU; }
/* FECS falcon IMEM/DMEM window registers (_r = offset, _f = field, _m = mask, _v = extract). */
static inline u32 gr_fecs_imemc_r(u32 i) { return 0x00409180U + i*16U; }
static inline u32 gr_fecs_imemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_fecs_imemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_fecs_imemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_fecs_imemd_r(u32 i) { return 0x00409184U + i*16U; }
static inline u32 gr_fecs_imemt_r(u32 i) { return 0x00409188U + i*16U; }
static inline u32 gr_fecs_imemt_tag_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_fecs_dmemc_r(u32 i) { return 0x004091c0U + i*8U; }
static inline u32 gr_fecs_dmemc_offs_s(void) { return 6U; }
static inline u32 gr_fecs_dmemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_fecs_dmemc_offs_m(void) { return 0x3fU << 2U; }
static inline u32 gr_fecs_dmemc_offs_v(u32 r) { return (r >> 2U) & 0x3fU; }
static inline u32 gr_fecs_dmemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_fecs_dmemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_fecs_dmemd_r(u32 i) { return 0x004091c4U + i*8U; }
/* FECS DMA transfer registers. */
static inline u32 gr_fecs_dmatrfbase_r(void) { return 0x00409110U; }
static inline u32 gr_fecs_dmatrfmoffs_r(void) { return 0x00409114U; }
static inline u32 gr_fecs_dmatrffboffs_r(void) { return 0x0040911cU; }
static inline u32 gr_fecs_dmatrfcmd_r(void) { return 0x00409118U; }
static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_fecs_bootvec_r(void) { return 0x00409104U; }
static inline u32 gr_fecs_bootvec_vec_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_falcon_hwcfg_r(void) { return 0x00409108U; }
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) { return 0x0041a108U; }
static inline u32 gr_fecs_falcon_rm_r(void) { return 0x00409084U; }
/* FECS current context pointer/target/valid fields. */
static inline u32 gr_fecs_current_ctx_r(void) { return 0x00409b00U; }
static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_current_ctx_target_s(void) { return 2U; }
static inline u32 gr_fecs_current_ctx_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_current_ctx_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_current_ctx_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) { return 0x0U; }
static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) { return 0x20000000U; }
static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) { return 0x30000000U; }
static inline u32 gr_fecs_current_ctx_valid_s(void) { return 1U; }
static inline u32 gr_fecs_current_ctx_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_fecs_current_ctx_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_fecs_current_ctx_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_fecs_current_ctx_valid_false_f(void) { return 0x0U; }
/* FECS firmware method interface (method push address codes). */
static inline u32 gr_fecs_method_data_r(void) { return 0x00409500U; }
static inline u32 gr_fecs_method_push_r(void) { return 0x00409504U; }
static inline u32 gr_fecs_method_push_adr_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) { return 0x00000003U; }
static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) { return 0x3U; }
static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) { return 0x00000010U; }
static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) { return 0x00000009U; }
static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) { return 0x00000015U; }
static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) { return 0x00000016U; }
static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) { return 0x00000025U; }
static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) { return 0x00000030U; }
static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) { return 0x00000031U; }
static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) { return 0x00000032U; }
static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) { return 0x00000038U; }
static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) { return 0x00000039U; }
static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) { return 0x21U; }
static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) { return 0x0000001aU; }
static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) { return 0x00000004U; }
static inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void) { return 0x0000003aU; }
/* FECS host interrupt status/clear/enable. */
static inline u32 gr_fecs_host_int_status_r(void) { return 0x00409c18U; }
static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) { return (v & 0x1U) << 16U; }
static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) { return (v & 0x1U) << 17U; }
static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) { return (v & 0x1U) << 18U; }
static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_fecs_host_int_clear_r(void) { return 0x00409c20U; }
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) { return 0x2U; }
static inline u32 gr_fecs_host_int_enable_r(void) { return 0x00409c24U; }
static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) { return 0x2U; }
static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) { return 0x10000U; }
static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) { return 0x20000U; }
static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) { return 0x40000U; }
static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) { return 0x80000U; }
/* FECS context-switch reset control. */
static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) { return 0x00409614U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) { return 0x10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) { return 0x20U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) { return 0x40U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) { return 0x100U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) { return 0x200U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) { return 1U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) { return (v & 0x1U) << 10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) { return 0x1U << 10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) { return (r >> 10U) & 0x1U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) { return 0x400U; }
static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) { return 0x0040960cU; }
/* FECS context-switch mailbox arrays. */
static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) { return 0x00409800U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) { return 0x00000010U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) { return 0x00000001U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) { return 0x00000002U; }
static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) { return 0x004098c0U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) { return 0x00409840U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
/* FECS floorsweep / config / RC-lane registers. */
static inline u32 gr_fecs_fs_r(void) { return 0x00409604U; }
static inline u32 gr_fecs_fs_num_available_gpcs_s(void) { return 5U; }
static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_fecs_fs_num_available_gpcs_m(void) { return 0x1fU << 0U; }
static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_fecs_fs_num_available_fbps_s(void) { return 5U; }
static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) { return (v & 0x1fU) << 16U; }
static inline u32 gr_fecs_fs_num_available_fbps_m(void) { return 0x1fU << 16U; }
static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) { return (r >> 16U) & 0x1fU; }
static inline u32 gr_fecs_cfg_r(void) { return 0x00409620U; }
static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_fecs_rc_lanes_r(void) { return 0x00409880U; }
static inline u32 gr_fecs_rc_lanes_num_chains_s(void) { return 6U; }
static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_fecs_rc_lanes_num_chains_m(void) { return 0x3fU << 0U; }
static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) { return (r >> 0U) & 0x3fU; }
/* FECS context-switch status / arbiter context registers. */
static inline u32 gr_fecs_ctxsw_status_1_r(void) { return 0x00409400U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) { return 1U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) { return (v & 0x1U) << 12U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) { return 0x1U << 12U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) { return (r >> 12U) & 0x1U; }
static inline u32 gr_fecs_arb_ctx_adr_r(void) { return 0x00409a24U; }
static inline u32 gr_fecs_new_ctx_r(void) { return 0x00409b04U; }
static inline u32 gr_fecs_new_ctx_ptr_s(void) { return 28U; }
static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_new_ctx_ptr_m(void) { return 0xfffffffU << 0U; }
static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_new_ctx_target_s(void) { return 2U; }
static inline u32 gr_fecs_new_ctx_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_new_ctx_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_new_ctx_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_new_ctx_valid_s(void) { return 1U; }
static inline u32 gr_fecs_new_ctx_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_fecs_new_ctx_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_fecs_new_ctx_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_fecs_arb_ctx_ptr_r(void) { return 0x00409a0cU; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) { return 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) { return 0xfffffffU << 0U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) { return 2U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_arb_ctx_cmd_r(void) { return 0x00409a10U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) { return 5U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) { return 0x1fU << 0U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) { return 0x00409c00U; }
static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) { return 0x00502c04U; }
static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) { return 0x00502400U; }
static inline u32 gr_gpc0_gpccs_ctxsw_mailbox__size_1_v(void) { return 0x00000010U; }
static inline u32 gr_fecs_ctxsw_idlestate_r(void) { return 0x00409420U; }
/* FECS ECC feature override fields. */
static inline u32 gr_fecs_feature_override_ecc_r(void) { return 0x00409658U; }
static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r) { return (r >> 3U) & 0x1U; }
static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r) { return (r >> 15U) & 0x1U; }
static inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r) { return (r >> 12U) & 0x1U; }
static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) { return 0x00502420U; }
/* RSTR2D mapping registers. */
static inline u32 gr_rstr2d_gpc_map_r(u32 i) { return 0x0040780cU + i*4U; }
static inline u32 gr_rstr2d_map_table_cfg_r(void) { return 0x004078bcU; }
static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) { return (v & 0xffU) << 8U; }
/* PD unit registers. */
static inline u32 gr_pd_hww_esr_r(void) { return 0x00406018U; }
static inline u32 gr_pd_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_pd_hww_esr_en_enable_f(void) { return 0x80000000U; }
static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) { return 0x00406028U + i*4U; }
static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) { return 0x00000004U; }
static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) { return (v & 0xfU) << 4U; }
static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) { return (v & 0xfU) << 12U; }
static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) { return (v & 0xfU) << 16U; }
static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) { return (v & 0xfU) << 20U; }
static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) { return (v & 0xfU) << 24U; }
static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) { return (v & 0xfU) << 28U; }
static inline u32 gr_pd_ab_dist_cfg0_r(void) { return 0x004064c0U; }
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) { return 0x80000000U; }
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) { return 0x0U; }
static inline u32 gr_pd_ab_dist_cfg1_r(void) { return 0x004064c4U; }
static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) { return 0xffffU; }
static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) { return 0x00000080U; }
static inline u32 gr_pd_ab_dist_cfg2_r(void) { return 0x004064c8U; }
static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) { return (v & 0x1fffU) << 0U; }
static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) { return 0x00001680U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) { return (v & 0x1fffU) << 16U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) { return 0x00001680U; }
static inline u32 gr_pd_dist_skip_table_r(u32 i) { return 0x004064d0U + i*4U; }
static inline u32 gr_pd_dist_skip_table__size_1_v(void) { return 0x00000008U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) { return (v & 0xffU) << 24U; }
/* DS unit: debug, ZBC color/depth tables, constraint logic, HWW ESR. */
static inline u32 gr_ds_debug_r(void) { return 0x00405800U; }
static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) { return 0x0U; }
static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) { return 0x8000000U; }
static inline u32 gr_ds_zbc_color_r_r(void) { return 0x00405804U; }
static inline u32 gr_ds_zbc_color_r_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_g_r(void) { return 0x00405808U; }
static inline u32 gr_ds_zbc_color_g_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_b_r(void) { return 0x0040580cU; }
static inline u32 gr_ds_zbc_color_b_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_a_r(void) { return 0x00405810U; }
static inline u32 gr_ds_zbc_color_a_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_fmt_r(void) { return 0x00405814U; }
static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) { return (v & 0x7fU) << 0U; }
static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) { return 0x00000001U; }
static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) { return 0x00000002U; }
static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) { return 0x00000004U; }
static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) { return 0x00000028U; }
static inline u32 gr_ds_zbc_z_r(void) { return 0x00405818U; }
static inline u32 gr_ds_zbc_z_val_s(void) { return 32U; }
static inline u32 gr_ds_zbc_z_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_z_val_m(void) { return 0xffffffffU << 0U; }
static inline u32 gr_ds_zbc_z_val_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
static inline u32 gr_ds_zbc_z_val__init_v(void) { return 0x00000000U; }
static inline u32 gr_ds_zbc_z_val__init_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_z_fmt_r(void) { return 0x0040581cU; }
static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) { return 0x00000001U; }
static inline u32 gr_ds_zbc_tbl_index_r(void) { return 0x00405820U; }
static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_ds_zbc_tbl_ld_r(void) { return 0x00405824U; }
static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) { return 0x1U; }
static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) { return 0x4U; }
static inline u32 gr_ds_tga_constraintlogic_beta_r(void) { return 0x00405830U; }
static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_ds_tga_constraintlogic_alpha_r(void) { return 0x0040585cU; }
static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_ds_hww_esr_r(void) { return 0x00405840U; }
static inline u32 gr_ds_hww_esr_reset_s(void) { return 1U; }
static inline u32 gr_ds_hww_esr_reset_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 gr_ds_hww_esr_reset_m(void) { return 0x1U << 30U; }
static inline u32 gr_ds_hww_esr_reset_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 gr_ds_hww_esr_reset_task_v(void) { return 0x00000001U; }
static inline u32 gr_ds_hww_esr_reset_task_f(void) { return 0x40000000U; }
static inline u32 gr_ds_hww_esr_en_enabled_f(void) { return 0x80000000U; }
static inline u32 gr_ds_hww_esr_2_r(void) { return 0x00405848U; }
static inline u32 gr_ds_hww_esr_2_reset_s(void) { return 1U; }
static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 gr_ds_hww_esr_2_reset_m(void) { return 0x1U << 30U; }
static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 gr_ds_hww_esr_2_reset_task_v(void) { return 0x00000001U; }
static inline u32 gr_ds_hww_esr_2_reset_task_f(void) { return 0x40000000U; }
static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) { return 0x80000000U; }
/* DS SPH error report mask bits (one bit per SPH error, 0..23; mask_2 carries bit 24). */
static inline u32 gr_ds_hww_report_mask_r(void) { return 0x00405844U; }
static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) { return 0x1U; }
static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) { return 0x2U; }
static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) { return 0x4U; }
static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) { return 0x8U; }
static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) { return 0x10U; }
static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) { return 0x20U; }
static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) { return 0x40U; }
static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) { return 0x80U; }
static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) { return 0x100U; }
static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) { return 0x200U; }
static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) { return 0x400U; }
static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) { return 0x800U; }
static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) { return 0x1000U; }
static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) { return 0x2000U; }
static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) { return 0x4000U; }
static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) { return 0x8000U; }
static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) { return 0x10000U; }
static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) { return 0x20000U; }
static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) { return 0x40000U; }
static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) { return 0x80000U; }
static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) { return 0x100000U; }
static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) { return 0x200000U; }
static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) { return 0x400000U; }
static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) { return 0x800000U; }
static inline u32 gr_ds_hww_report_mask_2_r(void) { return 0x0040584cU; }
static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void) { return 0x1U; }
static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) { return 0x00405870U + i*4U; }
/* SCC unit: bundle CB, pagepool, init, HWW ESR. */
static inline u32 gr_scc_bundle_cb_base_r(void) { return 0x00408004U; }
static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) { return 0x00000008U; }
static inline u32 gr_scc_bundle_cb_size_r(void) { return 0x00408008U; }
static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) { return (v & 0x7ffU) << 0U; }
static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) { return 0x00000030U; }
static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) { return 0x00000100U; }
static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) { return 0x00000000U; }
static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) { return 0x0U; }
static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_scc_pagepool_base_r(void) { return 0x0040800cU; }
static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) { return 0x00000008U; }
static inline u32 gr_scc_pagepool_r(void) { return 0x00408010U; }
static inline u32 gr_scc_pagepool_total_pages_f(u32 v) { return (v & 0x3ffU) << 0U; }
static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) { return 0x00000000U; }
static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) { return 0x00000200U; }
static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) { return 0x00000100U; }
static inline u32 gr_scc_pagepool_max_valid_pages_s(void) { return 10U; }
static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) { return (v & 0x3ffU) << 10U; }
static inline u32 gr_scc_pagepool_max_valid_pages_m(void) { return 0x3ffU << 10U; }
static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) { return (r >> 10U) & 0x3ffU; }
static inline u32 gr_scc_pagepool_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_scc_init_r(void) { return 0x0040802cU; }
static inline u32 gr_scc_init_ram_trigger_f(void) { return 0x1U; }
static inline u32 gr_scc_hww_esr_r(void) { return 0x00408030U; }
static inline u32 gr_scc_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_scc_hww_esr_en_enable_f(void) { return 0x80000000U; }
/* SSYNC / SKED HWW ESR. */
static inline u32 gr_ssync_hww_esr_r(void) { return 0x00405a14U; }
static inline u32 gr_ssync_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_ssync_hww_esr_en_enable_f(void) { return 0x80000000U; }
static inline u32 gr_sked_hww_esr_r(void) { return 0x00407020U; }
static inline u32 gr_sked_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_sked_hww_esr_en_r(void) { return 0x00407024U; }
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(void) { return 0x1U << 25U; }
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f(void) { return 0x0U; }
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f(void) { return 0x2000000U; }
/* CWD unit. */
static inline u32 gr_cwd_fs_r(void) { return 0x00405b00U; }
static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) { return 0x00405b60U + i*4U; }
static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) { return 4U; }
static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) { return 4U; }
static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) { return (v & 0xfU) << 4U; }
static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_cwd_sm_id_r(u32 i) { return 0x00405ba0U + i*4U; }
static inline u32 gr_cwd_sm_id__size_1_v(void) { return 0x00000010U; }
static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) { return (v & 0xffU) << 8U; }
/* GPC0 / GPCCS registers. */
static inline u32 gr_gpc0_fs_gpc_r(void) { return 0x00502608U; }
static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) { return (r >> 16U) & 0x1fU; }
static inline u32 gr_gpc0_cfg_r(void) { return 0x00502620U; }
static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_gpccs_rc_lanes_r(void) { return 0x00502880U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) { return 6U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) { return 0x3fU << 0U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_gpccs_rc_lane_size_r(void) { return 0x00502910U; }
static inline u32 gr_gpccs_rc_lane_size_v_s(void) { return 24U; }
static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) { return (v & 0xffffffU) << 0U; }
static inline u32 gr_gpccs_rc_lane_size_v_m(void) { return 0xffffffU << 0U; }
static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) { return (r >> 0U) & 0xffffffU; }
static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) { return 0x0U; }
/* GPC0 ZCULL registers. */
static inline u32 gr_gpc0_zcull_fs_r(void) { return 0x00500910U; }
static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) { return (v & 0x1ffU) << 0U; }
static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) { return (v & 0xfU) << 16U; }
static inline u32 gr_gpc0_zcull_ram_addr_r(void) { return 0x00500914U; }
static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) { return 0x00500918U; }
static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) { return (v & 0xffffffU) << 0U; }
static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) { return 0x00800000U; }
static inline u32 gr_gpc0_zcull_total_ram_size_r(void) { return 0x00500920U; }
static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) { return 0x00500a04U + i*32U; }
static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) { return 0x00000040U; }
static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) { return 0x00000010U; }
/* GPC0 GPM/TPC/SM identification registers. */
static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) { return 0x00500c10U + i*4U; }
static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) { return 0x00500c30U + i*4U; }
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) { return 0x00504088U; }
static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) { return 0x00504608U; }
static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 gr_gpc0_tpc0_sm_arch_r(void) { return 0x00504330U; }
static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) { return (r >> 8U) & 0xfffU; }
static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) { return (r >> 20U) & 0xfffU; }
/* GPC0 PPC0: PES and circular-buffer (alpha/beta CB) registers. */
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) { return 0x00503018U; }
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) { return 0x1U << 0U; }
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) { return 0x1U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) { return 0x005030c0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) { return 0x3fffffU << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) { return 0x00000480U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) { return 0x00000d10U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) { return 0x005030f4U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) { return 0x005030e4U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) { return 0xffffU << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) { return 0x00000800U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) { return 0x005030f8U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) { return 0x005030f0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) { return 0x00000480U; }
/* GPCS TPCS texture RM circular buffer. */
static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) { return 0x00419e00U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) { return 0x00419e04U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) { return 21U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) { return (v & 0x1fffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) { return 0x1fffffU << 0U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) { return (r >> 0U) & 0x1fffffU; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) { return 0x80U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) { return 1U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) { return 0x80000000U; }
/* GPCCS falcon address fields. */
static inline u32 gr_gpccs_falcon_addr_r(void) { return 0x0041a0acU; }
static inline u32 gr_gpccs_falcon_addr_lsb_s(void) { return 6U; }
static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_gpccs_falcon_addr_lsb_m(void) { return 0x3fU << 0U; }
static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) { return 0x0U; }
static inline u32 gr_gpccs_falcon_addr_msb_s(void) { return 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) { return (v & 0x3fU) << 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_m(void) { return 0x3fU << 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) { return (r >> 6U) & 0x3fU; }
static inline u32
gr_gpccs_falcon_addr_msb_init_v(void) 2880{ 2881 return 0x00000000U; 2882} 2883static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) 2884{ 2885 return 0x0U; 2886} 2887static inline u32 gr_gpccs_falcon_addr_ext_s(void) 2888{ 2889 return 12U; 2890} 2891static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) 2892{ 2893 return (v & 0xfffU) << 0U; 2894} 2895static inline u32 gr_gpccs_falcon_addr_ext_m(void) 2896{ 2897 return 0xfffU << 0U; 2898} 2899static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) 2900{ 2901 return (r >> 0U) & 0xfffU; 2902} 2903static inline u32 gr_gpccs_cpuctl_r(void) 2904{ 2905 return 0x0041a100U; 2906} 2907static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) 2908{ 2909 return (v & 0x1U) << 1U; 2910} 2911static inline u32 gr_gpccs_dmactl_r(void) 2912{ 2913 return 0x0041a10cU; 2914} 2915static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) 2916{ 2917 return (v & 0x1U) << 0U; 2918} 2919static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) 2920{ 2921 return 0x1U << 1U; 2922} 2923static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) 2924{ 2925 return 0x1U << 2U; 2926} 2927static inline u32 gr_gpccs_imemc_r(u32 i) 2928{ 2929 return 0x0041a180U + i*16U; 2930} 2931static inline u32 gr_gpccs_imemc_offs_f(u32 v) 2932{ 2933 return (v & 0x3fU) << 2U; 2934} 2935static inline u32 gr_gpccs_imemc_blk_f(u32 v) 2936{ 2937 return (v & 0xffU) << 8U; 2938} 2939static inline u32 gr_gpccs_imemc_aincw_f(u32 v) 2940{ 2941 return (v & 0x1U) << 24U; 2942} 2943static inline u32 gr_gpccs_imemd_r(u32 i) 2944{ 2945 return 0x0041a184U + i*16U; 2946} 2947static inline u32 gr_gpccs_imemt_r(u32 i) 2948{ 2949 return 0x0041a188U + i*16U; 2950} 2951static inline u32 gr_gpccs_imemt__size_1_v(void) 2952{ 2953 return 0x00000004U; 2954} 2955static inline u32 gr_gpccs_imemt_tag_f(u32 v) 2956{ 2957 return (v & 0xffffU) << 0U; 2958} 2959static inline u32 gr_gpccs_dmemc_r(u32 i) 2960{ 2961 return 0x0041a1c0U + i*8U; 2962} 2963static inline u32 gr_gpccs_dmemc_offs_f(u32 v) 2964{ 2965 return 
(v & 0x3fU) << 2U; 2966} 2967static inline u32 gr_gpccs_dmemc_blk_f(u32 v) 2968{ 2969 return (v & 0xffU) << 8U; 2970} 2971static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) 2972{ 2973 return (v & 0x1U) << 24U; 2974} 2975static inline u32 gr_gpccs_dmemd_r(u32 i) 2976{ 2977 return 0x0041a1c4U + i*8U; 2978} 2979static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) 2980{ 2981 return 0x0041a800U + i*4U; 2982} 2983static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) 2984{ 2985 return (v & 0xffffffffU) << 0U; 2986} 2987static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) 2988{ 2989 return 0x00418e24U; 2990} 2991static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) 2992{ 2993 return 32U; 2994} 2995static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) 2996{ 2997 return (v & 0xffffffffU) << 0U; 2998} 2999static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) 3000{ 3001 return 0xffffffffU << 0U; 3002} 3003static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) 3004{ 3005 return (r >> 0U) & 0xffffffffU; 3006} 3007static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) 3008{ 3009 return 0x00000000U; 3010} 3011static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) 3012{ 3013 return 0x0U; 3014} 3015static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) 3016{ 3017 return 0x00418e28U; 3018} 3019static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) 3020{ 3021 return 11U; 3022} 3023static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) 3024{ 3025 return (v & 0x7ffU) << 0U; 3026} 3027static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) 3028{ 3029 return 0x7ffU << 0U; 3030} 3031static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) 3032{ 3033 return (r >> 0U) & 0x7ffU; 3034} 3035static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) 3036{ 3037 return 0x00000030U; 3038} 3039static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) 3040{ 3041 return 0x30U; 3042} 
3043static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) 3044{ 3045 return 1U; 3046} 3047static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) 3048{ 3049 return (v & 0x1U) << 31U; 3050} 3051static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) 3052{ 3053 return 0x1U << 31U; 3054} 3055static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) 3056{ 3057 return (r >> 31U) & 0x1U; 3058} 3059static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) 3060{ 3061 return 0x00000000U; 3062} 3063static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) 3064{ 3065 return 0x0U; 3066} 3067static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) 3068{ 3069 return 0x00000001U; 3070} 3071static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) 3072{ 3073 return 0x80000000U; 3074} 3075static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) 3076{ 3077 return 0x005001dcU; 3078} 3079static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) 3080{ 3081 return (v & 0xffffU) << 0U; 3082} 3083static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) 3084{ 3085 return 0x000004b0U; 3086} 3087static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) 3088{ 3089 return 0x00000100U; 3090} 3091static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) 3092{ 3093 return 0x005001d8U; 3094} 3095static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) 3096{ 3097 return (v & 0xffffffffU) << 0U; 3098} 3099static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) 3100{ 3101 return 0x00000008U; 3102} 3103static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) 3104{ 3105 return 0x004181e4U; 3106} 3107static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) 3108{ 3109 return (v & 0xfffU) << 0U; 3110} 3111static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) 3112{ 3113 return 0x00000100U; 3114} 3115static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) 
3116{ 3117 return 0x0041befcU; 3118} 3119static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) 3120{ 3121 return (v & 0xfffU) << 0U; 3122} 3123static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) 3124{ 3125 return 0x00418ea0U + i*4U; 3126} 3127static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) 3128{ 3129 return (v & 0x3fffffU) << 0U; 3130} 3131static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) 3132{ 3133 return 0x3fffffU << 0U; 3134} 3135static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) 3136{ 3137 return 0x00418010U + i*4U; 3138} 3139static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) 3140{ 3141 return (v & 0xffffffffU) << 0U; 3142} 3143static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) 3144{ 3145 return 0x0041804cU + i*4U; 3146} 3147static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) 3148{ 3149 return (v & 0xffffffffU) << 0U; 3150} 3151static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) 3152{ 3153 return 0x00418088U + i*4U; 3154} 3155static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) 3156{ 3157 return (v & 0xffffffffU) << 0U; 3158} 3159static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) 3160{ 3161 return 0x004180c4U + i*4U; 3162} 3163static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) 3164{ 3165 return (v & 0xffffffffU) << 0U; 3166} 3167static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) 3168{ 3169 return 0x00418100U; 3170} 3171static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) 3172{ 3173 return 0x00418110U + i*4U; 3174} 3175static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) 3176{ 3177 return (v & 0xffffffffU) << 0U; 3178} 3179static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) 3180{ 3181 return 0x0041814cU; 3182} 3183static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i) 3184{ 3185 return 0x0041815cU + i*4U; 3186} 3187static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v) 3188{ 3189 return (v & 0xffU) << 0U; 3190} 3191static inline u32 
gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r(void) 3192{ 3193 return 0x00418198U; 3194} 3195static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) 3196{ 3197 return 0x00418810U; 3198} 3199static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) 3200{ 3201 return (v & 0xfffffffU) << 0U; 3202} 3203static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) 3204{ 3205 return 0x0000000cU; 3206} 3207static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) 3208{ 3209 return 0x80000000U; 3210} 3211static inline u32 gr_crstr_gpc_map_r(u32 i) 3212{ 3213 return 0x00418b08U + i*4U; 3214} 3215static inline u32 gr_crstr_gpc_map_tile0_f(u32 v) 3216{ 3217 return (v & 0x1fU) << 0U; 3218} 3219static inline u32 gr_crstr_gpc_map_tile1_f(u32 v) 3220{ 3221 return (v & 0x1fU) << 5U; 3222} 3223static inline u32 gr_crstr_gpc_map_tile2_f(u32 v) 3224{ 3225 return (v & 0x1fU) << 10U; 3226} 3227static inline u32 gr_crstr_gpc_map_tile3_f(u32 v) 3228{ 3229 return (v & 0x1fU) << 15U; 3230} 3231static inline u32 gr_crstr_gpc_map_tile4_f(u32 v) 3232{ 3233 return (v & 0x1fU) << 20U; 3234} 3235static inline u32 gr_crstr_gpc_map_tile5_f(u32 v) 3236{ 3237 return (v & 0x1fU) << 25U; 3238} 3239static inline u32 gr_crstr_map_table_cfg_r(void) 3240{ 3241 return 0x00418bb8U; 3242} 3243static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) 3244{ 3245 return (v & 0xffU) << 0U; 3246} 3247static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) 3248{ 3249 return (v & 0xffU) << 8U; 3250} 3251static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i) 3252{ 3253 return 0x00418980U + i*4U; 3254} 3255static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v) 3256{ 3257 return (v & 0x7U) << 0U; 3258} 3259static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(u32 v) 3260{ 3261 return (v & 0x7U) << 4U; 3262} 3263static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(u32 v) 3264{ 3265 return (v & 0x7U) << 8U; 3266} 3267static inline u32 
gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(u32 v) 3268{ 3269 return (v & 0x7U) << 12U; 3270} 3271static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(u32 v) 3272{ 3273 return (v & 0x7U) << 16U; 3274} 3275static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(u32 v) 3276{ 3277 return (v & 0x7U) << 20U; 3278} 3279static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(u32 v) 3280{ 3281 return (v & 0x7U) << 24U; 3282} 3283static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(u32 v) 3284{ 3285 return (v & 0x7U) << 28U; 3286} 3287static inline u32 gr_gpcs_gpm_pd_cfg_r(void) 3288{ 3289 return 0x00418c6cU; 3290} 3291static inline u32 gr_gpcs_gcc_pagepool_base_r(void) 3292{ 3293 return 0x00419004U; 3294} 3295static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) 3296{ 3297 return (v & 0xffffffffU) << 0U; 3298} 3299static inline u32 gr_gpcs_gcc_pagepool_r(void) 3300{ 3301 return 0x00419008U; 3302} 3303static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) 3304{ 3305 return (v & 0x3ffU) << 0U; 3306} 3307static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) 3308{ 3309 return 0x0041980cU; 3310} 3311static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) 3312{ 3313 return 0x10U; 3314} 3315static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) 3316{ 3317 return 0x00419848U; 3318} 3319static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) 3320{ 3321 return (v & 0xfffffffU) << 0U; 3322} 3323static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) 3324{ 3325 return (v & 0x1U) << 28U; 3326} 3327static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) 3328{ 3329 return 0x10000000U; 3330} 3331static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) 3332{ 3333 return 0x00419c00U; 3334} 3335static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) 3336{ 3337 return 0x0U; 3338} 3339static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) 3340{ 
3341 return 0x8U; 3342} 3343static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) 3344{ 3345 return 0x00419c2cU; 3346} 3347static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) 3348{ 3349 return (v & 0xfffffffU) << 0U; 3350} 3351static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) 3352{ 3353 return (v & 0x1U) << 28U; 3354} 3355static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) 3356{ 3357 return 0x10000000U; 3358} 3359static inline u32 gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(void) 3360{ 3361 return 0x00419ea8U; 3362} 3363static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r(void) 3364{ 3365 return 0x00504728U; 3366} 3367static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f(void) 3368{ 3369 return 0x2U; 3370} 3371static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f(void) 3372{ 3373 return 0x4U; 3374} 3375static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f(void) 3376{ 3377 return 0x10U; 3378} 3379static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f(void) 3380{ 3381 return 0x20U; 3382} 3383static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f(void) 3384{ 3385 return 0x40U; 3386} 3387static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f(void) 3388{ 3389 return 0x100U; 3390} 3391static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) 3392{ 3393 return 0x200U; 3394} 3395static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) 3396{ 3397 return 0x800U; 3398} 3399static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f(void) 3400{ 3401 return 0x2000U; 3402} 3403static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f(void) 3404{ 3405 return 0x4000U; 3406} 3407static inline u32 
gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f(void) 3408{ 3409 return 0x8000U; 3410} 3411static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) 3412{ 3413 return 0x10000U; 3414} 3415static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) 3416{ 3417 return 0x40000U; 3418} 3419static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f(void) 3420{ 3421 return 0x800000U; 3422} 3423static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f(void) 3424{ 3425 return 0x400000U; 3426} 3427static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_nack_report_f(void) 3428{ 3429 return 0x4000000U; 3430} 3431static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) 3432{ 3433 return 0x00419d0cU; 3434} 3435static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) 3436{ 3437 return 0x2U; 3438} 3439static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) 3440{ 3441 return 0x1U; 3442} 3443static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void) 3444{ 3445 return 0x10U; 3446} 3447static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 3448{ 3449 return 0x0050450cU; 3450} 3451static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) 3452{ 3453 return (r >> 1U) & 0x1U; 3454} 3455static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) 3456{ 3457 return 0x2U; 3458} 3459static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void) 3460{ 3461 return 0x10U; 3462} 3463static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) 3464{ 3465 return 0x0041ac94U; 3466} 3467static inline u32 gr_gpcs_gpccs_gpc_exception_en_gcc_f(u32 v) 3468{ 3469 return (v & 0x1U) << 2U; 3470} 3471static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) 3472{ 3473 return (v & 0xffU) << 16U; 3474} 3475static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) 3476{ 3477 
return 0x00502c90U; 3478} 3479static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) 3480{ 3481 return (r >> 2U) & 0x1U; 3482} 3483static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) 3484{ 3485 return (r >> 16U) & 0xffU; 3486} 3487static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) 3488{ 3489 return 0x00000001U; 3490} 3491static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) 3492{ 3493 return 0x00504508U; 3494} 3495static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) 3496{ 3497 return (r >> 0U) & 0x1U; 3498} 3499static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) 3500{ 3501 return 0x00000001U; 3502} 3503static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) 3504{ 3505 return (r >> 1U) & 0x1U; 3506} 3507static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) 3508{ 3509 return 0x00000001U; 3510} 3511static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void) 3512{ 3513 return 0x1U << 4U; 3514} 3515static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void) 3516{ 3517 return 0x10U; 3518} 3519static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void) 3520{ 3521 return 0x00504704U; 3522} 3523static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m(void) 3524{ 3525 return 0x1U << 0U; 3526} 3527static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(u32 r) 3528{ 3529 return (r >> 0U) & 0x1U; 3530} 3531static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v(void) 3532{ 3533 return 0x00000001U; 3534} 3535static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f(void) 3536{ 3537 return 0x1U; 3538} 3539static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_v(void) 3540{ 3541 return 0x00000000U; 3542} 3543static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f(void) 3544{ 3545 return 0x0U; 3546} 3547static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(void) 3548{ 3549 return 0x1U << 31U; 3550} 3551static 
inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f(void) 3552{ 3553 return 0x80000000U; 3554} 3555static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f(void) 3556{ 3557 return 0x0U; 3558} 3559static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(void) 3560{ 3561 return 0x1U << 3U; 3562} 3563static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f(void) 3564{ 3565 return 0x8U; 3566} 3567static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f(void) 3568{ 3569 return 0x0U; 3570} 3571static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f(void) 3572{ 3573 return 0x40000000U; 3574} 3575static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_0_r(void) 3576{ 3577 return 0x00504708U; 3578} 3579static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_1_r(void) 3580{ 3581 return 0x0050470cU; 3582} 3583static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r(void) 3584{ 3585 return 0x00504710U; 3586} 3587static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r(void) 3588{ 3589 return 0x00504714U; 3590} 3591static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r(void) 3592{ 3593 return 0x00504718U; 3594} 3595static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r(void) 3596{ 3597 return 0x0050471cU; 3598} 3599static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(void) 3600{ 3601 return 0x00419e90U; 3602} 3603static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r(void) 3604{ 3605 return 0x00419e94U; 3606} 3607static inline u32 gr_gpcs_tpcs_sms_dbgr_status0_r(void) 3608{ 3609 return 0x00419e80U; 3610} 3611static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_r(void) 3612{ 3613 return 0x00504700U; 3614} 3615static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_sm_in_trap_mode_v(u32 r) 3616{ 3617 return (r >> 0U) & 0x1U; 3618} 3619static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(u32 r) 3620{ 3621 return (r >> 4U) & 0x1U; 3622} 3623static inline u32 
gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v(void) 3624{ 3625 return 0x00000001U; 3626} 3627static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_r(void) 3628{ 3629 return 0x00504730U; 3630} 3631static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(u32 r) 3632{ 3633 return (r >> 0U) & 0xffffU; 3634} 3635static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v(void) 3636{ 3637 return 0x00000000U; 3638} 3639static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_f(void) 3640{ 3641 return 0x0U; 3642} 3643static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_stack_error_f(void) 3644{ 3645 return 0x1U; 3646} 3647static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_api_stack_error_f(void) 3648{ 3649 return 0x2U; 3650} 3651static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_pc_wrap_f(void) 3652{ 3653 return 0x4U; 3654} 3655static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_misaligned_pc_f(void) 3656{ 3657 return 0x5U; 3658} 3659static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_pc_overflow_f(void) 3660{ 3661 return 0x6U; 3662} 3663static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_misaligned_reg_f(void) 3664{ 3665 return 0x8U; 3666} 3667static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_illegal_instr_encoding_f(void) 3668{ 3669 return 0x9U; 3670} 3671static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_illegal_instr_param_f(void) 3672{ 3673 return 0xbU; 3674} 3675static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_oor_reg_f(void) 3676{ 3677 return 0xdU; 3678} 3679static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_oor_addr_f(void) 3680{ 3681 return 0xeU; 3682} 3683static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_misaligned_addr_f(void) 3684{ 3685 return 0xfU; 3686} 3687static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_invalid_addr_space_f(void) 3688{ 3689 return 0x10U; 3690} 3691static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_invalid_const_addr_ldc_f(void) 3692{ 3693 return 0x12U; 3694} 3695static inline u32 
gr_gpc0_tpc0_sm0_hww_warp_esr_error_stack_overflow_f(void) 3696{ 3697 return 0x16U; 3698} 3699static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_mmu_fault_f(void) 3700{ 3701 return 0x17U; 3702} 3703static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_tex_format_f(void) 3704{ 3705 return 0x18U; 3706} 3707static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_tex_layout_f(void) 3708{ 3709 return 0x19U; 3710} 3711static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_mmu_nack_f(void) 3712{ 3713 return 0x20U; 3714} 3715static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m(void) 3716{ 3717 return 0xffU << 16U; 3718} 3719static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(void) 3720{ 3721 return 0xfU << 24U; 3722} 3723static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f(void) 3724{ 3725 return 0x0U; 3726} 3727static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r(void) 3728{ 3729 return 0x0050460cU; 3730} 3731static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(u32 r) 3732{ 3733 return (r >> 0U) & 0x1U; 3734} 3735static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(u32 r) 3736{ 3737 return (r >> 1U) & 0x1U; 3738} 3739static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r(void) 3740{ 3741 return 0x00504738U; 3742} 3743static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_hi_r(void) 3744{ 3745 return 0x0050473cU; 3746} 3747static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) 3748{ 3749 return 0x005043a0U; 3750} 3751static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) 3752{ 3753 return 0x00419ba0U; 3754} 3755static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) 3756{ 3757 return 0x1U << 4U; 3758} 3759static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) 3760{ 3761 return (v & 0x1U) << 4U; 3762} 3763static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) 3764{ 3765 return 0x005043b0U; 3766} 3767static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) 3768{ 3769 return 
0x00419bb0U; 3770} 3771static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) 3772{ 3773 return 0x1U << 0U; 3774} 3775static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) 3776{ 3777 return (v & 0x1U) << 0U; 3778} 3779static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) 3780{ 3781 return 0x0041be08U; 3782} 3783static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) 3784{ 3785 return 0x4U; 3786} 3787static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i) 3788{ 3789 return 0x0041bf00U + i*4U; 3790} 3791static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) 3792{ 3793 return 0x0041bfd0U; 3794} 3795static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) 3796{ 3797 return (v & 0xffU) << 0U; 3798} 3799static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) 3800{ 3801 return (v & 0xffU) << 8U; 3802} 3803static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) 3804{ 3805 return (v & 0x1fU) << 16U; 3806} 3807static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) 3808{ 3809 return (v & 0x7U) << 21U; 3810} 3811static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) 3812{ 3813 return 0x0041bfd4U; 3814} 3815static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) 3816{ 3817 return (v & 0xffffffU) << 0U; 3818} 3819static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i) 3820{ 3821 return 0x0041bfb0U + i*4U; 3822} 3823static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void) 3824{ 3825 return 0x00000005U; 3826} 3827static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(u32 v) 3828{ 3829 return (v & 0xffU) << 0U; 3830} 3831static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(u32 v) 3832{ 3833 return (v & 0xffU) << 8U; 3834} 3835static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(u32 v) 3836{ 3837 return (v & 0xffU) << 16U; 3838} 3839static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(u32 v) 3840{ 3841 
return (v & 0xffU) << 24U; 3842} 3843static inline u32 gr_bes_zrop_settings_r(void) 3844{ 3845 return 0x00408850U; 3846} 3847static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) 3848{ 3849 return (v & 0xfU) << 0U; 3850} 3851static inline u32 gr_be0_crop_debug3_r(void) 3852{ 3853 return 0x00410108U; 3854} 3855static inline u32 gr_bes_crop_debug3_r(void) 3856{ 3857 return 0x00408908U; 3858} 3859static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) 3860{ 3861 return 0x1U << 31U; 3862} 3863static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void) 3864{ 3865 return 0x1U << 1U; 3866} 3867static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void) 3868{ 3869 return 0x0U; 3870} 3871static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void) 3872{ 3873 return 0x2U; 3874} 3875static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void) 3876{ 3877 return 0x1U << 2U; 3878} 3879static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void) 3880{ 3881 return 0x0U; 3882} 3883static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void) 3884{ 3885 return 0x4U; 3886} 3887static inline u32 gr_bes_crop_debug4_r(void) 3888{ 3889 return 0x0040894cU; 3890} 3891static inline u32 gr_bes_crop_debug4_clamp_fp_blend_m(void) 3892{ 3893 return 0x1U << 18U; 3894} 3895static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_inf_f(void) 3896{ 3897 return 0x0U; 3898} 3899static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_maxval_f(void) 3900{ 3901 return 0x40000U; 3902} 3903static inline u32 gr_bes_crop_settings_r(void) 3904{ 3905 return 0x00408958U; 3906} 3907static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) 3908{ 3909 return (v & 0xfU) << 0U; 3910} 3911static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) 3912{ 3913 return 0x00000020U; 3914} 3915static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) 3916{ 3917 return 0x00000020U; 3918} 3919static inline u32 
gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) 3920{ 3921 return 0x000000c0U; 3922} 3923static inline u32 gr_zcull_subregion_qty_v(void) 3924{ 3925 return 0x00000010U; 3926} 3927static inline u32 gr_gpcs_tpcs_tex_in_dbg_r(void) 3928{ 3929 return 0x00419a00U; 3930} 3931static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(u32 v) 3932{ 3933 return (v & 0x1U) << 19U; 3934} 3935static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(void) 3936{ 3937 return 0x1U << 19U; 3938} 3939static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_r(void) 3940{ 3941 return 0x00419bf0U; 3942} 3943static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(u32 v) 3944{ 3945 return (v & 0x1U) << 5U; 3946} 3947static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(void) 3948{ 3949 return 0x1U << 5U; 3950} 3951static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(u32 v) 3952{ 3953 return (v & 0x1U) << 10U; 3954} 3955static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(void) 3956{ 3957 return 0x1U << 10U; 3958} 3959static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_m(void) 3960{ 3961 return 0x1U << 28U; 3962} 3963static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_disable_f(void) 3964{ 3965 return 0x0U; 3966} 3967static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_enable_f(void) 3968{ 3969 return 0x10000000U; 3970} 3971static inline u32 gr_fe_pwr_mode_r(void) 3972{ 3973 return 0x00404170U; 3974} 3975static inline u32 gr_fe_pwr_mode_mode_auto_f(void) 3976{ 3977 return 0x0U; 3978} 3979static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) 3980{ 3981 return 0x2U; 3982} 3983static inline u32 gr_fe_pwr_mode_req_v(u32 r) 3984{ 3985 return (r >> 4U) & 0x1U; 3986} 3987static inline u32 gr_fe_pwr_mode_req_send_f(void) 3988{ 3989 return 0x10U; 3990} 3991static inline u32 gr_fe_pwr_mode_req_done_v(void) 3992{ 3993 return 0x00000000U; 3994} 3995static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) 3996{ 3997 
return 0x00418880U; 3998} 3999static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) 4000{ 4001 return 0x1U << 0U; 4002} 4003static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) 4004{ 4005 return 0x1U << 11U; 4006} 4007static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) 4008{ 4009 return 0x1U << 1U; 4010} 4011static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) 4012{ 4013 return 0x1U << 2U; 4014} 4015static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) 4016{ 4017 return 0x3U << 3U; 4018} 4019static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) 4020{ 4021 return 0x3U << 5U; 4022} 4023static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) 4024{ 4025 return 0x3U << 28U; 4026} 4027static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) 4028{ 4029 return 0x1U << 30U; 4030} 4031static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) 4032{ 4033 return 0x1U << 31U; 4034} 4035static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) 4036{ 4037 return 0x00418890U; 4038} 4039static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) 4040{ 4041 return 0x00418894U; 4042} 4043static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) 4044{ 4045 return 0x004188b0U; 4046} 4047static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) 4048{ 4049 return (r >> 16U) & 0x1U; 4050} 4051static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) 4052{ 4053 return 0x00000001U; 4054} 4055static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) 4056{ 4057 return 0x004188b4U; 4058} 4059static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) 4060{ 4061 return 0x004188b8U; 4062} 4063static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) 4064{ 4065 return 0x004188acU; 4066} 4067static inline u32 gr_gpcs_tpcs_sms_dbgr_control0_r(void) 4068{ 4069 return 0x00419e84U; 4070} 4071static inline u32 gr_fe_gfxp_wfi_timeout_r(void) 4072{ 4073 return 0x004041c0U; 4074} 4075static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v) 4076{ 4077 return (v & 0xffffffffU) << 0U; 4078} 
4079static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void) 4080{ 4081 return 0x0U; 4082} 4083static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void) 4084{ 4085 return 0x00419bd8U; 4086} 4087static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v) 4088{ 4089 return (v & 0x7U) << 8U; 4090} 4091static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void) 4092{ 4093 return 0x7U << 8U; 4094} 4095static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void) 4096{ 4097 return 0x100U; 4098} 4099static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void) 4100{ 4101 return 0x00419ba4U; 4102} 4103static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void) 4104{ 4105 return 0x3U << 11U; 4106} 4107static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void) 4108{ 4109 return 0x1000U; 4110} 4111static inline u32 gr_gpcs_tc_debug0_r(void) 4112{ 4113 return 0x00418708U; 4114} 4115static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v) 4116{ 4117 return (v & 0x1ffU) << 0U; 4118} 4119static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void) 4120{ 4121 return 0x1ffU << 0U; 4122} 4123#endif
diff --git a/include/nvgpu/hw/gv100/hw_ioctrl_gv100.h b/include/nvgpu/hw/gv100/hw_ioctrl_gv100.h
deleted file mode 100644
index c27e607..0000000
--- a/include/nvgpu/hw/gv100/hw_ioctrl_gv100.h
+++ /dev/null
@@ -1,331 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ioctrl_gv100_h_ 57#define _hw_ioctrl_gv100_h_ 58 59static inline u32 ioctrl_reset_r(void) 60{ 61 return 0x00000140U; 62} 63static inline u32 ioctrl_reset_sw_post_reset_delay_microseconds_v(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ioctrl_reset_linkreset_f(u32 v) 68{ 69 return (v & 0x3fU) << 8U; 70} 71static inline u32 ioctrl_reset_linkreset_m(void) 72{ 73 return 0x3fU << 8U; 74} 75static inline u32 ioctrl_reset_linkreset_v(u32 r) 76{ 77 return (r >> 8U) & 0x3fU; 78} 79static inline u32 ioctrl_debug_reset_r(void) 80{ 81 return 0x00000144U; 82} 83static inline u32 ioctrl_debug_reset_link_f(u32 v) 84{ 85 return (v & 0x3fU) << 0U; 86} 87static inline u32 ioctrl_debug_reset_link_m(void) 88{ 89 return 0x3fU << 0U; 90} 91static inline u32 ioctrl_debug_reset_link_v(u32 r) 92{ 93 return (r >> 0U) & 0x3fU; 94} 95static inline u32 ioctrl_debug_reset_common_f(u32 v) 96{ 97 return (v & 0x1U) << 31U; 98} 99static inline u32 ioctrl_debug_reset_common_m(void) 100{ 101 return 0x1U << 31U; 102} 103static inline u32 ioctrl_debug_reset_common_v(u32 r) 104{ 105 return (r >> 31U) & 0x1U; 106} 107static inline u32 ioctrl_clock_control_r(u32 i) 108{ 109 return 0x00000180U + i*4U; 110} 111static inline u32 
ioctrl_clock_control__size_1_v(void) 112{ 113 return 0x00000006U; 114} 115static inline u32 ioctrl_clock_control_clkdis_f(u32 v) 116{ 117 return (v & 0x1U) << 0U; 118} 119static inline u32 ioctrl_clock_control_clkdis_m(void) 120{ 121 return 0x1U << 0U; 122} 123static inline u32 ioctrl_clock_control_clkdis_v(u32 r) 124{ 125 return (r >> 0U) & 0x1U; 126} 127static inline u32 ioctrl_top_intr_0_status_r(void) 128{ 129 return 0x00000200U; 130} 131static inline u32 ioctrl_top_intr_0_status_link_f(u32 v) 132{ 133 return (v & 0x3fU) << 0U; 134} 135static inline u32 ioctrl_top_intr_0_status_link_m(void) 136{ 137 return 0x3fU << 0U; 138} 139static inline u32 ioctrl_top_intr_0_status_link_v(u32 r) 140{ 141 return (r >> 0U) & 0x3fU; 142} 143static inline u32 ioctrl_top_intr_0_status_common_f(u32 v) 144{ 145 return (v & 0x1U) << 31U; 146} 147static inline u32 ioctrl_top_intr_0_status_common_m(void) 148{ 149 return 0x1U << 31U; 150} 151static inline u32 ioctrl_top_intr_0_status_common_v(u32 r) 152{ 153 return (r >> 31U) & 0x1U; 154} 155static inline u32 ioctrl_common_intr_0_mask_r(void) 156{ 157 return 0x00000220U; 158} 159static inline u32 ioctrl_common_intr_0_mask_fatal_f(u32 v) 160{ 161 return (v & 0x1U) << 0U; 162} 163static inline u32 ioctrl_common_intr_0_mask_fatal_v(u32 r) 164{ 165 return (r >> 0U) & 0x1U; 166} 167static inline u32 ioctrl_common_intr_0_mask_nonfatal_f(u32 v) 168{ 169 return (v & 0x1U) << 1U; 170} 171static inline u32 ioctrl_common_intr_0_mask_nonfatal_v(u32 r) 172{ 173 return (r >> 1U) & 0x1U; 174} 175static inline u32 ioctrl_common_intr_0_mask_correctable_f(u32 v) 176{ 177 return (v & 0x1U) << 2U; 178} 179static inline u32 ioctrl_common_intr_0_mask_correctable_v(u32 r) 180{ 181 return (r >> 2U) & 0x1U; 182} 183static inline u32 ioctrl_common_intr_0_mask_intra_f(u32 v) 184{ 185 return (v & 0x1U) << 3U; 186} 187static inline u32 ioctrl_common_intr_0_mask_intra_v(u32 r) 188{ 189 return (r >> 3U) & 0x1U; 190} 191static inline u32 
ioctrl_common_intr_0_mask_intrb_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 ioctrl_common_intr_0_mask_intrb_v(u32 r) 196{ 197 return (r >> 4U) & 0x1U; 198} 199static inline u32 ioctrl_common_intr_0_status_r(void) 200{ 201 return 0x00000224U; 202} 203static inline u32 ioctrl_common_intr_0_status_fatal_f(u32 v) 204{ 205 return (v & 0x1U) << 0U; 206} 207static inline u32 ioctrl_common_intr_0_status_fatal_v(u32 r) 208{ 209 return (r >> 0U) & 0x1U; 210} 211static inline u32 ioctrl_common_intr_0_status_nonfatal_f(u32 v) 212{ 213 return (v & 0x1U) << 1U; 214} 215static inline u32 ioctrl_common_intr_0_status_nonfatal_v(u32 r) 216{ 217 return (r >> 1U) & 0x1U; 218} 219static inline u32 ioctrl_common_intr_0_status_correctable_f(u32 v) 220{ 221 return (v & 0x1U) << 2U; 222} 223static inline u32 ioctrl_common_intr_0_status_correctable_v(u32 r) 224{ 225 return (r >> 2U) & 0x1U; 226} 227static inline u32 ioctrl_common_intr_0_status_intra_f(u32 v) 228{ 229 return (v & 0x1U) << 3U; 230} 231static inline u32 ioctrl_common_intr_0_status_intra_v(u32 r) 232{ 233 return (r >> 3U) & 0x1U; 234} 235static inline u32 ioctrl_common_intr_0_status_intrb_f(u32 v) 236{ 237 return (v & 0x1U) << 4U; 238} 239static inline u32 ioctrl_common_intr_0_status_intrb_v(u32 r) 240{ 241 return (r >> 4U) & 0x1U; 242} 243static inline u32 ioctrl_link_intr_0_mask_r(u32 i) 244{ 245 return 0x00000240U + i*20U; 246} 247static inline u32 ioctrl_link_intr_0_mask_fatal_f(u32 v) 248{ 249 return (v & 0x1U) << 0U; 250} 251static inline u32 ioctrl_link_intr_0_mask_fatal_v(u32 r) 252{ 253 return (r >> 0U) & 0x1U; 254} 255static inline u32 ioctrl_link_intr_0_mask_nonfatal_f(u32 v) 256{ 257 return (v & 0x1U) << 1U; 258} 259static inline u32 ioctrl_link_intr_0_mask_nonfatal_v(u32 r) 260{ 261 return (r >> 1U) & 0x1U; 262} 263static inline u32 ioctrl_link_intr_0_mask_correctable_f(u32 v) 264{ 265 return (v & 0x1U) << 2U; 266} 267static inline u32 ioctrl_link_intr_0_mask_correctable_v(u32 r) 268{ 269 
return (r >> 2U) & 0x1U; 270} 271static inline u32 ioctrl_link_intr_0_mask_intra_f(u32 v) 272{ 273 return (v & 0x1U) << 3U; 274} 275static inline u32 ioctrl_link_intr_0_mask_intra_v(u32 r) 276{ 277 return (r >> 3U) & 0x1U; 278} 279static inline u32 ioctrl_link_intr_0_mask_intrb_f(u32 v) 280{ 281 return (v & 0x1U) << 4U; 282} 283static inline u32 ioctrl_link_intr_0_mask_intrb_v(u32 r) 284{ 285 return (r >> 4U) & 0x1U; 286} 287static inline u32 ioctrl_link_intr_0_status_r(u32 i) 288{ 289 return 0x00000244U + i*20U; 290} 291static inline u32 ioctrl_link_intr_0_status_fatal_f(u32 v) 292{ 293 return (v & 0x1U) << 0U; 294} 295static inline u32 ioctrl_link_intr_0_status_fatal_v(u32 r) 296{ 297 return (r >> 0U) & 0x1U; 298} 299static inline u32 ioctrl_link_intr_0_status_nonfatal_f(u32 v) 300{ 301 return (v & 0x1U) << 1U; 302} 303static inline u32 ioctrl_link_intr_0_status_nonfatal_v(u32 r) 304{ 305 return (r >> 1U) & 0x1U; 306} 307static inline u32 ioctrl_link_intr_0_status_correctable_f(u32 v) 308{ 309 return (v & 0x1U) << 2U; 310} 311static inline u32 ioctrl_link_intr_0_status_correctable_v(u32 r) 312{ 313 return (r >> 2U) & 0x1U; 314} 315static inline u32 ioctrl_link_intr_0_status_intra_f(u32 v) 316{ 317 return (v & 0x1U) << 3U; 318} 319static inline u32 ioctrl_link_intr_0_status_intra_v(u32 r) 320{ 321 return (r >> 3U) & 0x1U; 322} 323static inline u32 ioctrl_link_intr_0_status_intrb_f(u32 v) 324{ 325 return (v & 0x1U) << 4U; 326} 327static inline u32 ioctrl_link_intr_0_status_intrb_v(u32 r) 328{ 329 return (r >> 4U) & 0x1U; 330} 331#endif
diff --git a/include/nvgpu/hw/gv100/hw_ioctrlmif_gv100.h b/include/nvgpu/hw/gv100/hw_ioctrlmif_gv100.h
deleted file mode 100644
index 5747a9b..0000000
--- a/include/nvgpu/hw/gv100/hw_ioctrlmif_gv100.h
+++ /dev/null
@@ -1,331 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ioctrlmif_gv100_h_ 57#define _hw_ioctrlmif_gv100_h_ 58 59static inline u32 ioctrlmif_rx_err_contain_en_0_r(void) 60{ 61 return 0x00000e0cU; 62} 63static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_f(u32 v) 64{ 65 return (v & 0x1U) << 3U; 66} 67static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_m(void) 68{ 69 return 0x1U << 3U; 70} 71static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_v(u32 r) 72{ 73 return (r >> 3U) & 0x1U; 74} 75static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr__prod_v(void) 76{ 77 return 0x00000001U; 78} 79static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr__prod_f(void) 80{ 81 return 0x8U; 82} 83static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_f(u32 v) 84{ 85 return (v & 0x1U) << 4U; 86} 87static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_m(void) 88{ 89 return 0x1U << 4U; 90} 91static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_v(u32 r) 92{ 93 return (r >> 4U) & 0x1U; 94} 95static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_f(void) 100{ 101 return 0x10U; 
102} 103static inline u32 ioctrlmif_rx_err_log_en_0_r(void) 104{ 105 return 0x00000e04U; 106} 107static inline u32 ioctrlmif_rx_err_log_en_0_rxramdataparityerr_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 ioctrlmif_rx_err_log_en_0_rxramdataparityerr_m(void) 112{ 113 return 0x1U << 3U; 114} 115static inline u32 ioctrlmif_rx_err_log_en_0_rxramdataparityerr_v(u32 r) 116{ 117 return (r >> 3U) & 0x1U; 118} 119static inline u32 ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_f(u32 v) 120{ 121 return (v & 0x1U) << 4U; 122} 123static inline u32 ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_m(void) 124{ 125 return 0x1U << 4U; 126} 127static inline u32 ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_v(u32 r) 128{ 129 return (r >> 4U) & 0x1U; 130} 131static inline u32 ioctrlmif_rx_err_report_en_0_r(void) 132{ 133 return 0x00000e08U; 134} 135static inline u32 ioctrlmif_rx_err_report_en_0_rxramdataparityerr_f(u32 v) 136{ 137 return (v & 0x1U) << 3U; 138} 139static inline u32 ioctrlmif_rx_err_report_en_0_rxramdataparityerr_m(void) 140{ 141 return 0x1U << 3U; 142} 143static inline u32 ioctrlmif_rx_err_report_en_0_rxramdataparityerr_v(u32 r) 144{ 145 return (r >> 3U) & 0x1U; 146} 147static inline u32 ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_m(void) 152{ 153 return 0x1U << 4U; 154} 155static inline u32 ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_v(u32 r) 156{ 157 return (r >> 4U) & 0x1U; 158} 159static inline u32 ioctrlmif_rx_err_status_0_r(void) 160{ 161 return 0x00000e00U; 162} 163static inline u32 ioctrlmif_rx_err_status_0_rxramdataparityerr_f(u32 v) 164{ 165 return (v & 0x1U) << 3U; 166} 167static inline u32 ioctrlmif_rx_err_status_0_rxramdataparityerr_m(void) 168{ 169 return 0x1U << 3U; 170} 171static inline u32 ioctrlmif_rx_err_status_0_rxramdataparityerr_v(u32 r) 172{ 173 return (r >> 3U) & 0x1U; 174} 175static inline u32 
ioctrlmif_rx_err_status_0_rxramhdrparityerr_f(u32 v) 176{ 177 return (v & 0x1U) << 4U; 178} 179static inline u32 ioctrlmif_rx_err_status_0_rxramhdrparityerr_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 ioctrlmif_rx_err_status_0_rxramhdrparityerr_v(u32 r) 184{ 185 return (r >> 4U) & 0x1U; 186} 187static inline u32 ioctrlmif_rx_err_first_0_r(void) 188{ 189 return 0x00000e14U; 190} 191static inline u32 ioctrlmif_tx_err_contain_en_0_r(void) 192{ 193 return 0x00000a90U; 194} 195static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr_f(u32 v) 196{ 197 return (v & 0x1U) << 0U; 198} 199static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr_m(void) 200{ 201 return 0x1U << 0U; 202} 203static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr_v(u32 r) 204{ 205 return (r >> 0U) & 0x1U; 206} 207static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_v(void) 208{ 209 return 0x00000001U; 210} 211static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_f(void) 212{ 213 return 0x1U; 214} 215static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_f(u32 v) 216{ 217 return (v & 0x1U) << 1U; 218} 219static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_m(void) 220{ 221 return 0x1U << 1U; 222} 223static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_v(u32 r) 224{ 225 return (r >> 1U) & 0x1U; 226} 227static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_v(void) 228{ 229 return 0x00000001U; 230} 231static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_f(void) 232{ 233 return 0x2U; 234} 235static inline u32 ioctrlmif_tx_err_log_en_0_r(void) 236{ 237 return 0x00000a88U; 238} 239static inline u32 ioctrlmif_tx_err_log_en_0_txramdataparityerr_f(u32 v) 240{ 241 return (v & 0x1U) << 0U; 242} 243static inline u32 ioctrlmif_tx_err_log_en_0_txramdataparityerr_m(void) 244{ 245 return 0x1U << 0U; 246} 247static inline u32 
ioctrlmif_tx_err_log_en_0_txramdataparityerr_v(u32 r) 248{ 249 return (r >> 0U) & 0x1U; 250} 251static inline u32 ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(u32 v) 252{ 253 return (v & 0x1U) << 1U; 254} 255static inline u32 ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m(void) 256{ 257 return 0x1U << 1U; 258} 259static inline u32 ioctrlmif_tx_err_log_en_0_txramhdrparityerr_v(u32 r) 260{ 261 return (r >> 1U) & 0x1U; 262} 263static inline u32 ioctrlmif_tx_err_report_en_0_r(void) 264{ 265 return 0x00000e08U; 266} 267static inline u32 ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(u32 v) 268{ 269 return (v & 0x1U) << 0U; 270} 271static inline u32 ioctrlmif_tx_err_report_en_0_txramdataparityerr_m(void) 272{ 273 return 0x1U << 0U; 274} 275static inline u32 ioctrlmif_tx_err_report_en_0_txramdataparityerr_v(u32 r) 276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 ioctrlmif_tx_err_report_en_0_txramhdrparityerr_f(u32 v) 280{ 281 return (v & 0x1U) << 1U; 282} 283static inline u32 ioctrlmif_tx_err_report_en_0_txramhdrparityerr_m(void) 284{ 285 return 0x1U << 1U; 286} 287static inline u32 ioctrlmif_tx_err_report_en_0_txramhdrparityerr_v(u32 r) 288{ 289 return (r >> 1U) & 0x1U; 290} 291static inline u32 ioctrlmif_tx_err_status_0_r(void) 292{ 293 return 0x00000a84U; 294} 295static inline u32 ioctrlmif_tx_err_status_0_txramdataparityerr_f(u32 v) 296{ 297 return (v & 0x1U) << 0U; 298} 299static inline u32 ioctrlmif_tx_err_status_0_txramdataparityerr_m(void) 300{ 301 return 0x1U << 0U; 302} 303static inline u32 ioctrlmif_tx_err_status_0_txramdataparityerr_v(u32 r) 304{ 305 return (r >> 0U) & 0x1U; 306} 307static inline u32 ioctrlmif_tx_err_status_0_txramhdrparityerr_f(u32 v) 308{ 309 return (v & 0x1U) << 1U; 310} 311static inline u32 ioctrlmif_tx_err_status_0_txramhdrparityerr_m(void) 312{ 313 return 0x1U << 1U; 314} 315static inline u32 ioctrlmif_tx_err_status_0_txramhdrparityerr_v(u32 r) 316{ 317 return (r >> 1U) & 0x1U; 318} 319static inline u32 
ioctrlmif_tx_err_first_0_r(void) 320{ 321 return 0x00000a98U; 322} 323static inline u32 ioctrlmif_tx_ctrl_buffer_ready_r(void) 324{ 325 return 0x00000a7cU; 326} 327static inline u32 ioctrlmif_rx_ctrl_buffer_ready_r(void) 328{ 329 return 0x00000dfcU; 330} 331#endif
diff --git a/include/nvgpu/hw/gv100/hw_ltc_gv100.h b/include/nvgpu/hw/gv100/hw_ltc_gv100.h
deleted file mode 100644
index 042cb7d..0000000
--- a/include/nvgpu/hw/gv100/hw_ltc_gv100.h
+++ /dev/null
@@ -1,631 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ltc_gv100_h_ 57#define _hw_ltc_gv100_h_ 58 59static inline u32 ltc_pltcg_base_v(void) 60{ 61 return 0x00140000U; 62} 63static inline u32 ltc_pltcg_extent_v(void) 64{ 65 return 0x0017ffffU; 66} 67static inline u32 ltc_ltc0_ltss_v(void) 68{ 69 return 0x00140200U; 70} 71static inline u32 ltc_ltc0_lts0_v(void) 72{ 73 return 0x00140400U; 74} 75static inline u32 ltc_ltcs_ltss_v(void) 76{ 77 return 0x0017e200U; 78} 79static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) 80{ 81 return 0x0014046cU; 82} 83static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) 84{ 85 return 0x00140518U; 86} 87static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) 88{ 89 return 0x0017e318U; 90} 91static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) 92{ 93 return 0x1U << 15U; 94} 95static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) 96{ 97 return 0x00140494U; 98} 99static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) 100{ 101 return (r >> 0U) & 0xffffU; 102} 103static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) 104{ 105 return (r >> 16U) & 0x3U; 106} 107static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) 108{ 109 return 0x00000000U; 110} 111static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) 120{ 121 return 0x0017e26cU; 122} 123static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) 124{ 125 return 0x1U; 126} 127static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) 128{ 129 return 0x2U; 130} 131static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) 132{ 133 return (r >> 2U) & 0x1U; 134} 135static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) 140{ 141 return 0x4U; 142} 143static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) 144{ 145 return 0x0014046cU; 146} 147static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) 148{ 149 return 0x0017e270U; 150} 151static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) 152{ 153 return (v & 0x3ffffU) << 0U; 154} 155static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) 156{ 157 return 0x0017e274U; 158} 159static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) 160{ 161 return (v & 0x3ffffU) << 0U; 162} 163static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) 164{ 165 return 0x0003ffffU; 166} 167static inline u32 ltc_ltcs_ltss_cbc_base_r(void) 168{ 169 return 0x0017e278U; 170} 171static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) 172{ 173 return 0x0000000bU; 174} 175static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) 176{ 177 return (r >> 0U) & 0x3ffffffU; 178} 179static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) 180{ 181 return 0x0017e27cU; 182} 183static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r) 184{ 185 return (r >> 0U) & 0x1fU; 186} 187static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v) 188{ 189 return (v & 0x1U) << 24U; 190} 191static inline u32 
ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r) 192{ 193 return (r >> 24U) & 0x1U; 194} 195static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v) 196{ 197 return (v & 0x1U) << 25U; 198} 199static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r) 200{ 201 return (r >> 25U) & 0x1U; 202} 203static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) 204{ 205 return 0x0017e000U; 206} 207static inline u32 ltc_ltcs_ltss_cbc_param_r(void) 208{ 209 return 0x0017e280U; 210} 211static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) 212{ 213 return (r >> 0U) & 0xffffU; 214} 215static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) 216{ 217 return (r >> 24U) & 0xfU; 218} 219static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) 220{ 221 return (r >> 28U) & 0xfU; 222} 223static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) 224{ 225 return 0x0017e3f4U; 226} 227static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) 228{ 229 return (r >> 0U) & 0xffffU; 230} 231static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) 232{ 233 return 0x0017e2acU; 234} 235static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) 236{ 237 return (v & 0x1fU) << 16U; 238} 239static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) 240{ 241 return 0x0017e338U; 242} 243static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) 244{ 245 return (v & 0xfU) << 0U; 246} 247static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) 248{ 249 return 0x0017e33cU + i*4U; 250} 251static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) 252{ 253 return 0x00000004U; 254} 255static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) 256{ 257 return 0x0017e34cU; 258} 259static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) 260{ 261 return 32U; 262} 263static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) 264{ 265 
return (v & 0xffffffffU) << 0U; 266} 267static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) 268{ 269 return 0xffffffffU << 0U; 270} 271static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) 272{ 273 return (r >> 0U) & 0xffffffffU; 274} 275static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void) 276{ 277 return 0x0017e204U; 278} 279static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void) 280{ 281 return 8U; 282} 283static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v) 284{ 285 return (v & 0xffU) << 0U; 286} 287static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void) 288{ 289 return 0xffU << 0U; 290} 291static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r) 292{ 293 return (r >> 0U) & 0xffU; 294} 295static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) 296{ 297 return 0x0017e2b0U; 298} 299static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) 300{ 301 return 0x10000000U; 302} 303static inline u32 ltc_ltcs_ltss_g_elpg_r(void) 304{ 305 return 0x0017e214U; 306} 307static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) 308{ 309 return (r >> 0U) & 0x1U; 310} 311static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) 312{ 313 return 0x00000001U; 314} 315static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) 316{ 317 return 0x1U; 318} 319static inline u32 ltc_ltc0_ltss_g_elpg_r(void) 320{ 321 return 0x00140214U; 322} 323static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) 324{ 325 return (r >> 0U) & 0x1U; 326} 327static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) 328{ 329 return 0x00000001U; 330} 331static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) 332{ 333 return 0x1U; 334} 335static inline u32 ltc_ltc1_ltss_g_elpg_r(void) 336{ 337 return 0x00142214U; 338} 339static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) 340{ 341 return (r >> 0U) & 0x1U; 342} 343static inline u32 
ltc_ltc1_ltss_g_elpg_flush_pending_v(void) 344{ 345 return 0x00000001U; 346} 347static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) 348{ 349 return 0x1U; 350} 351static inline u32 ltc_ltcs_ltss_intr_r(void) 352{ 353 return 0x0017e20cU; 354} 355static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) 356{ 357 return 0x100U; 358} 359static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) 360{ 361 return 0x200U; 362} 363static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) 364{ 365 return 0x1U << 20U; 366} 367static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_m(void) 368{ 369 return 0x1U << 21U; 370} 371static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f(void) 372{ 373 return 0x200000U; 374} 375static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f(void) 376{ 377 return 0x0U; 378} 379static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) 380{ 381 return 0x1U << 30U; 382} 383static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) 384{ 385 return 0x1000000U; 386} 387static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) 388{ 389 return 0x2000000U; 390} 391static inline u32 ltc_ltc0_lts0_intr_r(void) 392{ 393 return 0x0014040cU; 394} 395static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) 396{ 397 return 0x0014051cU; 398} 399static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) 400{ 401 return 0xffU << 0U; 402} 403static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r) 404{ 405 return (r >> 0U) & 0xffU; 406} 407static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) 408{ 409 return 0xffU << 16U; 410} 411static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) 412{ 413 return (r >> 16U) & 0xffU; 414} 415static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) 416{ 417 return 0x0017e2a0U; 418} 419static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) 420{ 421 return (r >> 0U) & 0x1U; 422} 423static inline u32 
ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) 424{ 425 return 0x00000001U; 426} 427static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) 428{ 429 return 0x1U; 430} 431static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) 432{ 433 return (r >> 8U) & 0xfU; 434} 435static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) 436{ 437 return 0x00000003U; 438} 439static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) 440{ 441 return 0x300U; 442} 443static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 444{ 445 return (r >> 28U) & 0x1U; 446} 447static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) 448{ 449 return 0x00000001U; 450} 451static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) 452{ 453 return 0x10000000U; 454} 455static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) 456{ 457 return (r >> 29U) & 0x1U; 458} 459static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) 460{ 461 return 0x00000001U; 462} 463static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) 464{ 465 return 0x20000000U; 466} 467static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) 468{ 469 return (r >> 30U) & 0x1U; 470} 471static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) 472{ 473 return 0x00000001U; 474} 475static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) 476{ 477 return 0x40000000U; 478} 479static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) 480{ 481 return 0x0017e2a4U; 482} 483static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) 484{ 485 return (r >> 0U) & 0x1U; 486} 487static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) 488{ 489 return 0x00000001U; 490} 491static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) 492{ 493 return 0x1U; 494} 495static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) 496{ 497 return (r >> 8U) & 0xfU; 498} 499static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) 500{ 501 return 0x00000003U; 502} 503static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) 504{ 505 return 0x300U; 506} 507static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) 508{ 509 return (r >> 16U) & 0x1U; 510} 511static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) 512{ 513 return 0x00000001U; 514} 515static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) 516{ 517 return 0x10000U; 518} 519static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) 520{ 521 return (r >> 28U) & 0x1U; 522} 523static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) 524{ 525 return 0x00000001U; 526} 527static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) 528{ 529 return 0x10000000U; 530} 531static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) 532{ 533 return (r >> 29U) & 0x1U; 534} 535static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) 536{ 537 return 0x00000001U; 538} 539static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) 540{ 541 return 0x20000000U; 542} 543static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) 544{ 545 return (r >> 30U) & 0x1U; 546} 547static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) 548{ 549 return 0x00000001U; 550} 551static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) 552{ 553 return 0x40000000U; 554} 555static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) 556{ 557 return 0x001402a0U; 558} 559static inline u32 
ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) 560{ 561 return (r >> 0U) & 0x1U; 562} 563static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) 564{ 565 return 0x00000001U; 566} 567static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) 568{ 569 return 0x1U; 570} 571static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) 572{ 573 return 0x001402a4U; 574} 575static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) 576{ 577 return (r >> 0U) & 0x1U; 578} 579static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) 580{ 581 return 0x00000001U; 582} 583static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) 584{ 585 return 0x1U; 586} 587static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) 588{ 589 return 0x001422a0U; 590} 591static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) 592{ 593 return (r >> 0U) & 0x1U; 594} 595static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) 596{ 597 return 0x00000001U; 598} 599static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) 600{ 601 return 0x1U; 602} 603static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) 604{ 605 return 0x001422a4U; 606} 607static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) 608{ 609 return (r >> 0U) & 0x1U; 610} 611static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) 612{ 613 return 0x00000001U; 614} 615static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) 616{ 617 return 0x1U; 618} 619static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void) 620{ 621 return 0x0014058cU; 622} 623static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r) 624{ 625 return (r >> 0U) & 0xffffU; 626} 627static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r) 628{ 629 return (r >> 16U) & 0x1fU; 630} 631#endif
diff --git a/include/nvgpu/hw/gv100/hw_mc_gv100.h b/include/nvgpu/hw/gv100/hw_mc_gv100.h
deleted file mode 100644
index cf406c3..0000000
--- a/include/nvgpu/hw/gv100/hw_mc_gv100.h
+++ /dev/null
@@ -1,259 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_mc_gv100_h_ 57#define _hw_mc_gv100_h_ 58 59static inline u32 mc_boot_0_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 mc_boot_0_architecture_v(u32 r) 64{ 65 return (r >> 24U) & 0x1fU; 66} 67static inline u32 mc_boot_0_implementation_v(u32 r) 68{ 69 return (r >> 20U) & 0xfU; 70} 71static inline u32 mc_boot_0_major_revision_v(u32 r) 72{ 73 return (r >> 4U) & 0xfU; 74} 75static inline u32 mc_boot_0_minor_revision_v(u32 r) 76{ 77 return (r >> 0U) & 0xfU; 78} 79static inline u32 mc_intr_r(u32 i) 80{ 81 return 0x00000100U + i*4U; 82} 83static inline u32 mc_intr_pfifo_pending_f(void) 84{ 85 return 0x100U; 86} 87static inline u32 mc_intr_hub_pending_f(void) 88{ 89 return 0x200U; 90} 91static inline u32 mc_intr_pgraph_pending_f(void) 92{ 93 return 0x1000U; 94} 95static inline u32 mc_intr_pmu_pending_f(void) 96{ 97 return 0x1000000U; 98} 99static inline u32 mc_intr_ltc_pending_f(void) 100{ 101 return 0x2000000U; 102} 103static inline u32 mc_intr_priv_ring_pending_f(void) 104{ 105 return 0x40000000U; 106} 107static inline u32 mc_intr_pbus_pending_f(void) 108{ 109 return 0x10000000U; 110} 111static inline u32 mc_intr_nvlink_pending_f(void) 112{ 113 return 0x400000U; 114} 115static inline u32 mc_intr_en_r(u32 i) 116{ 117 return 
0x00000140U + i*4U; 118} 119static inline u32 mc_intr_en_set_r(u32 i) 120{ 121 return 0x00000160U + i*4U; 122} 123static inline u32 mc_intr_en_clear_r(u32 i) 124{ 125 return 0x00000180U + i*4U; 126} 127static inline u32 mc_enable_r(void) 128{ 129 return 0x00000200U; 130} 131static inline u32 mc_enable_xbar_enabled_f(void) 132{ 133 return 0x4U; 134} 135static inline u32 mc_enable_l2_enabled_f(void) 136{ 137 return 0x8U; 138} 139static inline u32 mc_enable_pmedia_s(void) 140{ 141 return 1U; 142} 143static inline u32 mc_enable_pmedia_f(u32 v) 144{ 145 return (v & 0x1U) << 4U; 146} 147static inline u32 mc_enable_pmedia_m(void) 148{ 149 return 0x1U << 4U; 150} 151static inline u32 mc_enable_pmedia_v(u32 r) 152{ 153 return (r >> 4U) & 0x1U; 154} 155static inline u32 mc_enable_ce0_m(void) 156{ 157 return 0x1U << 6U; 158} 159static inline u32 mc_enable_pfifo_enabled_f(void) 160{ 161 return 0x100U; 162} 163static inline u32 mc_enable_pgraph_enabled_f(void) 164{ 165 return 0x1000U; 166} 167static inline u32 mc_enable_pwr_v(u32 r) 168{ 169 return (r >> 13U) & 0x1U; 170} 171static inline u32 mc_enable_pwr_disabled_v(void) 172{ 173 return 0x00000000U; 174} 175static inline u32 mc_enable_pwr_enabled_f(void) 176{ 177 return 0x2000U; 178} 179static inline u32 mc_enable_pfb_enabled_f(void) 180{ 181 return 0x100000U; 182} 183static inline u32 mc_enable_ce2_m(void) 184{ 185 return 0x1U << 21U; 186} 187static inline u32 mc_enable_ce2_enabled_f(void) 188{ 189 return 0x200000U; 190} 191static inline u32 mc_enable_blg_enabled_f(void) 192{ 193 return 0x8000000U; 194} 195static inline u32 mc_enable_perfmon_enabled_f(void) 196{ 197 return 0x10000000U; 198} 199static inline u32 mc_enable_hub_enabled_f(void) 200{ 201 return 0x20000000U; 202} 203static inline u32 mc_enable_nvdec_disabled_v(void) 204{ 205 return 0x00000000U; 206} 207static inline u32 mc_enable_nvdec_enabled_f(void) 208{ 209 return 0x8000U; 210} 211static inline u32 mc_enable_nvlink_disabled_v(void) 212{ 213 return 0x00000000U; 
214} 215static inline u32 mc_enable_nvlink_disabled_f(void) 216{ 217 return 0x0U; 218} 219static inline u32 mc_enable_nvlink_enabled_v(void) 220{ 221 return 0x00000001U; 222} 223static inline u32 mc_enable_nvlink_enabled_f(void) 224{ 225 return 0x2000000U; 226} 227static inline u32 mc_intr_ltc_r(void) 228{ 229 return 0x000001c0U; 230} 231static inline u32 mc_enable_pb_r(void) 232{ 233 return 0x00000204U; 234} 235static inline u32 mc_enable_pb_0_s(void) 236{ 237 return 1U; 238} 239static inline u32 mc_enable_pb_0_f(u32 v) 240{ 241 return (v & 0x1U) << 0U; 242} 243static inline u32 mc_enable_pb_0_m(void) 244{ 245 return 0x1U << 0U; 246} 247static inline u32 mc_enable_pb_0_v(u32 r) 248{ 249 return (r >> 0U) & 0x1U; 250} 251static inline u32 mc_enable_pb_0_enabled_v(void) 252{ 253 return 0x00000001U; 254} 255static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) 256{ 257 return (v & 0x1U) << (0U + i*1U); 258} 259#endif
diff --git a/include/nvgpu/hw/gv100/hw_minion_gv100.h b/include/nvgpu/hw/gv100/hw_minion_gv100.h
deleted file mode 100644
index e4bbf23..0000000
--- a/include/nvgpu/hw/gv100/hw_minion_gv100.h
+++ /dev/null
@@ -1,943 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_minion_gv100_h_ 57#define _hw_minion_gv100_h_ 58 59static inline u32 minion_minion_status_r(void) 60{ 61 return 0x00000830U; 62} 63static inline u32 minion_minion_status_status_f(u32 v) 64{ 65 return (v & 0xffU) << 0U; 66} 67static inline u32 minion_minion_status_status_m(void) 68{ 69 return 0xffU << 0U; 70} 71static inline u32 minion_minion_status_status_v(u32 r) 72{ 73 return (r >> 0U) & 0xffU; 74} 75static inline u32 minion_minion_status_status_boot_v(void) 76{ 77 return 0x00000001U; 78} 79static inline u32 minion_minion_status_status_boot_f(void) 80{ 81 return 0x1U; 82} 83static inline u32 minion_minion_status_intr_code_f(u32 v) 84{ 85 return (v & 0xffffffU) << 8U; 86} 87static inline u32 minion_minion_status_intr_code_m(void) 88{ 89 return 0xffffffU << 8U; 90} 91static inline u32 minion_minion_status_intr_code_v(u32 r) 92{ 93 return (r >> 8U) & 0xffffffU; 94} 95static inline u32 minion_falcon_irqstat_r(void) 96{ 97 return 0x00000008U; 98} 99static inline u32 minion_falcon_irqstat_halt_f(u32 v) 100{ 101 return (v & 0x1U) << 4U; 102} 103static inline u32 minion_falcon_irqstat_halt_v(u32 r) 104{ 105 return (r >> 4U) & 0x1U; 106} 107static inline u32 minion_falcon_irqstat_exterr_f(u32 v) 108{ 109 return (v & 0x1U) << 5U; 110} 
111static inline u32 minion_falcon_irqstat_exterr_v(u32 r) 112{ 113 return (r >> 5U) & 0x1U; 114} 115static inline u32 minion_falcon_irqstat_exterr_true_v(void) 116{ 117 return 0x00000001U; 118} 119static inline u32 minion_falcon_irqstat_exterr_true_f(void) 120{ 121 return 0x20U; 122} 123static inline u32 minion_falcon_irqmask_r(void) 124{ 125 return 0x00000018U; 126} 127static inline u32 minion_falcon_irqsclr_r(void) 128{ 129 return 0x00000004U; 130} 131static inline u32 minion_falcon_irqsset_r(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 minion_falcon_irqmset_r(void) 136{ 137 return 0x00000010U; 138} 139static inline u32 minion_falcon_irqmset_wdtmr_f(u32 v) 140{ 141 return (v & 0x1U) << 1U; 142} 143static inline u32 minion_falcon_irqmset_wdtmr_m(void) 144{ 145 return 0x1U << 1U; 146} 147static inline u32 minion_falcon_irqmset_wdtmr_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 minion_falcon_irqmset_wdtmr_set_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 minion_falcon_irqmset_wdtmr_set_f(void) 156{ 157 return 0x2U; 158} 159static inline u32 minion_falcon_irqmset_halt_f(u32 v) 160{ 161 return (v & 0x1U) << 4U; 162} 163static inline u32 minion_falcon_irqmset_halt_m(void) 164{ 165 return 0x1U << 4U; 166} 167static inline u32 minion_falcon_irqmset_halt_v(u32 r) 168{ 169 return (r >> 4U) & 0x1U; 170} 171static inline u32 minion_falcon_irqmset_halt_set_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 minion_falcon_irqmset_halt_set_f(void) 176{ 177 return 0x10U; 178} 179static inline u32 minion_falcon_irqmset_exterr_f(u32 v) 180{ 181 return (v & 0x1U) << 5U; 182} 183static inline u32 minion_falcon_irqmset_exterr_m(void) 184{ 185 return 0x1U << 5U; 186} 187static inline u32 minion_falcon_irqmset_exterr_v(u32 r) 188{ 189 return (r >> 5U) & 0x1U; 190} 191static inline u32 minion_falcon_irqmset_exterr_set_v(void) 192{ 193 return 0x00000001U; 194} 195static inline u32 
minion_falcon_irqmset_exterr_set_f(void) 196{ 197 return 0x20U; 198} 199static inline u32 minion_falcon_irqmset_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 minion_falcon_irqmset_swgen0_m(void) 204{ 205 return 0x1U << 6U; 206} 207static inline u32 minion_falcon_irqmset_swgen0_v(u32 r) 208{ 209 return (r >> 6U) & 0x1U; 210} 211static inline u32 minion_falcon_irqmset_swgen0_set_v(void) 212{ 213 return 0x00000001U; 214} 215static inline u32 minion_falcon_irqmset_swgen0_set_f(void) 216{ 217 return 0x40U; 218} 219static inline u32 minion_falcon_irqmset_swgen1_f(u32 v) 220{ 221 return (v & 0x1U) << 7U; 222} 223static inline u32 minion_falcon_irqmset_swgen1_m(void) 224{ 225 return 0x1U << 7U; 226} 227static inline u32 minion_falcon_irqmset_swgen1_v(u32 r) 228{ 229 return (r >> 7U) & 0x1U; 230} 231static inline u32 minion_falcon_irqmset_swgen1_set_v(void) 232{ 233 return 0x00000001U; 234} 235static inline u32 minion_falcon_irqmset_swgen1_set_f(void) 236{ 237 return 0x80U; 238} 239static inline u32 minion_falcon_irqdest_r(void) 240{ 241 return 0x0000001cU; 242} 243static inline u32 minion_falcon_irqdest_host_wdtmr_f(u32 v) 244{ 245 return (v & 0x1U) << 1U; 246} 247static inline u32 minion_falcon_irqdest_host_wdtmr_m(void) 248{ 249 return 0x1U << 1U; 250} 251static inline u32 minion_falcon_irqdest_host_wdtmr_v(u32 r) 252{ 253 return (r >> 1U) & 0x1U; 254} 255static inline u32 minion_falcon_irqdest_host_wdtmr_host_v(void) 256{ 257 return 0x00000001U; 258} 259static inline u32 minion_falcon_irqdest_host_wdtmr_host_f(void) 260{ 261 return 0x2U; 262} 263static inline u32 minion_falcon_irqdest_host_halt_f(u32 v) 264{ 265 return (v & 0x1U) << 4U; 266} 267static inline u32 minion_falcon_irqdest_host_halt_m(void) 268{ 269 return 0x1U << 4U; 270} 271static inline u32 minion_falcon_irqdest_host_halt_v(u32 r) 272{ 273 return (r >> 4U) & 0x1U; 274} 275static inline u32 minion_falcon_irqdest_host_halt_host_v(void) 276{ 277 return 0x00000001U; 278} 279static 
inline u32 minion_falcon_irqdest_host_halt_host_f(void) 280{ 281 return 0x10U; 282} 283static inline u32 minion_falcon_irqdest_host_exterr_f(u32 v) 284{ 285 return (v & 0x1U) << 5U; 286} 287static inline u32 minion_falcon_irqdest_host_exterr_m(void) 288{ 289 return 0x1U << 5U; 290} 291static inline u32 minion_falcon_irqdest_host_exterr_v(u32 r) 292{ 293 return (r >> 5U) & 0x1U; 294} 295static inline u32 minion_falcon_irqdest_host_exterr_host_v(void) 296{ 297 return 0x00000001U; 298} 299static inline u32 minion_falcon_irqdest_host_exterr_host_f(void) 300{ 301 return 0x20U; 302} 303static inline u32 minion_falcon_irqdest_host_swgen0_f(u32 v) 304{ 305 return (v & 0x1U) << 6U; 306} 307static inline u32 minion_falcon_irqdest_host_swgen0_m(void) 308{ 309 return 0x1U << 6U; 310} 311static inline u32 minion_falcon_irqdest_host_swgen0_v(u32 r) 312{ 313 return (r >> 6U) & 0x1U; 314} 315static inline u32 minion_falcon_irqdest_host_swgen0_host_v(void) 316{ 317 return 0x00000001U; 318} 319static inline u32 minion_falcon_irqdest_host_swgen0_host_f(void) 320{ 321 return 0x40U; 322} 323static inline u32 minion_falcon_irqdest_host_swgen1_f(u32 v) 324{ 325 return (v & 0x1U) << 7U; 326} 327static inline u32 minion_falcon_irqdest_host_swgen1_m(void) 328{ 329 return 0x1U << 7U; 330} 331static inline u32 minion_falcon_irqdest_host_swgen1_v(u32 r) 332{ 333 return (r >> 7U) & 0x1U; 334} 335static inline u32 minion_falcon_irqdest_host_swgen1_host_v(void) 336{ 337 return 0x00000001U; 338} 339static inline u32 minion_falcon_irqdest_host_swgen1_host_f(void) 340{ 341 return 0x80U; 342} 343static inline u32 minion_falcon_irqdest_target_wdtmr_f(u32 v) 344{ 345 return (v & 0x1U) << 17U; 346} 347static inline u32 minion_falcon_irqdest_target_wdtmr_m(void) 348{ 349 return 0x1U << 17U; 350} 351static inline u32 minion_falcon_irqdest_target_wdtmr_v(u32 r) 352{ 353 return (r >> 17U) & 0x1U; 354} 355static inline u32 minion_falcon_irqdest_target_wdtmr_host_normal_v(void) 356{ 357 return 0x00000000U; 
358} 359static inline u32 minion_falcon_irqdest_target_wdtmr_host_normal_f(void) 360{ 361 return 0x0U; 362} 363static inline u32 minion_falcon_irqdest_target_halt_f(u32 v) 364{ 365 return (v & 0x1U) << 20U; 366} 367static inline u32 minion_falcon_irqdest_target_halt_m(void) 368{ 369 return 0x1U << 20U; 370} 371static inline u32 minion_falcon_irqdest_target_halt_v(u32 r) 372{ 373 return (r >> 20U) & 0x1U; 374} 375static inline u32 minion_falcon_irqdest_target_halt_host_normal_v(void) 376{ 377 return 0x00000000U; 378} 379static inline u32 minion_falcon_irqdest_target_halt_host_normal_f(void) 380{ 381 return 0x0U; 382} 383static inline u32 minion_falcon_irqdest_target_exterr_f(u32 v) 384{ 385 return (v & 0x1U) << 21U; 386} 387static inline u32 minion_falcon_irqdest_target_exterr_m(void) 388{ 389 return 0x1U << 21U; 390} 391static inline u32 minion_falcon_irqdest_target_exterr_v(u32 r) 392{ 393 return (r >> 21U) & 0x1U; 394} 395static inline u32 minion_falcon_irqdest_target_exterr_host_normal_v(void) 396{ 397 return 0x00000000U; 398} 399static inline u32 minion_falcon_irqdest_target_exterr_host_normal_f(void) 400{ 401 return 0x0U; 402} 403static inline u32 minion_falcon_irqdest_target_swgen0_f(u32 v) 404{ 405 return (v & 0x1U) << 22U; 406} 407static inline u32 minion_falcon_irqdest_target_swgen0_m(void) 408{ 409 return 0x1U << 22U; 410} 411static inline u32 minion_falcon_irqdest_target_swgen0_v(u32 r) 412{ 413 return (r >> 22U) & 0x1U; 414} 415static inline u32 minion_falcon_irqdest_target_swgen0_host_normal_v(void) 416{ 417 return 0x00000000U; 418} 419static inline u32 minion_falcon_irqdest_target_swgen0_host_normal_f(void) 420{ 421 return 0x0U; 422} 423static inline u32 minion_falcon_irqdest_target_swgen1_f(u32 v) 424{ 425 return (v & 0x1U) << 23U; 426} 427static inline u32 minion_falcon_irqdest_target_swgen1_m(void) 428{ 429 return 0x1U << 23U; 430} 431static inline u32 minion_falcon_irqdest_target_swgen1_v(u32 r) 432{ 433 return (r >> 23U) & 0x1U; 434} 435static 
inline u32 minion_falcon_irqdest_target_swgen1_host_normal_v(void) 436{ 437 return 0x00000000U; 438} 439static inline u32 minion_falcon_irqdest_target_swgen1_host_normal_f(void) 440{ 441 return 0x0U; 442} 443static inline u32 minion_falcon_os_r(void) 444{ 445 return 0x00000080U; 446} 447static inline u32 minion_falcon_mailbox1_r(void) 448{ 449 return 0x00000044U; 450} 451static inline u32 minion_minion_intr_r(void) 452{ 453 return 0x00000810U; 454} 455static inline u32 minion_minion_intr_fatal_f(u32 v) 456{ 457 return (v & 0x1U) << 0U; 458} 459static inline u32 minion_minion_intr_fatal_m(void) 460{ 461 return 0x1U << 0U; 462} 463static inline u32 minion_minion_intr_fatal_v(u32 r) 464{ 465 return (r >> 0U) & 0x1U; 466} 467static inline u32 minion_minion_intr_nonfatal_f(u32 v) 468{ 469 return (v & 0x1U) << 1U; 470} 471static inline u32 minion_minion_intr_nonfatal_m(void) 472{ 473 return 0x1U << 1U; 474} 475static inline u32 minion_minion_intr_nonfatal_v(u32 r) 476{ 477 return (r >> 1U) & 0x1U; 478} 479static inline u32 minion_minion_intr_falcon_stall_f(u32 v) 480{ 481 return (v & 0x1U) << 2U; 482} 483static inline u32 minion_minion_intr_falcon_stall_m(void) 484{ 485 return 0x1U << 2U; 486} 487static inline u32 minion_minion_intr_falcon_stall_v(u32 r) 488{ 489 return (r >> 2U) & 0x1U; 490} 491static inline u32 minion_minion_intr_falcon_nostall_f(u32 v) 492{ 493 return (v & 0x1U) << 3U; 494} 495static inline u32 minion_minion_intr_falcon_nostall_m(void) 496{ 497 return 0x1U << 3U; 498} 499static inline u32 minion_minion_intr_falcon_nostall_v(u32 r) 500{ 501 return (r >> 3U) & 0x1U; 502} 503static inline u32 minion_minion_intr_link_f(u32 v) 504{ 505 return (v & 0xffffU) << 16U; 506} 507static inline u32 minion_minion_intr_link_m(void) 508{ 509 return 0xffffU << 16U; 510} 511static inline u32 minion_minion_intr_link_v(u32 r) 512{ 513 return (r >> 16U) & 0xffffU; 514} 515static inline u32 minion_minion_intr_nonstall_en_r(void) 516{ 517 return 0x0000081cU; 518} 519static 
inline u32 minion_minion_intr_stall_en_r(void) 520{ 521 return 0x00000818U; 522} 523static inline u32 minion_minion_intr_stall_en_fatal_f(u32 v) 524{ 525 return (v & 0x1U) << 0U; 526} 527static inline u32 minion_minion_intr_stall_en_fatal_m(void) 528{ 529 return 0x1U << 0U; 530} 531static inline u32 minion_minion_intr_stall_en_fatal_v(u32 r) 532{ 533 return (r >> 0U) & 0x1U; 534} 535static inline u32 minion_minion_intr_stall_en_fatal_enable_v(void) 536{ 537 return 0x00000001U; 538} 539static inline u32 minion_minion_intr_stall_en_fatal_enable_f(void) 540{ 541 return 0x1U; 542} 543static inline u32 minion_minion_intr_stall_en_fatal_disable_v(void) 544{ 545 return 0x00000000U; 546} 547static inline u32 minion_minion_intr_stall_en_fatal_disable_f(void) 548{ 549 return 0x0U; 550} 551static inline u32 minion_minion_intr_stall_en_nonfatal_f(u32 v) 552{ 553 return (v & 0x1U) << 1U; 554} 555static inline u32 minion_minion_intr_stall_en_nonfatal_m(void) 556{ 557 return 0x1U << 1U; 558} 559static inline u32 minion_minion_intr_stall_en_nonfatal_v(u32 r) 560{ 561 return (r >> 1U) & 0x1U; 562} 563static inline u32 minion_minion_intr_stall_en_nonfatal_enable_v(void) 564{ 565 return 0x00000001U; 566} 567static inline u32 minion_minion_intr_stall_en_nonfatal_enable_f(void) 568{ 569 return 0x2U; 570} 571static inline u32 minion_minion_intr_stall_en_nonfatal_disable_v(void) 572{ 573 return 0x00000000U; 574} 575static inline u32 minion_minion_intr_stall_en_nonfatal_disable_f(void) 576{ 577 return 0x0U; 578} 579static inline u32 minion_minion_intr_stall_en_falcon_stall_f(u32 v) 580{ 581 return (v & 0x1U) << 2U; 582} 583static inline u32 minion_minion_intr_stall_en_falcon_stall_m(void) 584{ 585 return 0x1U << 2U; 586} 587static inline u32 minion_minion_intr_stall_en_falcon_stall_v(u32 r) 588{ 589 return (r >> 2U) & 0x1U; 590} 591static inline u32 minion_minion_intr_stall_en_falcon_stall_enable_v(void) 592{ 593 return 0x00000001U; 594} 595static inline u32 
minion_minion_intr_stall_en_falcon_stall_enable_f(void) 596{ 597 return 0x4U; 598} 599static inline u32 minion_minion_intr_stall_en_falcon_stall_disable_v(void) 600{ 601 return 0x00000000U; 602} 603static inline u32 minion_minion_intr_stall_en_falcon_stall_disable_f(void) 604{ 605 return 0x0U; 606} 607static inline u32 minion_minion_intr_stall_en_falcon_nostall_f(u32 v) 608{ 609 return (v & 0x1U) << 3U; 610} 611static inline u32 minion_minion_intr_stall_en_falcon_nostall_m(void) 612{ 613 return 0x1U << 3U; 614} 615static inline u32 minion_minion_intr_stall_en_falcon_nostall_v(u32 r) 616{ 617 return (r >> 3U) & 0x1U; 618} 619static inline u32 minion_minion_intr_stall_en_falcon_nostall_enable_v(void) 620{ 621 return 0x00000001U; 622} 623static inline u32 minion_minion_intr_stall_en_falcon_nostall_enable_f(void) 624{ 625 return 0x8U; 626} 627static inline u32 minion_minion_intr_stall_en_falcon_nostall_disable_v(void) 628{ 629 return 0x00000000U; 630} 631static inline u32 minion_minion_intr_stall_en_falcon_nostall_disable_f(void) 632{ 633 return 0x0U; 634} 635static inline u32 minion_minion_intr_stall_en_link_f(u32 v) 636{ 637 return (v & 0xffffU) << 16U; 638} 639static inline u32 minion_minion_intr_stall_en_link_m(void) 640{ 641 return 0xffffU << 16U; 642} 643static inline u32 minion_minion_intr_stall_en_link_v(u32 r) 644{ 645 return (r >> 16U) & 0xffffU; 646} 647static inline u32 minion_nvlink_dl_cmd_r(u32 i) 648{ 649 return 0x00000900U + i*4U; 650} 651static inline u32 minion_nvlink_dl_cmd___size_1_v(void) 652{ 653 return 0x00000006U; 654} 655static inline u32 minion_nvlink_dl_cmd_command_f(u32 v) 656{ 657 return (v & 0xffU) << 0U; 658} 659static inline u32 minion_nvlink_dl_cmd_command_v(u32 r) 660{ 661 return (r >> 0U) & 0xffU; 662} 663static inline u32 minion_nvlink_dl_cmd_command_configeom_v(void) 664{ 665 return 0x00000040U; 666} 667static inline u32 minion_nvlink_dl_cmd_command_configeom_f(void) 668{ 669 return 0x40U; 670} 671static inline u32 
minion_nvlink_dl_cmd_command_nop_v(void) 672{ 673 return 0x00000000U; 674} 675static inline u32 minion_nvlink_dl_cmd_command_nop_f(void) 676{ 677 return 0x0U; 678} 679static inline u32 minion_nvlink_dl_cmd_command_initphy_v(void) 680{ 681 return 0x00000001U; 682} 683static inline u32 minion_nvlink_dl_cmd_command_initphy_f(void) 684{ 685 return 0x1U; 686} 687static inline u32 minion_nvlink_dl_cmd_command_initlaneenable_v(void) 688{ 689 return 0x00000003U; 690} 691static inline u32 minion_nvlink_dl_cmd_command_initlaneenable_f(void) 692{ 693 return 0x3U; 694} 695static inline u32 minion_nvlink_dl_cmd_command_initdlpl_v(void) 696{ 697 return 0x00000004U; 698} 699static inline u32 minion_nvlink_dl_cmd_command_initdlpl_f(void) 700{ 701 return 0x4U; 702} 703static inline u32 minion_nvlink_dl_cmd_command_lanedisable_v(void) 704{ 705 return 0x00000008U; 706} 707static inline u32 minion_nvlink_dl_cmd_command_lanedisable_f(void) 708{ 709 return 0x8U; 710} 711static inline u32 minion_nvlink_dl_cmd_command_fastlanedisable_v(void) 712{ 713 return 0x00000009U; 714} 715static inline u32 minion_nvlink_dl_cmd_command_fastlanedisable_f(void) 716{ 717 return 0x9U; 718} 719static inline u32 minion_nvlink_dl_cmd_command_laneshutdown_v(void) 720{ 721 return 0x0000000cU; 722} 723static inline u32 minion_nvlink_dl_cmd_command_laneshutdown_f(void) 724{ 725 return 0xcU; 726} 727static inline u32 minion_nvlink_dl_cmd_command_setacmode_v(void) 728{ 729 return 0x0000000aU; 730} 731static inline u32 minion_nvlink_dl_cmd_command_setacmode_f(void) 732{ 733 return 0xaU; 734} 735static inline u32 minion_nvlink_dl_cmd_command_clracmode_v(void) 736{ 737 return 0x0000000bU; 738} 739static inline u32 minion_nvlink_dl_cmd_command_clracmode_f(void) 740{ 741 return 0xbU; 742} 743static inline u32 minion_nvlink_dl_cmd_command_enablepm_v(void) 744{ 745 return 0x00000010U; 746} 747static inline u32 minion_nvlink_dl_cmd_command_enablepm_f(void) 748{ 749 return 0x10U; 750} 751static inline u32 
minion_nvlink_dl_cmd_command_disablepm_v(void) 752{ 753 return 0x00000011U; 754} 755static inline u32 minion_nvlink_dl_cmd_command_disablepm_f(void) 756{ 757 return 0x11U; 758} 759static inline u32 minion_nvlink_dl_cmd_command_savestate_v(void) 760{ 761 return 0x00000018U; 762} 763static inline u32 minion_nvlink_dl_cmd_command_savestate_f(void) 764{ 765 return 0x18U; 766} 767static inline u32 minion_nvlink_dl_cmd_command_restorestate_v(void) 768{ 769 return 0x00000019U; 770} 771static inline u32 minion_nvlink_dl_cmd_command_restorestate_f(void) 772{ 773 return 0x19U; 774} 775static inline u32 minion_nvlink_dl_cmd_command_initpll_0_v(void) 776{ 777 return 0x00000020U; 778} 779static inline u32 minion_nvlink_dl_cmd_command_initpll_0_f(void) 780{ 781 return 0x20U; 782} 783static inline u32 minion_nvlink_dl_cmd_command_initpll_1_v(void) 784{ 785 return 0x00000021U; 786} 787static inline u32 minion_nvlink_dl_cmd_command_initpll_1_f(void) 788{ 789 return 0x21U; 790} 791static inline u32 minion_nvlink_dl_cmd_command_initpll_2_v(void) 792{ 793 return 0x00000022U; 794} 795static inline u32 minion_nvlink_dl_cmd_command_initpll_2_f(void) 796{ 797 return 0x22U; 798} 799static inline u32 minion_nvlink_dl_cmd_command_initpll_3_v(void) 800{ 801 return 0x00000023U; 802} 803static inline u32 minion_nvlink_dl_cmd_command_initpll_3_f(void) 804{ 805 return 0x23U; 806} 807static inline u32 minion_nvlink_dl_cmd_command_initpll_4_v(void) 808{ 809 return 0x00000024U; 810} 811static inline u32 minion_nvlink_dl_cmd_command_initpll_4_f(void) 812{ 813 return 0x24U; 814} 815static inline u32 minion_nvlink_dl_cmd_command_initpll_5_v(void) 816{ 817 return 0x00000025U; 818} 819static inline u32 minion_nvlink_dl_cmd_command_initpll_5_f(void) 820{ 821 return 0x25U; 822} 823static inline u32 minion_nvlink_dl_cmd_command_initpll_6_v(void) 824{ 825 return 0x00000026U; 826} 827static inline u32 minion_nvlink_dl_cmd_command_initpll_6_f(void) 828{ 829 return 0x26U; 830} 831static inline u32 
minion_nvlink_dl_cmd_command_initpll_7_v(void) 832{ 833 return 0x00000027U; 834} 835static inline u32 minion_nvlink_dl_cmd_command_initpll_7_f(void) 836{ 837 return 0x27U; 838} 839static inline u32 minion_nvlink_dl_cmd_fault_f(u32 v) 840{ 841 return (v & 0x1U) << 30U; 842} 843static inline u32 minion_nvlink_dl_cmd_fault_v(u32 r) 844{ 845 return (r >> 30U) & 0x1U; 846} 847static inline u32 minion_nvlink_dl_cmd_ready_f(u32 v) 848{ 849 return (v & 0x1U) << 31U; 850} 851static inline u32 minion_nvlink_dl_cmd_ready_v(u32 r) 852{ 853 return (r >> 31U) & 0x1U; 854} 855static inline u32 minion_misc_0_r(void) 856{ 857 return 0x000008b0U; 858} 859static inline u32 minion_misc_0_scratch_swrw_0_f(u32 v) 860{ 861 return (v & 0xffffffffU) << 0U; 862} 863static inline u32 minion_misc_0_scratch_swrw_0_v(u32 r) 864{ 865 return (r >> 0U) & 0xffffffffU; 866} 867static inline u32 minion_nvlink_link_intr_r(u32 i) 868{ 869 return 0x00000a00U + i*4U; 870} 871static inline u32 minion_nvlink_link_intr___size_1_v(void) 872{ 873 return 0x00000006U; 874} 875static inline u32 minion_nvlink_link_intr_code_f(u32 v) 876{ 877 return (v & 0xffU) << 0U; 878} 879static inline u32 minion_nvlink_link_intr_code_m(void) 880{ 881 return 0xffU << 0U; 882} 883static inline u32 minion_nvlink_link_intr_code_v(u32 r) 884{ 885 return (r >> 0U) & 0xffU; 886} 887static inline u32 minion_nvlink_link_intr_code_na_v(void) 888{ 889 return 0x00000000U; 890} 891static inline u32 minion_nvlink_link_intr_code_na_f(void) 892{ 893 return 0x0U; 894} 895static inline u32 minion_nvlink_link_intr_code_swreq_v(void) 896{ 897 return 0x00000001U; 898} 899static inline u32 minion_nvlink_link_intr_code_swreq_f(void) 900{ 901 return 0x1U; 902} 903static inline u32 minion_nvlink_link_intr_code_dlreq_v(void) 904{ 905 return 0x00000002U; 906} 907static inline u32 minion_nvlink_link_intr_code_dlreq_f(void) 908{ 909 return 0x2U; 910} 911static inline u32 minion_nvlink_link_intr_code_pmdisabled_v(void) 912{ 913 return 0x00000003U; 914} 
915static inline u32 minion_nvlink_link_intr_code_pmdisabled_f(void) 916{ 917 return 0x3U; 918} 919static inline u32 minion_nvlink_link_intr_subcode_f(u32 v) 920{ 921 return (v & 0xffU) << 8U; 922} 923static inline u32 minion_nvlink_link_intr_subcode_m(void) 924{ 925 return 0xffU << 8U; 926} 927static inline u32 minion_nvlink_link_intr_subcode_v(u32 r) 928{ 929 return (r >> 8U) & 0xffU; 930} 931static inline u32 minion_nvlink_link_intr_state_f(u32 v) 932{ 933 return (v & 0x1U) << 31U; 934} 935static inline u32 minion_nvlink_link_intr_state_m(void) 936{ 937 return 0x1U << 31U; 938} 939static inline u32 minion_nvlink_link_intr_state_v(u32 r) 940{ 941 return (r >> 31U) & 0x1U; 942} 943#endif
diff --git a/include/nvgpu/hw/gv100/hw_nvl_gv100.h b/include/nvgpu/hw/gv100/hw_nvl_gv100.h
deleted file mode 100644
index 2e4ec16..0000000
--- a/include/nvgpu/hw/gv100/hw_nvl_gv100.h
+++ /dev/null
@@ -1,1571 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_nvl_gv100_h_ 57#define _hw_nvl_gv100_h_ 58 59static inline u32 nvl_link_state_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 nvl_link_state_state_f(u32 v) 64{ 65 return (v & 0xffU) << 0U; 66} 67static inline u32 nvl_link_state_state_m(void) 68{ 69 return 0xffU << 0U; 70} 71static inline u32 nvl_link_state_state_v(u32 r) 72{ 73 return (r >> 0U) & 0xffU; 74} 75static inline u32 nvl_link_state_state_init_v(void) 76{ 77 return 0x00000000U; 78} 79static inline u32 nvl_link_state_state_init_f(void) 80{ 81 return 0x0U; 82} 83static inline u32 nvl_link_state_state_hwcfg_v(void) 84{ 85 return 0x00000001U; 86} 87static inline u32 nvl_link_state_state_hwcfg_f(void) 88{ 89 return 0x1U; 90} 91static inline u32 nvl_link_state_state_swcfg_v(void) 92{ 93 return 0x00000002U; 94} 95static inline u32 nvl_link_state_state_swcfg_f(void) 96{ 97 return 0x2U; 98} 99static inline u32 nvl_link_state_state_active_v(void) 100{ 101 return 0x00000003U; 102} 103static inline u32 nvl_link_state_state_active_f(void) 104{ 105 return 0x3U; 106} 107static inline u32 nvl_link_state_state_fault_v(void) 108{ 109 return 0x00000004U; 110} 111static inline u32 nvl_link_state_state_fault_f(void) 112{ 113 return 0x4U; 114} 115static inline u32 
nvl_link_state_state_rcvy_ac_v(void) 116{ 117 return 0x00000008U; 118} 119static inline u32 nvl_link_state_state_rcvy_ac_f(void) 120{ 121 return 0x8U; 122} 123static inline u32 nvl_link_state_state_rcvy_sw_v(void) 124{ 125 return 0x00000009U; 126} 127static inline u32 nvl_link_state_state_rcvy_sw_f(void) 128{ 129 return 0x9U; 130} 131static inline u32 nvl_link_state_state_rcvy_rx_v(void) 132{ 133 return 0x0000000aU; 134} 135static inline u32 nvl_link_state_state_rcvy_rx_f(void) 136{ 137 return 0xaU; 138} 139static inline u32 nvl_link_state_an0_busy_f(u32 v) 140{ 141 return (v & 0x1U) << 12U; 142} 143static inline u32 nvl_link_state_an0_busy_m(void) 144{ 145 return 0x1U << 12U; 146} 147static inline u32 nvl_link_state_an0_busy_v(u32 r) 148{ 149 return (r >> 12U) & 0x1U; 150} 151static inline u32 nvl_link_state_tl_busy_f(u32 v) 152{ 153 return (v & 0x1U) << 13U; 154} 155static inline u32 nvl_link_state_tl_busy_m(void) 156{ 157 return 0x1U << 13U; 158} 159static inline u32 nvl_link_state_tl_busy_v(u32 r) 160{ 161 return (r >> 13U) & 0x1U; 162} 163static inline u32 nvl_link_state_dbg_substate_f(u32 v) 164{ 165 return (v & 0xffffU) << 16U; 166} 167static inline u32 nvl_link_state_dbg_substate_m(void) 168{ 169 return 0xffffU << 16U; 170} 171static inline u32 nvl_link_state_dbg_substate_v(u32 r) 172{ 173 return (r >> 16U) & 0xffffU; 174} 175static inline u32 nvl_link_activity_r(void) 176{ 177 return 0x0000000cU; 178} 179static inline u32 nvl_link_activity_blkact_f(u32 v) 180{ 181 return (v & 0x7U) << 0U; 182} 183static inline u32 nvl_link_activity_blkact_m(void) 184{ 185 return 0x7U << 0U; 186} 187static inline u32 nvl_link_activity_blkact_v(u32 r) 188{ 189 return (r >> 0U) & 0x7U; 190} 191static inline u32 nvl_sublink_activity_r(u32 i) 192{ 193 return 0x00000010U + i*4U; 194} 195static inline u32 nvl_sublink_activity_blkact0_f(u32 v) 196{ 197 return (v & 0x7U) << 0U; 198} 199static inline u32 nvl_sublink_activity_blkact0_m(void) 200{ 201 return 0x7U << 0U; 202} 203static 
inline u32 nvl_sublink_activity_blkact0_v(u32 r) 204{ 205 return (r >> 0U) & 0x7U; 206} 207static inline u32 nvl_sublink_activity_blkact1_f(u32 v) 208{ 209 return (v & 0x7U) << 8U; 210} 211static inline u32 nvl_sublink_activity_blkact1_m(void) 212{ 213 return 0x7U << 8U; 214} 215static inline u32 nvl_sublink_activity_blkact1_v(u32 r) 216{ 217 return (r >> 8U) & 0x7U; 218} 219static inline u32 nvl_link_config_r(void) 220{ 221 return 0x00000018U; 222} 223static inline u32 nvl_link_config_ac_safe_en_f(u32 v) 224{ 225 return (v & 0x1U) << 30U; 226} 227static inline u32 nvl_link_config_ac_safe_en_m(void) 228{ 229 return 0x1U << 30U; 230} 231static inline u32 nvl_link_config_ac_safe_en_v(u32 r) 232{ 233 return (r >> 30U) & 0x1U; 234} 235static inline u32 nvl_link_config_ac_safe_en_on_v(void) 236{ 237 return 0x00000001U; 238} 239static inline u32 nvl_link_config_ac_safe_en_on_f(void) 240{ 241 return 0x40000000U; 242} 243static inline u32 nvl_link_config_link_en_f(u32 v) 244{ 245 return (v & 0x1U) << 31U; 246} 247static inline u32 nvl_link_config_link_en_m(void) 248{ 249 return 0x1U << 31U; 250} 251static inline u32 nvl_link_config_link_en_v(u32 r) 252{ 253 return (r >> 31U) & 0x1U; 254} 255static inline u32 nvl_link_config_link_en_on_v(void) 256{ 257 return 0x00000001U; 258} 259static inline u32 nvl_link_config_link_en_on_f(void) 260{ 261 return 0x80000000U; 262} 263static inline u32 nvl_link_change_r(void) 264{ 265 return 0x00000040U; 266} 267static inline u32 nvl_link_change_oldstate_mask_f(u32 v) 268{ 269 return (v & 0xfU) << 16U; 270} 271static inline u32 nvl_link_change_oldstate_mask_m(void) 272{ 273 return 0xfU << 16U; 274} 275static inline u32 nvl_link_change_oldstate_mask_v(u32 r) 276{ 277 return (r >> 16U) & 0xfU; 278} 279static inline u32 nvl_link_change_oldstate_mask_dontcare_v(void) 280{ 281 return 0x0000000fU; 282} 283static inline u32 nvl_link_change_oldstate_mask_dontcare_f(void) 284{ 285 return 0xf0000U; 286} 287static inline u32 
nvl_link_change_newstate_f(u32 v) 288{ 289 return (v & 0xfU) << 4U; 290} 291static inline u32 nvl_link_change_newstate_m(void) 292{ 293 return 0xfU << 4U; 294} 295static inline u32 nvl_link_change_newstate_v(u32 r) 296{ 297 return (r >> 4U) & 0xfU; 298} 299static inline u32 nvl_link_change_newstate_hwcfg_v(void) 300{ 301 return 0x00000001U; 302} 303static inline u32 nvl_link_change_newstate_hwcfg_f(void) 304{ 305 return 0x10U; 306} 307static inline u32 nvl_link_change_newstate_swcfg_v(void) 308{ 309 return 0x00000002U; 310} 311static inline u32 nvl_link_change_newstate_swcfg_f(void) 312{ 313 return 0x20U; 314} 315static inline u32 nvl_link_change_newstate_active_v(void) 316{ 317 return 0x00000003U; 318} 319static inline u32 nvl_link_change_newstate_active_f(void) 320{ 321 return 0x30U; 322} 323static inline u32 nvl_link_change_action_f(u32 v) 324{ 325 return (v & 0x3U) << 2U; 326} 327static inline u32 nvl_link_change_action_m(void) 328{ 329 return 0x3U << 2U; 330} 331static inline u32 nvl_link_change_action_v(u32 r) 332{ 333 return (r >> 2U) & 0x3U; 334} 335static inline u32 nvl_link_change_action_ltssm_change_v(void) 336{ 337 return 0x00000001U; 338} 339static inline u32 nvl_link_change_action_ltssm_change_f(void) 340{ 341 return 0x4U; 342} 343static inline u32 nvl_link_change_status_f(u32 v) 344{ 345 return (v & 0x3U) << 0U; 346} 347static inline u32 nvl_link_change_status_m(void) 348{ 349 return 0x3U << 0U; 350} 351static inline u32 nvl_link_change_status_v(u32 r) 352{ 353 return (r >> 0U) & 0x3U; 354} 355static inline u32 nvl_link_change_status_done_v(void) 356{ 357 return 0x00000000U; 358} 359static inline u32 nvl_link_change_status_done_f(void) 360{ 361 return 0x0U; 362} 363static inline u32 nvl_link_change_status_busy_v(void) 364{ 365 return 0x00000001U; 366} 367static inline u32 nvl_link_change_status_busy_f(void) 368{ 369 return 0x1U; 370} 371static inline u32 nvl_link_change_status_fault_v(void) 372{ 373 return 0x00000002U; 374} 375static inline u32 
nvl_link_change_status_fault_f(void) 376{ 377 return 0x2U; 378} 379static inline u32 nvl_sublink_change_r(void) 380{ 381 return 0x00000044U; 382} 383static inline u32 nvl_sublink_change_countdown_f(u32 v) 384{ 385 return (v & 0xfffU) << 20U; 386} 387static inline u32 nvl_sublink_change_countdown_m(void) 388{ 389 return 0xfffU << 20U; 390} 391static inline u32 nvl_sublink_change_countdown_v(u32 r) 392{ 393 return (r >> 20U) & 0xfffU; 394} 395static inline u32 nvl_sublink_change_oldstate_mask_f(u32 v) 396{ 397 return (v & 0xfU) << 16U; 398} 399static inline u32 nvl_sublink_change_oldstate_mask_m(void) 400{ 401 return 0xfU << 16U; 402} 403static inline u32 nvl_sublink_change_oldstate_mask_v(u32 r) 404{ 405 return (r >> 16U) & 0xfU; 406} 407static inline u32 nvl_sublink_change_oldstate_mask_dontcare_v(void) 408{ 409 return 0x0000000fU; 410} 411static inline u32 nvl_sublink_change_oldstate_mask_dontcare_f(void) 412{ 413 return 0xf0000U; 414} 415static inline u32 nvl_sublink_change_sublink_f(u32 v) 416{ 417 return (v & 0xfU) << 12U; 418} 419static inline u32 nvl_sublink_change_sublink_m(void) 420{ 421 return 0xfU << 12U; 422} 423static inline u32 nvl_sublink_change_sublink_v(u32 r) 424{ 425 return (r >> 12U) & 0xfU; 426} 427static inline u32 nvl_sublink_change_sublink_tx_v(void) 428{ 429 return 0x00000000U; 430} 431static inline u32 nvl_sublink_change_sublink_tx_f(void) 432{ 433 return 0x0U; 434} 435static inline u32 nvl_sublink_change_sublink_rx_v(void) 436{ 437 return 0x00000001U; 438} 439static inline u32 nvl_sublink_change_sublink_rx_f(void) 440{ 441 return 0x1000U; 442} 443static inline u32 nvl_sublink_change_newstate_f(u32 v) 444{ 445 return (v & 0xfU) << 4U; 446} 447static inline u32 nvl_sublink_change_newstate_m(void) 448{ 449 return 0xfU << 4U; 450} 451static inline u32 nvl_sublink_change_newstate_v(u32 r) 452{ 453 return (r >> 4U) & 0xfU; 454} 455static inline u32 nvl_sublink_change_newstate_hs_v(void) 456{ 457 return 0x00000000U; 458} 459static inline u32 
nvl_sublink_change_newstate_hs_f(void) 460{ 461 return 0x0U; 462} 463static inline u32 nvl_sublink_change_newstate_eighth_v(void) 464{ 465 return 0x00000004U; 466} 467static inline u32 nvl_sublink_change_newstate_eighth_f(void) 468{ 469 return 0x40U; 470} 471static inline u32 nvl_sublink_change_newstate_train_v(void) 472{ 473 return 0x00000005U; 474} 475static inline u32 nvl_sublink_change_newstate_train_f(void) 476{ 477 return 0x50U; 478} 479static inline u32 nvl_sublink_change_newstate_safe_v(void) 480{ 481 return 0x00000006U; 482} 483static inline u32 nvl_sublink_change_newstate_safe_f(void) 484{ 485 return 0x60U; 486} 487static inline u32 nvl_sublink_change_newstate_off_v(void) 488{ 489 return 0x00000007U; 490} 491static inline u32 nvl_sublink_change_newstate_off_f(void) 492{ 493 return 0x70U; 494} 495static inline u32 nvl_sublink_change_action_f(u32 v) 496{ 497 return (v & 0x3U) << 2U; 498} 499static inline u32 nvl_sublink_change_action_m(void) 500{ 501 return 0x3U << 2U; 502} 503static inline u32 nvl_sublink_change_action_v(u32 r) 504{ 505 return (r >> 2U) & 0x3U; 506} 507static inline u32 nvl_sublink_change_action_slsm_change_v(void) 508{ 509 return 0x00000001U; 510} 511static inline u32 nvl_sublink_change_action_slsm_change_f(void) 512{ 513 return 0x4U; 514} 515static inline u32 nvl_sublink_change_status_f(u32 v) 516{ 517 return (v & 0x3U) << 0U; 518} 519static inline u32 nvl_sublink_change_status_m(void) 520{ 521 return 0x3U << 0U; 522} 523static inline u32 nvl_sublink_change_status_v(u32 r) 524{ 525 return (r >> 0U) & 0x3U; 526} 527static inline u32 nvl_sublink_change_status_done_v(void) 528{ 529 return 0x00000000U; 530} 531static inline u32 nvl_sublink_change_status_done_f(void) 532{ 533 return 0x0U; 534} 535static inline u32 nvl_sublink_change_status_busy_v(void) 536{ 537 return 0x00000001U; 538} 539static inline u32 nvl_sublink_change_status_busy_f(void) 540{ 541 return 0x1U; 542} 543static inline u32 nvl_sublink_change_status_fault_v(void) 544{ 545 
return 0x00000002U; 546} 547static inline u32 nvl_sublink_change_status_fault_f(void) 548{ 549 return 0x2U; 550} 551static inline u32 nvl_link_test_r(void) 552{ 553 return 0x00000048U; 554} 555static inline u32 nvl_link_test_mode_f(u32 v) 556{ 557 return (v & 0x1U) << 0U; 558} 559static inline u32 nvl_link_test_mode_m(void) 560{ 561 return 0x1U << 0U; 562} 563static inline u32 nvl_link_test_mode_v(u32 r) 564{ 565 return (r >> 0U) & 0x1U; 566} 567static inline u32 nvl_link_test_mode_enable_v(void) 568{ 569 return 0x00000001U; 570} 571static inline u32 nvl_link_test_mode_enable_f(void) 572{ 573 return 0x1U; 574} 575static inline u32 nvl_link_test_auto_hwcfg_f(u32 v) 576{ 577 return (v & 0x1U) << 30U; 578} 579static inline u32 nvl_link_test_auto_hwcfg_m(void) 580{ 581 return 0x1U << 30U; 582} 583static inline u32 nvl_link_test_auto_hwcfg_v(u32 r) 584{ 585 return (r >> 30U) & 0x1U; 586} 587static inline u32 nvl_link_test_auto_hwcfg_enable_v(void) 588{ 589 return 0x00000001U; 590} 591static inline u32 nvl_link_test_auto_hwcfg_enable_f(void) 592{ 593 return 0x40000000U; 594} 595static inline u32 nvl_link_test_auto_nvhs_f(u32 v) 596{ 597 return (v & 0x1U) << 31U; 598} 599static inline u32 nvl_link_test_auto_nvhs_m(void) 600{ 601 return 0x1U << 31U; 602} 603static inline u32 nvl_link_test_auto_nvhs_v(u32 r) 604{ 605 return (r >> 31U) & 0x1U; 606} 607static inline u32 nvl_link_test_auto_nvhs_enable_v(void) 608{ 609 return 0x00000001U; 610} 611static inline u32 nvl_link_test_auto_nvhs_enable_f(void) 612{ 613 return 0x80000000U; 614} 615static inline u32 nvl_sl0_slsm_status_tx_r(void) 616{ 617 return 0x00002024U; 618} 619static inline u32 nvl_sl0_slsm_status_tx_substate_f(u32 v) 620{ 621 return (v & 0xfU) << 0U; 622} 623static inline u32 nvl_sl0_slsm_status_tx_substate_m(void) 624{ 625 return 0xfU << 0U; 626} 627static inline u32 nvl_sl0_slsm_status_tx_substate_v(u32 r) 628{ 629 return (r >> 0U) & 0xfU; 630} 631static inline u32 nvl_sl0_slsm_status_tx_primary_state_f(u32 v) 
632{ 633 return (v & 0xfU) << 4U; 634} 635static inline u32 nvl_sl0_slsm_status_tx_primary_state_m(void) 636{ 637 return 0xfU << 4U; 638} 639static inline u32 nvl_sl0_slsm_status_tx_primary_state_v(u32 r) 640{ 641 return (r >> 4U) & 0xfU; 642} 643static inline u32 nvl_sl0_slsm_status_tx_primary_state_hs_v(void) 644{ 645 return 0x00000000U; 646} 647static inline u32 nvl_sl0_slsm_status_tx_primary_state_hs_f(void) 648{ 649 return 0x0U; 650} 651static inline u32 nvl_sl0_slsm_status_tx_primary_state_eighth_v(void) 652{ 653 return 0x00000004U; 654} 655static inline u32 nvl_sl0_slsm_status_tx_primary_state_eighth_f(void) 656{ 657 return 0x40U; 658} 659static inline u32 nvl_sl0_slsm_status_tx_primary_state_train_v(void) 660{ 661 return 0x00000005U; 662} 663static inline u32 nvl_sl0_slsm_status_tx_primary_state_train_f(void) 664{ 665 return 0x50U; 666} 667static inline u32 nvl_sl0_slsm_status_tx_primary_state_off_v(void) 668{ 669 return 0x00000007U; 670} 671static inline u32 nvl_sl0_slsm_status_tx_primary_state_off_f(void) 672{ 673 return 0x70U; 674} 675static inline u32 nvl_sl0_slsm_status_tx_primary_state_safe_v(void) 676{ 677 return 0x00000006U; 678} 679static inline u32 nvl_sl0_slsm_status_tx_primary_state_safe_f(void) 680{ 681 return 0x60U; 682} 683static inline u32 nvl_sl1_slsm_status_rx_r(void) 684{ 685 return 0x00003014U; 686} 687static inline u32 nvl_sl1_slsm_status_rx_substate_f(u32 v) 688{ 689 return (v & 0xfU) << 0U; 690} 691static inline u32 nvl_sl1_slsm_status_rx_substate_m(void) 692{ 693 return 0xfU << 0U; 694} 695static inline u32 nvl_sl1_slsm_status_rx_substate_v(u32 r) 696{ 697 return (r >> 0U) & 0xfU; 698} 699static inline u32 nvl_sl1_slsm_status_rx_primary_state_f(u32 v) 700{ 701 return (v & 0xfU) << 4U; 702} 703static inline u32 nvl_sl1_slsm_status_rx_primary_state_m(void) 704{ 705 return 0xfU << 4U; 706} 707static inline u32 nvl_sl1_slsm_status_rx_primary_state_v(u32 r) 708{ 709 return (r >> 4U) & 0xfU; 710} 711static inline u32 
nvl_sl1_slsm_status_rx_primary_state_hs_v(void) 712{ 713 return 0x00000000U; 714} 715static inline u32 nvl_sl1_slsm_status_rx_primary_state_hs_f(void) 716{ 717 return 0x0U; 718} 719static inline u32 nvl_sl1_slsm_status_rx_primary_state_eighth_v(void) 720{ 721 return 0x00000004U; 722} 723static inline u32 nvl_sl1_slsm_status_rx_primary_state_eighth_f(void) 724{ 725 return 0x40U; 726} 727static inline u32 nvl_sl1_slsm_status_rx_primary_state_train_v(void) 728{ 729 return 0x00000005U; 730} 731static inline u32 nvl_sl1_slsm_status_rx_primary_state_train_f(void) 732{ 733 return 0x50U; 734} 735static inline u32 nvl_sl1_slsm_status_rx_primary_state_off_v(void) 736{ 737 return 0x00000007U; 738} 739static inline u32 nvl_sl1_slsm_status_rx_primary_state_off_f(void) 740{ 741 return 0x70U; 742} 743static inline u32 nvl_sl1_slsm_status_rx_primary_state_safe_v(void) 744{ 745 return 0x00000006U; 746} 747static inline u32 nvl_sl1_slsm_status_rx_primary_state_safe_f(void) 748{ 749 return 0x60U; 750} 751static inline u32 nvl_sl0_safe_ctrl2_tx_r(void) 752{ 753 return 0x00002008U; 754} 755static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_init_f(u32 v) 756{ 757 return (v & 0x7ffU) << 0U; 758} 759static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_init_m(void) 760{ 761 return 0x7ffU << 0U; 762} 763static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_init_v(u32 r) 764{ 765 return (r >> 0U) & 0x7ffU; 766} 767static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_init_init_v(void) 768{ 769 return 0x00000728U; 770} 771static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_init_init_f(void) 772{ 773 return 0x728U; 774} 775static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_initscl_f(u32 v) 776{ 777 return (v & 0x1fU) << 11U; 778} 779static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_initscl_m(void) 780{ 781 return 0x1fU << 11U; 782} 783static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_initscl_v(u32 r) 784{ 785 return (r >> 11U) & 0x1fU; 786} 787static inline u32 nvl_sl0_safe_ctrl2_tx_ctr_initscl_init_v(void) 788{ 789 return 0x0000000fU; 790} 791static 
inline u32 nvl_sl0_safe_ctrl2_tx_ctr_initscl_init_f(void) 792{ 793 return 0x7800U; 794} 795static inline u32 nvl_sl1_error_rate_ctrl_r(void) 796{ 797 return 0x00003284U; 798} 799static inline u32 nvl_sl1_error_rate_ctrl_short_threshold_man_f(u32 v) 800{ 801 return (v & 0x7U) << 0U; 802} 803static inline u32 nvl_sl1_error_rate_ctrl_short_threshold_man_m(void) 804{ 805 return 0x7U << 0U; 806} 807static inline u32 nvl_sl1_error_rate_ctrl_short_threshold_man_v(u32 r) 808{ 809 return (r >> 0U) & 0x7U; 810} 811static inline u32 nvl_sl1_error_rate_ctrl_long_threshold_man_f(u32 v) 812{ 813 return (v & 0x7U) << 16U; 814} 815static inline u32 nvl_sl1_error_rate_ctrl_long_threshold_man_m(void) 816{ 817 return 0x7U << 16U; 818} 819static inline u32 nvl_sl1_error_rate_ctrl_long_threshold_man_v(u32 r) 820{ 821 return (r >> 16U) & 0x7U; 822} 823static inline u32 nvl_sl1_rxslsm_timeout_2_r(void) 824{ 825 return 0x00003034U; 826} 827static inline u32 nvl_txiobist_configreg_r(void) 828{ 829 return 0x00002e14U; 830} 831static inline u32 nvl_txiobist_configreg_io_bist_mode_in_f(u32 v) 832{ 833 return (v & 0x1U) << 17U; 834} 835static inline u32 nvl_txiobist_configreg_io_bist_mode_in_m(void) 836{ 837 return 0x1U << 17U; 838} 839static inline u32 nvl_txiobist_configreg_io_bist_mode_in_v(u32 r) 840{ 841 return (r >> 17U) & 0x1U; 842} 843static inline u32 nvl_txiobist_config_r(void) 844{ 845 return 0x00002e10U; 846} 847static inline u32 nvl_txiobist_config_dpg_prbsseedld_f(u32 v) 848{ 849 return (v & 0x1U) << 2U; 850} 851static inline u32 nvl_txiobist_config_dpg_prbsseedld_m(void) 852{ 853 return 0x1U << 2U; 854} 855static inline u32 nvl_txiobist_config_dpg_prbsseedld_v(u32 r) 856{ 857 return (r >> 2U) & 0x1U; 858} 859static inline u32 nvl_intr_r(void) 860{ 861 return 0x00000050U; 862} 863static inline u32 nvl_intr_tx_replay_f(u32 v) 864{ 865 return (v & 0x1U) << 0U; 866} 867static inline u32 nvl_intr_tx_replay_m(void) 868{ 869 return 0x1U << 0U; 870} 871static inline u32 
nvl_intr_tx_replay_v(u32 r) 872{ 873 return (r >> 0U) & 0x1U; 874} 875static inline u32 nvl_intr_tx_recovery_short_f(u32 v) 876{ 877 return (v & 0x1U) << 1U; 878} 879static inline u32 nvl_intr_tx_recovery_short_m(void) 880{ 881 return 0x1U << 1U; 882} 883static inline u32 nvl_intr_tx_recovery_short_v(u32 r) 884{ 885 return (r >> 1U) & 0x1U; 886} 887static inline u32 nvl_intr_tx_recovery_long_f(u32 v) 888{ 889 return (v & 0x1U) << 2U; 890} 891static inline u32 nvl_intr_tx_recovery_long_m(void) 892{ 893 return 0x1U << 2U; 894} 895static inline u32 nvl_intr_tx_recovery_long_v(u32 r) 896{ 897 return (r >> 2U) & 0x1U; 898} 899static inline u32 nvl_intr_tx_fault_ram_f(u32 v) 900{ 901 return (v & 0x1U) << 4U; 902} 903static inline u32 nvl_intr_tx_fault_ram_m(void) 904{ 905 return 0x1U << 4U; 906} 907static inline u32 nvl_intr_tx_fault_ram_v(u32 r) 908{ 909 return (r >> 4U) & 0x1U; 910} 911static inline u32 nvl_intr_tx_fault_interface_f(u32 v) 912{ 913 return (v & 0x1U) << 5U; 914} 915static inline u32 nvl_intr_tx_fault_interface_m(void) 916{ 917 return 0x1U << 5U; 918} 919static inline u32 nvl_intr_tx_fault_interface_v(u32 r) 920{ 921 return (r >> 5U) & 0x1U; 922} 923static inline u32 nvl_intr_tx_fault_sublink_change_f(u32 v) 924{ 925 return (v & 0x1U) << 8U; 926} 927static inline u32 nvl_intr_tx_fault_sublink_change_m(void) 928{ 929 return 0x1U << 8U; 930} 931static inline u32 nvl_intr_tx_fault_sublink_change_v(u32 r) 932{ 933 return (r >> 8U) & 0x1U; 934} 935static inline u32 nvl_intr_rx_fault_sublink_change_f(u32 v) 936{ 937 return (v & 0x1U) << 16U; 938} 939static inline u32 nvl_intr_rx_fault_sublink_change_m(void) 940{ 941 return 0x1U << 16U; 942} 943static inline u32 nvl_intr_rx_fault_sublink_change_v(u32 r) 944{ 945 return (r >> 16U) & 0x1U; 946} 947static inline u32 nvl_intr_rx_fault_dl_protocol_f(u32 v) 948{ 949 return (v & 0x1U) << 20U; 950} 951static inline u32 nvl_intr_rx_fault_dl_protocol_m(void) 952{ 953 return 0x1U << 20U; 954} 955static inline u32 
nvl_intr_rx_fault_dl_protocol_v(u32 r) 956{ 957 return (r >> 20U) & 0x1U; 958} 959static inline u32 nvl_intr_rx_short_error_rate_f(u32 v) 960{ 961 return (v & 0x1U) << 21U; 962} 963static inline u32 nvl_intr_rx_short_error_rate_m(void) 964{ 965 return 0x1U << 21U; 966} 967static inline u32 nvl_intr_rx_short_error_rate_v(u32 r) 968{ 969 return (r >> 21U) & 0x1U; 970} 971static inline u32 nvl_intr_rx_long_error_rate_f(u32 v) 972{ 973 return (v & 0x1U) << 22U; 974} 975static inline u32 nvl_intr_rx_long_error_rate_m(void) 976{ 977 return 0x1U << 22U; 978} 979static inline u32 nvl_intr_rx_long_error_rate_v(u32 r) 980{ 981 return (r >> 22U) & 0x1U; 982} 983static inline u32 nvl_intr_rx_ila_trigger_f(u32 v) 984{ 985 return (v & 0x1U) << 23U; 986} 987static inline u32 nvl_intr_rx_ila_trigger_m(void) 988{ 989 return 0x1U << 23U; 990} 991static inline u32 nvl_intr_rx_ila_trigger_v(u32 r) 992{ 993 return (r >> 23U) & 0x1U; 994} 995static inline u32 nvl_intr_rx_crc_counter_f(u32 v) 996{ 997 return (v & 0x1U) << 24U; 998} 999static inline u32 nvl_intr_rx_crc_counter_m(void) 1000{ 1001 return 0x1U << 24U; 1002} 1003static inline u32 nvl_intr_rx_crc_counter_v(u32 r) 1004{ 1005 return (r >> 24U) & 0x1U; 1006} 1007static inline u32 nvl_intr_ltssm_fault_f(u32 v) 1008{ 1009 return (v & 0x1U) << 28U; 1010} 1011static inline u32 nvl_intr_ltssm_fault_m(void) 1012{ 1013 return 0x1U << 28U; 1014} 1015static inline u32 nvl_intr_ltssm_fault_v(u32 r) 1016{ 1017 return (r >> 28U) & 0x1U; 1018} 1019static inline u32 nvl_intr_ltssm_protocol_f(u32 v) 1020{ 1021 return (v & 0x1U) << 29U; 1022} 1023static inline u32 nvl_intr_ltssm_protocol_m(void) 1024{ 1025 return 0x1U << 29U; 1026} 1027static inline u32 nvl_intr_ltssm_protocol_v(u32 r) 1028{ 1029 return (r >> 29U) & 0x1U; 1030} 1031static inline u32 nvl_intr_minion_request_f(u32 v) 1032{ 1033 return (v & 0x1U) << 30U; 1034} 1035static inline u32 nvl_intr_minion_request_m(void) 1036{ 1037 return 0x1U << 30U; 1038} 1039static inline u32 
nvl_intr_minion_request_v(u32 r) 1040{ 1041 return (r >> 30U) & 0x1U; 1042} 1043static inline u32 nvl_intr_sw2_r(void) 1044{ 1045 return 0x00000054U; 1046} 1047static inline u32 nvl_intr_minion_r(void) 1048{ 1049 return 0x00000060U; 1050} 1051static inline u32 nvl_intr_minion_tx_replay_f(u32 v) 1052{ 1053 return (v & 0x1U) << 0U; 1054} 1055static inline u32 nvl_intr_minion_tx_replay_m(void) 1056{ 1057 return 0x1U << 0U; 1058} 1059static inline u32 nvl_intr_minion_tx_replay_v(u32 r) 1060{ 1061 return (r >> 0U) & 0x1U; 1062} 1063static inline u32 nvl_intr_minion_tx_recovery_short_f(u32 v) 1064{ 1065 return (v & 0x1U) << 1U; 1066} 1067static inline u32 nvl_intr_minion_tx_recovery_short_m(void) 1068{ 1069 return 0x1U << 1U; 1070} 1071static inline u32 nvl_intr_minion_tx_recovery_short_v(u32 r) 1072{ 1073 return (r >> 1U) & 0x1U; 1074} 1075static inline u32 nvl_intr_minion_tx_recovery_long_f(u32 v) 1076{ 1077 return (v & 0x1U) << 2U; 1078} 1079static inline u32 nvl_intr_minion_tx_recovery_long_m(void) 1080{ 1081 return 0x1U << 2U; 1082} 1083static inline u32 nvl_intr_minion_tx_recovery_long_v(u32 r) 1084{ 1085 return (r >> 2U) & 0x1U; 1086} 1087static inline u32 nvl_intr_minion_tx_fault_ram_f(u32 v) 1088{ 1089 return (v & 0x1U) << 4U; 1090} 1091static inline u32 nvl_intr_minion_tx_fault_ram_m(void) 1092{ 1093 return 0x1U << 4U; 1094} 1095static inline u32 nvl_intr_minion_tx_fault_ram_v(u32 r) 1096{ 1097 return (r >> 4U) & 0x1U; 1098} 1099static inline u32 nvl_intr_minion_tx_fault_interface_f(u32 v) 1100{ 1101 return (v & 0x1U) << 5U; 1102} 1103static inline u32 nvl_intr_minion_tx_fault_interface_m(void) 1104{ 1105 return 0x1U << 5U; 1106} 1107static inline u32 nvl_intr_minion_tx_fault_interface_v(u32 r) 1108{ 1109 return (r >> 5U) & 0x1U; 1110} 1111static inline u32 nvl_intr_minion_tx_fault_sublink_change_f(u32 v) 1112{ 1113 return (v & 0x1U) << 8U; 1114} 1115static inline u32 nvl_intr_minion_tx_fault_sublink_change_m(void) 1116{ 1117 return 0x1U << 8U; 1118} 1119static 
inline u32 nvl_intr_minion_tx_fault_sublink_change_v(u32 r) 1120{ 1121 return (r >> 8U) & 0x1U; 1122} 1123static inline u32 nvl_intr_minion_rx_fault_sublink_change_f(u32 v) 1124{ 1125 return (v & 0x1U) << 16U; 1126} 1127static inline u32 nvl_intr_minion_rx_fault_sublink_change_m(void) 1128{ 1129 return 0x1U << 16U; 1130} 1131static inline u32 nvl_intr_minion_rx_fault_sublink_change_v(u32 r) 1132{ 1133 return (r >> 16U) & 0x1U; 1134} 1135static inline u32 nvl_intr_minion_rx_fault_dl_protocol_f(u32 v) 1136{ 1137 return (v & 0x1U) << 20U; 1138} 1139static inline u32 nvl_intr_minion_rx_fault_dl_protocol_m(void) 1140{ 1141 return 0x1U << 20U; 1142} 1143static inline u32 nvl_intr_minion_rx_fault_dl_protocol_v(u32 r) 1144{ 1145 return (r >> 20U) & 0x1U; 1146} 1147static inline u32 nvl_intr_minion_rx_short_error_rate_f(u32 v) 1148{ 1149 return (v & 0x1U) << 21U; 1150} 1151static inline u32 nvl_intr_minion_rx_short_error_rate_m(void) 1152{ 1153 return 0x1U << 21U; 1154} 1155static inline u32 nvl_intr_minion_rx_short_error_rate_v(u32 r) 1156{ 1157 return (r >> 21U) & 0x1U; 1158} 1159static inline u32 nvl_intr_minion_rx_long_error_rate_f(u32 v) 1160{ 1161 return (v & 0x1U) << 22U; 1162} 1163static inline u32 nvl_intr_minion_rx_long_error_rate_m(void) 1164{ 1165 return 0x1U << 22U; 1166} 1167static inline u32 nvl_intr_minion_rx_long_error_rate_v(u32 r) 1168{ 1169 return (r >> 22U) & 0x1U; 1170} 1171static inline u32 nvl_intr_minion_rx_ila_trigger_f(u32 v) 1172{ 1173 return (v & 0x1U) << 23U; 1174} 1175static inline u32 nvl_intr_minion_rx_ila_trigger_m(void) 1176{ 1177 return 0x1U << 23U; 1178} 1179static inline u32 nvl_intr_minion_rx_ila_trigger_v(u32 r) 1180{ 1181 return (r >> 23U) & 0x1U; 1182} 1183static inline u32 nvl_intr_minion_rx_crc_counter_f(u32 v) 1184{ 1185 return (v & 0x1U) << 24U; 1186} 1187static inline u32 nvl_intr_minion_rx_crc_counter_m(void) 1188{ 1189 return 0x1U << 24U; 1190} 1191static inline u32 nvl_intr_minion_rx_crc_counter_v(u32 r) 1192{ 1193 return (r 
>> 24U) & 0x1U; 1194} 1195static inline u32 nvl_intr_minion_ltssm_fault_f(u32 v) 1196{ 1197 return (v & 0x1U) << 28U; 1198} 1199static inline u32 nvl_intr_minion_ltssm_fault_m(void) 1200{ 1201 return 0x1U << 28U; 1202} 1203static inline u32 nvl_intr_minion_ltssm_fault_v(u32 r) 1204{ 1205 return (r >> 28U) & 0x1U; 1206} 1207static inline u32 nvl_intr_minion_ltssm_protocol_f(u32 v) 1208{ 1209 return (v & 0x1U) << 29U; 1210} 1211static inline u32 nvl_intr_minion_ltssm_protocol_m(void) 1212{ 1213 return 0x1U << 29U; 1214} 1215static inline u32 nvl_intr_minion_ltssm_protocol_v(u32 r) 1216{ 1217 return (r >> 29U) & 0x1U; 1218} 1219static inline u32 nvl_intr_minion_minion_request_f(u32 v) 1220{ 1221 return (v & 0x1U) << 30U; 1222} 1223static inline u32 nvl_intr_minion_minion_request_m(void) 1224{ 1225 return 0x1U << 30U; 1226} 1227static inline u32 nvl_intr_minion_minion_request_v(u32 r) 1228{ 1229 return (r >> 30U) & 0x1U; 1230} 1231static inline u32 nvl_intr_nonstall_en_r(void) 1232{ 1233 return 0x0000005cU; 1234} 1235static inline u32 nvl_intr_stall_en_r(void) 1236{ 1237 return 0x00000058U; 1238} 1239static inline u32 nvl_intr_stall_en_tx_replay_f(u32 v) 1240{ 1241 return (v & 0x1U) << 0U; 1242} 1243static inline u32 nvl_intr_stall_en_tx_replay_m(void) 1244{ 1245 return 0x1U << 0U; 1246} 1247static inline u32 nvl_intr_stall_en_tx_replay_v(u32 r) 1248{ 1249 return (r >> 0U) & 0x1U; 1250} 1251static inline u32 nvl_intr_stall_en_tx_recovery_short_f(u32 v) 1252{ 1253 return (v & 0x1U) << 1U; 1254} 1255static inline u32 nvl_intr_stall_en_tx_recovery_short_m(void) 1256{ 1257 return 0x1U << 1U; 1258} 1259static inline u32 nvl_intr_stall_en_tx_recovery_short_v(u32 r) 1260{ 1261 return (r >> 1U) & 0x1U; 1262} 1263static inline u32 nvl_intr_stall_en_tx_recovery_short_enable_v(void) 1264{ 1265 return 0x00000001U; 1266} 1267static inline u32 nvl_intr_stall_en_tx_recovery_short_enable_f(void) 1268{ 1269 return 0x2U; 1270} 1271static inline u32 
nvl_intr_stall_en_tx_recovery_long_f(u32 v) 1272{ 1273 return (v & 0x1U) << 2U; 1274} 1275static inline u32 nvl_intr_stall_en_tx_recovery_long_m(void) 1276{ 1277 return 0x1U << 2U; 1278} 1279static inline u32 nvl_intr_stall_en_tx_recovery_long_v(u32 r) 1280{ 1281 return (r >> 2U) & 0x1U; 1282} 1283static inline u32 nvl_intr_stall_en_tx_recovery_long_enable_v(void) 1284{ 1285 return 0x00000001U; 1286} 1287static inline u32 nvl_intr_stall_en_tx_recovery_long_enable_f(void) 1288{ 1289 return 0x4U; 1290} 1291static inline u32 nvl_intr_stall_en_tx_fault_ram_f(u32 v) 1292{ 1293 return (v & 0x1U) << 4U; 1294} 1295static inline u32 nvl_intr_stall_en_tx_fault_ram_m(void) 1296{ 1297 return 0x1U << 4U; 1298} 1299static inline u32 nvl_intr_stall_en_tx_fault_ram_v(u32 r) 1300{ 1301 return (r >> 4U) & 0x1U; 1302} 1303static inline u32 nvl_intr_stall_en_tx_fault_ram_enable_v(void) 1304{ 1305 return 0x00000001U; 1306} 1307static inline u32 nvl_intr_stall_en_tx_fault_ram_enable_f(void) 1308{ 1309 return 0x10U; 1310} 1311static inline u32 nvl_intr_stall_en_tx_fault_interface_f(u32 v) 1312{ 1313 return (v & 0x1U) << 5U; 1314} 1315static inline u32 nvl_intr_stall_en_tx_fault_interface_m(void) 1316{ 1317 return 0x1U << 5U; 1318} 1319static inline u32 nvl_intr_stall_en_tx_fault_interface_v(u32 r) 1320{ 1321 return (r >> 5U) & 0x1U; 1322} 1323static inline u32 nvl_intr_stall_en_tx_fault_interface_enable_v(void) 1324{ 1325 return 0x00000001U; 1326} 1327static inline u32 nvl_intr_stall_en_tx_fault_interface_enable_f(void) 1328{ 1329 return 0x20U; 1330} 1331static inline u32 nvl_intr_stall_en_tx_fault_sublink_change_f(u32 v) 1332{ 1333 return (v & 0x1U) << 8U; 1334} 1335static inline u32 nvl_intr_stall_en_tx_fault_sublink_change_m(void) 1336{ 1337 return 0x1U << 8U; 1338} 1339static inline u32 nvl_intr_stall_en_tx_fault_sublink_change_v(u32 r) 1340{ 1341 return (r >> 8U) & 0x1U; 1342} 1343static inline u32 nvl_intr_stall_en_tx_fault_sublink_change_enable_v(void) 1344{ 1345 return 
0x00000001U; 1346} 1347static inline u32 nvl_intr_stall_en_tx_fault_sublink_change_enable_f(void) 1348{ 1349 return 0x100U; 1350} 1351static inline u32 nvl_intr_stall_en_rx_fault_sublink_change_f(u32 v) 1352{ 1353 return (v & 0x1U) << 16U; 1354} 1355static inline u32 nvl_intr_stall_en_rx_fault_sublink_change_m(void) 1356{ 1357 return 0x1U << 16U; 1358} 1359static inline u32 nvl_intr_stall_en_rx_fault_sublink_change_v(u32 r) 1360{ 1361 return (r >> 16U) & 0x1U; 1362} 1363static inline u32 nvl_intr_stall_en_rx_fault_sublink_change_enable_v(void) 1364{ 1365 return 0x00000001U; 1366} 1367static inline u32 nvl_intr_stall_en_rx_fault_sublink_change_enable_f(void) 1368{ 1369 return 0x10000U; 1370} 1371static inline u32 nvl_intr_stall_en_rx_fault_dl_protocol_f(u32 v) 1372{ 1373 return (v & 0x1U) << 20U; 1374} 1375static inline u32 nvl_intr_stall_en_rx_fault_dl_protocol_m(void) 1376{ 1377 return 0x1U << 20U; 1378} 1379static inline u32 nvl_intr_stall_en_rx_fault_dl_protocol_v(u32 r) 1380{ 1381 return (r >> 20U) & 0x1U; 1382} 1383static inline u32 nvl_intr_stall_en_rx_fault_dl_protocol_enable_v(void) 1384{ 1385 return 0x00000001U; 1386} 1387static inline u32 nvl_intr_stall_en_rx_fault_dl_protocol_enable_f(void) 1388{ 1389 return 0x100000U; 1390} 1391static inline u32 nvl_intr_stall_en_rx_short_error_rate_f(u32 v) 1392{ 1393 return (v & 0x1U) << 21U; 1394} 1395static inline u32 nvl_intr_stall_en_rx_short_error_rate_m(void) 1396{ 1397 return 0x1U << 21U; 1398} 1399static inline u32 nvl_intr_stall_en_rx_short_error_rate_v(u32 r) 1400{ 1401 return (r >> 21U) & 0x1U; 1402} 1403static inline u32 nvl_intr_stall_en_rx_short_error_rate_enable_v(void) 1404{ 1405 return 0x00000001U; 1406} 1407static inline u32 nvl_intr_stall_en_rx_short_error_rate_enable_f(void) 1408{ 1409 return 0x200000U; 1410} 1411static inline u32 nvl_intr_stall_en_rx_long_error_rate_f(u32 v) 1412{ 1413 return (v & 0x1U) << 22U; 1414} 1415static inline u32 nvl_intr_stall_en_rx_long_error_rate_m(void) 1416{ 1417 
return 0x1U << 22U; 1418} 1419static inline u32 nvl_intr_stall_en_rx_long_error_rate_v(u32 r) 1420{ 1421 return (r >> 22U) & 0x1U; 1422} 1423static inline u32 nvl_intr_stall_en_rx_long_error_rate_enable_v(void) 1424{ 1425 return 0x00000001U; 1426} 1427static inline u32 nvl_intr_stall_en_rx_long_error_rate_enable_f(void) 1428{ 1429 return 0x400000U; 1430} 1431static inline u32 nvl_intr_stall_en_rx_ila_trigger_f(u32 v) 1432{ 1433 return (v & 0x1U) << 23U; 1434} 1435static inline u32 nvl_intr_stall_en_rx_ila_trigger_m(void) 1436{ 1437 return 0x1U << 23U; 1438} 1439static inline u32 nvl_intr_stall_en_rx_ila_trigger_v(u32 r) 1440{ 1441 return (r >> 23U) & 0x1U; 1442} 1443static inline u32 nvl_intr_stall_en_rx_ila_trigger_enable_v(void) 1444{ 1445 return 0x00000001U; 1446} 1447static inline u32 nvl_intr_stall_en_rx_ila_trigger_enable_f(void) 1448{ 1449 return 0x800000U; 1450} 1451static inline u32 nvl_intr_stall_en_rx_crc_counter_f(u32 v) 1452{ 1453 return (v & 0x1U) << 24U; 1454} 1455static inline u32 nvl_intr_stall_en_rx_crc_counter_m(void) 1456{ 1457 return 0x1U << 24U; 1458} 1459static inline u32 nvl_intr_stall_en_rx_crc_counter_v(u32 r) 1460{ 1461 return (r >> 24U) & 0x1U; 1462} 1463static inline u32 nvl_intr_stall_en_rx_crc_counter_enable_v(void) 1464{ 1465 return 0x00000001U; 1466} 1467static inline u32 nvl_intr_stall_en_rx_crc_counter_enable_f(void) 1468{ 1469 return 0x1000000U; 1470} 1471static inline u32 nvl_intr_stall_en_ltssm_fault_f(u32 v) 1472{ 1473 return (v & 0x1U) << 28U; 1474} 1475static inline u32 nvl_intr_stall_en_ltssm_fault_m(void) 1476{ 1477 return 0x1U << 28U; 1478} 1479static inline u32 nvl_intr_stall_en_ltssm_fault_v(u32 r) 1480{ 1481 return (r >> 28U) & 0x1U; 1482} 1483static inline u32 nvl_intr_stall_en_ltssm_fault_enable_v(void) 1484{ 1485 return 0x00000001U; 1486} 1487static inline u32 nvl_intr_stall_en_ltssm_fault_enable_f(void) 1488{ 1489 return 0x10000000U; 1490} 1491static inline u32 nvl_intr_stall_en_ltssm_protocol_f(u32 v) 1492{ 1493 
return (v & 0x1U) << 29U; 1494} 1495static inline u32 nvl_intr_stall_en_ltssm_protocol_m(void) 1496{ 1497 return 0x1U << 29U; 1498} 1499static inline u32 nvl_intr_stall_en_ltssm_protocol_v(u32 r) 1500{ 1501 return (r >> 29U) & 0x1U; 1502} 1503static inline u32 nvl_intr_stall_en_ltssm_protocol_enable_v(void) 1504{ 1505 return 0x00000001U; 1506} 1507static inline u32 nvl_intr_stall_en_ltssm_protocol_enable_f(void) 1508{ 1509 return 0x20000000U; 1510} 1511static inline u32 nvl_intr_stall_en_minion_request_f(u32 v) 1512{ 1513 return (v & 0x1U) << 30U; 1514} 1515static inline u32 nvl_intr_stall_en_minion_request_m(void) 1516{ 1517 return 0x1U << 30U; 1518} 1519static inline u32 nvl_intr_stall_en_minion_request_v(u32 r) 1520{ 1521 return (r >> 30U) & 0x1U; 1522} 1523static inline u32 nvl_intr_stall_en_minion_request_enable_v(void) 1524{ 1525 return 0x00000001U; 1526} 1527static inline u32 nvl_intr_stall_en_minion_request_enable_f(void) 1528{ 1529 return 0x40000000U; 1530} 1531static inline u32 nvl_br0_cfg_cal_r(void) 1532{ 1533 return 0x0000281cU; 1534} 1535static inline u32 nvl_br0_cfg_cal_rxcal_f(u32 v) 1536{ 1537 return (v & 0x1U) << 0U; 1538} 1539static inline u32 nvl_br0_cfg_cal_rxcal_m(void) 1540{ 1541 return 0x1U << 0U; 1542} 1543static inline u32 nvl_br0_cfg_cal_rxcal_v(u32 r) 1544{ 1545 return (r >> 0U) & 0x1U; 1546} 1547static inline u32 nvl_br0_cfg_cal_rxcal_on_v(void) 1548{ 1549 return 0x00000001U; 1550} 1551static inline u32 nvl_br0_cfg_cal_rxcal_on_f(void) 1552{ 1553 return 0x1U; 1554} 1555static inline u32 nvl_br0_cfg_status_cal_r(void) 1556{ 1557 return 0x00002838U; 1558} 1559static inline u32 nvl_br0_cfg_status_cal_rxcal_done_f(u32 v) 1560{ 1561 return (v & 0x1U) << 2U; 1562} 1563static inline u32 nvl_br0_cfg_status_cal_rxcal_done_m(void) 1564{ 1565 return 0x1U << 2U; 1566} 1567static inline u32 nvl_br0_cfg_status_cal_rxcal_done_v(u32 r) 1568{ 1569 return (r >> 2U) & 0x1U; 1570} 1571#endif
diff --git a/include/nvgpu/hw/gv100/hw_nvlinkip_discovery_gv100.h b/include/nvgpu/hw/gv100/hw_nvlinkip_discovery_gv100.h
deleted file mode 100644
index 9d33a9f..0000000
--- a/include/nvgpu/hw/gv100/hw_nvlinkip_discovery_gv100.h
+++ /dev/null
@@ -1,311 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_nvlinkip_discovery_gv100_h_ 57#define _hw_nvlinkip_discovery_gv100_h_ 58 59static inline u32 nvlinkip_discovery_common_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 nvlinkip_discovery_common_entry_f(u32 v) 64{ 65 return (v & 0x3U) << 0U; 66} 67static inline u32 nvlinkip_discovery_common_entry_v(u32 r) 68{ 69 return (r >> 0U) & 0x3U; 70} 71static inline u32 nvlinkip_discovery_common_entry_invalid_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 nvlinkip_discovery_common_entry_enum_v(void) 76{ 77 return 0x00000001U; 78} 79static inline u32 nvlinkip_discovery_common_entry_data1_v(void) 80{ 81 return 0x00000002U; 82} 83static inline u32 nvlinkip_discovery_common_entry_data2_v(void) 84{ 85 return 0x00000003U; 86} 87static inline u32 nvlinkip_discovery_common_contents_f(u32 v) 88{ 89 return (v & 0x1fffffffU) << 2U; 90} 91static inline u32 nvlinkip_discovery_common_contents_v(u32 r) 92{ 93 return (r >> 2U) & 0x1fffffffU; 94} 95static inline u32 nvlinkip_discovery_common_chain_f(u32 v) 96{ 97 return (v & 0x1U) << 31U; 98} 99static inline u32 nvlinkip_discovery_common_chain_v(u32 r) 100{ 101 return (r >> 31U) & 0x1U; 102} 103static inline u32 nvlinkip_discovery_common_chain_enable_v(void) 104{ 105 return 0x00000001U; 
106} 107static inline u32 nvlinkip_discovery_common_device_f(u32 v) 108{ 109 return (v & 0x3fU) << 2U; 110} 111static inline u32 nvlinkip_discovery_common_device_v(u32 r) 112{ 113 return (r >> 2U) & 0x3fU; 114} 115static inline u32 nvlinkip_discovery_common_device_invalid_v(void) 116{ 117 return 0x00000000U; 118} 119static inline u32 nvlinkip_discovery_common_device_ioctrl_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 nvlinkip_discovery_common_device_nvltl_v(void) 124{ 125 return 0x00000002U; 126} 127static inline u32 nvlinkip_discovery_common_device_nvlink_v(void) 128{ 129 return 0x00000003U; 130} 131static inline u32 nvlinkip_discovery_common_device_minion_v(void) 132{ 133 return 0x00000004U; 134} 135static inline u32 nvlinkip_discovery_common_device_nvlipt_v(void) 136{ 137 return 0x00000005U; 138} 139static inline u32 nvlinkip_discovery_common_device_nvltlc_v(void) 140{ 141 return 0x00000006U; 142} 143static inline u32 nvlinkip_discovery_common_device_dlpl_v(void) 144{ 145 return 0x0000000bU; 146} 147static inline u32 nvlinkip_discovery_common_device_ioctrlmif_v(void) 148{ 149 return 0x00000007U; 150} 151static inline u32 nvlinkip_discovery_common_device_dlpl_multicast_v(void) 152{ 153 return 0x00000008U; 154} 155static inline u32 nvlinkip_discovery_common_device_nvltlc_multicast_v(void) 156{ 157 return 0x00000009U; 158} 159static inline u32 nvlinkip_discovery_common_device_ioctrlmif_multicast_v(void) 160{ 161 return 0x0000000aU; 162} 163static inline u32 nvlinkip_discovery_common_device_sioctrl_v(void) 164{ 165 return 0x0000000cU; 166} 167static inline u32 nvlinkip_discovery_common_device_tioctrl_v(void) 168{ 169 return 0x0000000dU; 170} 171static inline u32 nvlinkip_discovery_common_id_f(u32 v) 172{ 173 return (v & 0xffU) << 8U; 174} 175static inline u32 nvlinkip_discovery_common_id_v(u32 r) 176{ 177 return (r >> 8U) & 0xffU; 178} 179static inline u32 nvlinkip_discovery_common_version_f(u32 v) 180{ 181 return (v & 0x7ffU) << 20U; 182} 
183static inline u32 nvlinkip_discovery_common_version_v(u32 r) 184{ 185 return (r >> 20U) & 0x7ffU; 186} 187static inline u32 nvlinkip_discovery_common_pri_base_f(u32 v) 188{ 189 return (v & 0xfffU) << 12U; 190} 191static inline u32 nvlinkip_discovery_common_pri_base_v(u32 r) 192{ 193 return (r >> 12U) & 0xfffU; 194} 195static inline u32 nvlinkip_discovery_common_intr_f(u32 v) 196{ 197 return (v & 0x1fU) << 7U; 198} 199static inline u32 nvlinkip_discovery_common_intr_v(u32 r) 200{ 201 return (r >> 7U) & 0x1fU; 202} 203static inline u32 nvlinkip_discovery_common_reset_f(u32 v) 204{ 205 return (v & 0x1fU) << 2U; 206} 207static inline u32 nvlinkip_discovery_common_reset_v(u32 r) 208{ 209 return (r >> 2U) & 0x1fU; 210} 211static inline u32 nvlinkip_discovery_common_ioctrl_length_f(u32 v) 212{ 213 return (v & 0x3fU) << 24U; 214} 215static inline u32 nvlinkip_discovery_common_ioctrl_length_v(u32 r) 216{ 217 return (r >> 24U) & 0x3fU; 218} 219static inline u32 nvlinkip_discovery_common_dlpl_num_tx_f(u32 v) 220{ 221 return (v & 0x7U) << 24U; 222} 223static inline u32 nvlinkip_discovery_common_dlpl_num_tx_v(u32 r) 224{ 225 return (r >> 24U) & 0x7U; 226} 227static inline u32 nvlinkip_discovery_common_dlpl_num_rx_f(u32 v) 228{ 229 return (v & 0x7U) << 27U; 230} 231static inline u32 nvlinkip_discovery_common_dlpl_num_rx_v(u32 r) 232{ 233 return (r >> 27U) & 0x7U; 234} 235static inline u32 nvlinkip_discovery_common_data1_ioctrl_length_f(u32 v) 236{ 237 return (v & 0x7ffffU) << 12U; 238} 239static inline u32 nvlinkip_discovery_common_data1_ioctrl_length_v(u32 r) 240{ 241 return (r >> 12U) & 0x7ffffU; 242} 243static inline u32 nvlinkip_discovery_common_data2_type_f(u32 v) 244{ 245 return (v & 0x1fU) << 26U; 246} 247static inline u32 nvlinkip_discovery_common_data2_type_v(u32 r) 248{ 249 return (r >> 26U) & 0x1fU; 250} 251static inline u32 nvlinkip_discovery_common_data2_type_invalid_v(void) 252{ 253 return 0x00000000U; 254} 255static inline u32 
nvlinkip_discovery_common_data2_type_pllcontrol_v(void) 256{ 257 return 0x00000001U; 258} 259static inline u32 nvlinkip_discovery_common_data2_type_resetreg_v(void) 260{ 261 return 0x00000002U; 262} 263static inline u32 nvlinkip_discovery_common_data2_type_intrreg_v(void) 264{ 265 return 0x00000003U; 266} 267static inline u32 nvlinkip_discovery_common_data2_type_discovery_v(void) 268{ 269 return 0x00000004U; 270} 271static inline u32 nvlinkip_discovery_common_data2_type_unicast_v(void) 272{ 273 return 0x00000005U; 274} 275static inline u32 nvlinkip_discovery_common_data2_type_broadcast_v(void) 276{ 277 return 0x00000006U; 278} 279static inline u32 nvlinkip_discovery_common_data2_addr_f(u32 v) 280{ 281 return (v & 0xffffffU) << 2U; 282} 283static inline u32 nvlinkip_discovery_common_data2_addr_v(u32 r) 284{ 285 return (r >> 2U) & 0xffffffU; 286} 287static inline u32 nvlinkip_discovery_common_dlpl_data2_type_f(u32 v) 288{ 289 return (v & 0x1fU) << 26U; 290} 291static inline u32 nvlinkip_discovery_common_dlpl_data2_type_v(u32 r) 292{ 293 return (r >> 26U) & 0x1fU; 294} 295static inline u32 nvlinkip_discovery_common_dlpl_data2_master_f(u32 v) 296{ 297 return (v & 0x1U) << 15U; 298} 299static inline u32 nvlinkip_discovery_common_dlpl_data2_master_v(u32 r) 300{ 301 return (r >> 15U) & 0x1U; 302} 303static inline u32 nvlinkip_discovery_common_dlpl_data2_masterid_f(u32 v) 304{ 305 return (v & 0x7fU) << 8U; 306} 307static inline u32 nvlinkip_discovery_common_dlpl_data2_masterid_v(u32 r) 308{ 309 return (r >> 8U) & 0x7fU; 310} 311#endif
diff --git a/include/nvgpu/hw/gv100/hw_nvlipt_gv100.h b/include/nvgpu/hw/gv100/hw_nvlipt_gv100.h
deleted file mode 100644
index 5f73fab..0000000
--- a/include/nvgpu/hw/gv100/hw_nvlipt_gv100.h
+++ /dev/null
@@ -1,279 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_nvlipt_gv100_h_ 57#define _hw_nvlipt_gv100_h_ 58 59static inline u32 nvlipt_intr_control_link0_r(void) 60{ 61 return 0x000004b4U; 62} 63static inline u32 nvlipt_intr_control_link0_stallenable_f(u32 v) 64{ 65 return (v & 0x1U) << 0U; 66} 67static inline u32 nvlipt_intr_control_link0_stallenable_m(void) 68{ 69 return 0x1U << 0U; 70} 71static inline u32 nvlipt_intr_control_link0_stallenable_v(u32 r) 72{ 73 return (r >> 0U) & 0x1U; 74} 75static inline u32 nvlipt_intr_control_link0_nostallenable_f(u32 v) 76{ 77 return (v & 0x1U) << 1U; 78} 79static inline u32 nvlipt_intr_control_link0_nostallenable_m(void) 80{ 81 return 0x1U << 1U; 82} 83static inline u32 nvlipt_intr_control_link0_nostallenable_v(u32 r) 84{ 85 return (r >> 1U) & 0x1U; 86} 87static inline u32 nvlipt_err_uc_status_link0_r(void) 88{ 89 return 0x00000524U; 90} 91static inline u32 nvlipt_err_uc_status_link0_dlprotocol_f(u32 v) 92{ 93 return (v & 0x1U) << 4U; 94} 95static inline u32 nvlipt_err_uc_status_link0_dlprotocol_v(u32 r) 96{ 97 return (r >> 4U) & 0x1U; 98} 99static inline u32 nvlipt_err_uc_status_link0_datapoisoned_f(u32 v) 100{ 101 return (v & 0x1U) << 12U; 102} 103static inline u32 nvlipt_err_uc_status_link0_datapoisoned_v(u32 r) 104{ 105 return (r >> 12U) & 0x1U; 
106} 107static inline u32 nvlipt_err_uc_status_link0_flowcontrol_f(u32 v) 108{ 109 return (v & 0x1U) << 13U; 110} 111static inline u32 nvlipt_err_uc_status_link0_flowcontrol_v(u32 r) 112{ 113 return (r >> 13U) & 0x1U; 114} 115static inline u32 nvlipt_err_uc_status_link0_responsetimeout_f(u32 v) 116{ 117 return (v & 0x1U) << 14U; 118} 119static inline u32 nvlipt_err_uc_status_link0_responsetimeout_v(u32 r) 120{ 121 return (r >> 14U) & 0x1U; 122} 123static inline u32 nvlipt_err_uc_status_link0_targeterror_f(u32 v) 124{ 125 return (v & 0x1U) << 15U; 126} 127static inline u32 nvlipt_err_uc_status_link0_targeterror_v(u32 r) 128{ 129 return (r >> 15U) & 0x1U; 130} 131static inline u32 nvlipt_err_uc_status_link0_unexpectedresponse_f(u32 v) 132{ 133 return (v & 0x1U) << 16U; 134} 135static inline u32 nvlipt_err_uc_status_link0_unexpectedresponse_v(u32 r) 136{ 137 return (r >> 16U) & 0x1U; 138} 139static inline u32 nvlipt_err_uc_status_link0_receiveroverflow_f(u32 v) 140{ 141 return (v & 0x1U) << 17U; 142} 143static inline u32 nvlipt_err_uc_status_link0_receiveroverflow_v(u32 r) 144{ 145 return (r >> 17U) & 0x1U; 146} 147static inline u32 nvlipt_err_uc_status_link0_malformedpacket_f(u32 v) 148{ 149 return (v & 0x1U) << 18U; 150} 151static inline u32 nvlipt_err_uc_status_link0_malformedpacket_v(u32 r) 152{ 153 return (r >> 18U) & 0x1U; 154} 155static inline u32 nvlipt_err_uc_status_link0_stompedpacketreceived_f(u32 v) 156{ 157 return (v & 0x1U) << 19U; 158} 159static inline u32 nvlipt_err_uc_status_link0_stompedpacketreceived_v(u32 r) 160{ 161 return (r >> 19U) & 0x1U; 162} 163static inline u32 nvlipt_err_uc_status_link0_unsupportedrequest_f(u32 v) 164{ 165 return (v & 0x1U) << 20U; 166} 167static inline u32 nvlipt_err_uc_status_link0_unsupportedrequest_v(u32 r) 168{ 169 return (r >> 20U) & 0x1U; 170} 171static inline u32 nvlipt_err_uc_status_link0_ucinternal_f(u32 v) 172{ 173 return (v & 0x1U) << 22U; 174} 175static inline u32 nvlipt_err_uc_status_link0_ucinternal_v(u32 r) 
176{ 177 return (r >> 22U) & 0x1U; 178} 179static inline u32 nvlipt_err_uc_mask_link0_r(void) 180{ 181 return 0x00000528U; 182} 183static inline u32 nvlipt_err_uc_severity_link0_r(void) 184{ 185 return 0x0000052cU; 186} 187static inline u32 nvlipt_err_uc_first_link0_r(void) 188{ 189 return 0x00000530U; 190} 191static inline u32 nvlipt_err_uc_advisory_link0_r(void) 192{ 193 return 0x00000534U; 194} 195static inline u32 nvlipt_err_c_status_link0_r(void) 196{ 197 return 0x00000538U; 198} 199static inline u32 nvlipt_err_c_mask_link0_r(void) 200{ 201 return 0x0000053cU; 202} 203static inline u32 nvlipt_err_c_first_link0_r(void) 204{ 205 return 0x00000540U; 206} 207static inline u32 nvlipt_err_control_link0_r(void) 208{ 209 return 0x00000544U; 210} 211static inline u32 nvlipt_err_control_link0_fatalenable_f(u32 v) 212{ 213 return (v & 0x1U) << 1U; 214} 215static inline u32 nvlipt_err_control_link0_fatalenable_m(void) 216{ 217 return 0x1U << 1U; 218} 219static inline u32 nvlipt_err_control_link0_fatalenable_v(u32 r) 220{ 221 return (r >> 1U) & 0x1U; 222} 223static inline u32 nvlipt_err_control_link0_nonfatalenable_f(u32 v) 224{ 225 return (v & 0x1U) << 2U; 226} 227static inline u32 nvlipt_err_control_link0_nonfatalenable_m(void) 228{ 229 return 0x1U << 2U; 230} 231static inline u32 nvlipt_err_control_link0_nonfatalenable_v(u32 r) 232{ 233 return (r >> 2U) & 0x1U; 234} 235static inline u32 nvlipt_intr_control_common_r(void) 236{ 237 return 0x000004b0U; 238} 239static inline u32 nvlipt_intr_control_common_stallenable_f(u32 v) 240{ 241 return (v & 0x1U) << 0U; 242} 243static inline u32 nvlipt_intr_control_common_stallenable_m(void) 244{ 245 return 0x1U << 0U; 246} 247static inline u32 nvlipt_intr_control_common_stallenable_v(u32 r) 248{ 249 return (r >> 0U) & 0x1U; 250} 251static inline u32 nvlipt_intr_control_common_nonstallenable_f(u32 v) 252{ 253 return (v & 0x1U) << 1U; 254} 255static inline u32 nvlipt_intr_control_common_nonstallenable_m(void) 256{ 257 return 0x1U << 
1U; 258} 259static inline u32 nvlipt_intr_control_common_nonstallenable_v(u32 r) 260{ 261 return (r >> 1U) & 0x1U; 262} 263static inline u32 nvlipt_scratch_cold_r(void) 264{ 265 return 0x000007d4U; 266} 267static inline u32 nvlipt_scratch_cold_data_f(u32 v) 268{ 269 return (v & 0xffffffffU) << 0U; 270} 271static inline u32 nvlipt_scratch_cold_data_v(u32 r) 272{ 273 return (r >> 0U) & 0xffffffffU; 274} 275static inline u32 nvlipt_scratch_cold_data_init_v(void) 276{ 277 return 0xdeadbaadU; 278} 279#endif
diff --git a/include/nvgpu/hw/gv100/hw_nvtlc_gv100.h b/include/nvgpu/hw/gv100/hw_nvtlc_gv100.h
deleted file mode 100644
index cc31b12..0000000
--- a/include/nvgpu/hw/gv100/hw_nvtlc_gv100.h
+++ /dev/null
@@ -1,95 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_nvtlc_gv100_h_ 57#define _hw_nvtlc_gv100_h_ 58 59static inline u32 nvtlc_tx_err_report_en_0_r(void) 60{ 61 return 0x00000708U; 62} 63static inline u32 nvtlc_rx_err_report_en_0_r(void) 64{ 65 return 0x00000f08U; 66} 67static inline u32 nvtlc_rx_err_report_en_1_r(void) 68{ 69 return 0x00000f20U; 70} 71static inline u32 nvtlc_tx_err_status_0_r(void) 72{ 73 return 0x00000700U; 74} 75static inline u32 nvtlc_rx_err_status_0_r(void) 76{ 77 return 0x00000f00U; 78} 79static inline u32 nvtlc_rx_err_status_1_r(void) 80{ 81 return 0x00000f18U; 82} 83static inline u32 nvtlc_tx_err_first_0_r(void) 84{ 85 return 0x00000714U; 86} 87static inline u32 nvtlc_rx_err_first_0_r(void) 88{ 89 return 0x00000f14U; 90} 91static inline u32 nvtlc_rx_err_first_1_r(void) 92{ 93 return 0x00000f2cU; 94} 95#endif
diff --git a/include/nvgpu/hw/gv100/hw_pbdma_gv100.h b/include/nvgpu/hw/gv100/hw_pbdma_gv100.h
deleted file mode 100644
index 41d7d1b..0000000
--- a/include/nvgpu/hw/gv100/hw_pbdma_gv100.h
+++ /dev/null
@@ -1,651 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pbdma_gv100_h_ 57#define _hw_pbdma_gv100_h_ 58 59static inline u32 pbdma_gp_entry1_r(void) 60{ 61 return 0x10000004U; 62} 63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) 64{ 65 return (r >> 0U) & 0xffU; 66} 67static inline u32 pbdma_gp_entry1_length_f(u32 v) 68{ 69 return (v & 0x1fffffU) << 10U; 70} 71static inline u32 pbdma_gp_entry1_length_v(u32 r) 72{ 73 return (r >> 10U) & 0x1fffffU; 74} 75static inline u32 pbdma_gp_base_r(u32 i) 76{ 77 return 0x00040048U + i*8192U; 78} 79static inline u32 pbdma_gp_base__size_1_v(void) 80{ 81 return 0x0000000eU; 82} 83static inline u32 pbdma_gp_base_offset_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 3U; 86} 87static inline u32 pbdma_gp_base_rsvd_s(void) 88{ 89 return 3U; 90} 91static inline u32 pbdma_gp_base_hi_r(u32 i) 92{ 93 return 0x0004004cU + i*8192U; 94} 95static inline u32 pbdma_gp_base_hi_offset_f(u32 v) 96{ 97 return (v & 0xffU) << 0U; 98} 99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) 100{ 101 return (v & 0x1fU) << 16U; 102} 103static inline u32 pbdma_gp_fetch_r(u32 i) 104{ 105 return 0x00040050U + i*8192U; 106} 107static inline u32 pbdma_gp_get_r(u32 i) 108{ 109 return 0x00040014U + i*8192U; 110} 111static inline u32 pbdma_gp_put_r(u32 i) 112{ 113 return 0x00040000U + 
i*8192U; 114} 115static inline u32 pbdma_pb_fetch_r(u32 i) 116{ 117 return 0x00040054U + i*8192U; 118} 119static inline u32 pbdma_pb_fetch_hi_r(u32 i) 120{ 121 return 0x00040058U + i*8192U; 122} 123static inline u32 pbdma_get_r(u32 i) 124{ 125 return 0x00040018U + i*8192U; 126} 127static inline u32 pbdma_get_hi_r(u32 i) 128{ 129 return 0x0004001cU + i*8192U; 130} 131static inline u32 pbdma_put_r(u32 i) 132{ 133 return 0x0004005cU + i*8192U; 134} 135static inline u32 pbdma_put_hi_r(u32 i) 136{ 137 return 0x00040060U + i*8192U; 138} 139static inline u32 pbdma_pb_header_r(u32 i) 140{ 141 return 0x00040084U + i*8192U; 142} 143static inline u32 pbdma_pb_header_priv_user_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 pbdma_pb_header_method_zero_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 pbdma_pb_header_subchannel_zero_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 pbdma_pb_header_level_main_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 pbdma_pb_header_first_true_f(void) 160{ 161 return 0x400000U; 162} 163static inline u32 pbdma_pb_header_type_inc_f(void) 164{ 165 return 0x20000000U; 166} 167static inline u32 pbdma_pb_header_type_non_inc_f(void) 168{ 169 return 0x60000000U; 170} 171static inline u32 pbdma_hdr_shadow_r(u32 i) 172{ 173 return 0x00040118U + i*8192U; 174} 175static inline u32 pbdma_gp_shadow_0_r(u32 i) 176{ 177 return 0x00040110U + i*8192U; 178} 179static inline u32 pbdma_gp_shadow_1_r(u32 i) 180{ 181 return 0x00040114U + i*8192U; 182} 183static inline u32 pbdma_subdevice_r(u32 i) 184{ 185 return 0x00040094U + i*8192U; 186} 187static inline u32 pbdma_subdevice_id_f(u32 v) 188{ 189 return (v & 0xfffU) << 0U; 190} 191static inline u32 pbdma_subdevice_status_active_f(void) 192{ 193 return 0x10000000U; 194} 195static inline u32 pbdma_subdevice_channel_dma_enable_f(void) 196{ 197 return 0x20000000U; 198} 199static inline u32 pbdma_method0_r(u32 i) 200{ 201 return 0x000400c0U + i*8192U; 202} 203static inline u32 
pbdma_method0_fifo_size_v(void) 204{ 205 return 0x00000004U; 206} 207static inline u32 pbdma_method0_addr_f(u32 v) 208{ 209 return (v & 0xfffU) << 2U; 210} 211static inline u32 pbdma_method0_addr_v(u32 r) 212{ 213 return (r >> 2U) & 0xfffU; 214} 215static inline u32 pbdma_method0_subch_v(u32 r) 216{ 217 return (r >> 16U) & 0x7U; 218} 219static inline u32 pbdma_method0_first_true_f(void) 220{ 221 return 0x400000U; 222} 223static inline u32 pbdma_method0_valid_true_f(void) 224{ 225 return 0x80000000U; 226} 227static inline u32 pbdma_method1_r(u32 i) 228{ 229 return 0x000400c8U + i*8192U; 230} 231static inline u32 pbdma_method2_r(u32 i) 232{ 233 return 0x000400d0U + i*8192U; 234} 235static inline u32 pbdma_method3_r(u32 i) 236{ 237 return 0x000400d8U + i*8192U; 238} 239static inline u32 pbdma_data0_r(u32 i) 240{ 241 return 0x000400c4U + i*8192U; 242} 243static inline u32 pbdma_acquire_r(u32 i) 244{ 245 return 0x00040030U + i*8192U; 246} 247static inline u32 pbdma_acquire_retry_man_2_f(void) 248{ 249 return 0x2U; 250} 251static inline u32 pbdma_acquire_retry_exp_2_f(void) 252{ 253 return 0x100U; 254} 255static inline u32 pbdma_acquire_timeout_exp_f(u32 v) 256{ 257 return (v & 0xfU) << 11U; 258} 259static inline u32 pbdma_acquire_timeout_exp_max_v(void) 260{ 261 return 0x0000000fU; 262} 263static inline u32 pbdma_acquire_timeout_exp_max_f(void) 264{ 265 return 0x7800U; 266} 267static inline u32 pbdma_acquire_timeout_man_f(u32 v) 268{ 269 return (v & 0xffffU) << 15U; 270} 271static inline u32 pbdma_acquire_timeout_man_max_v(void) 272{ 273 return 0x0000ffffU; 274} 275static inline u32 pbdma_acquire_timeout_man_max_f(void) 276{ 277 return 0x7fff8000U; 278} 279static inline u32 pbdma_acquire_timeout_en_enable_f(void) 280{ 281 return 0x80000000U; 282} 283static inline u32 pbdma_acquire_timeout_en_disable_f(void) 284{ 285 return 0x0U; 286} 287static inline u32 pbdma_status_r(u32 i) 288{ 289 return 0x00040100U + i*8192U; 290} 291static inline u32 pbdma_channel_r(u32 i) 292{ 
293 return 0x00040120U + i*8192U; 294} 295static inline u32 pbdma_signature_r(u32 i) 296{ 297 return 0x00040010U + i*8192U; 298} 299static inline u32 pbdma_signature_hw_valid_f(void) 300{ 301 return 0xfaceU; 302} 303static inline u32 pbdma_signature_sw_zero_f(void) 304{ 305 return 0x0U; 306} 307static inline u32 pbdma_userd_r(u32 i) 308{ 309 return 0x00040008U + i*8192U; 310} 311static inline u32 pbdma_userd_target_vid_mem_f(void) 312{ 313 return 0x0U; 314} 315static inline u32 pbdma_userd_target_sys_mem_coh_f(void) 316{ 317 return 0x2U; 318} 319static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) 320{ 321 return 0x3U; 322} 323static inline u32 pbdma_userd_addr_f(u32 v) 324{ 325 return (v & 0x7fffffU) << 9U; 326} 327static inline u32 pbdma_config_r(u32 i) 328{ 329 return 0x000400f4U + i*8192U; 330} 331static inline u32 pbdma_config_l2_evict_first_f(void) 332{ 333 return 0x0U; 334} 335static inline u32 pbdma_config_l2_evict_normal_f(void) 336{ 337 return 0x1U; 338} 339static inline u32 pbdma_config_ce_split_enable_f(void) 340{ 341 return 0x0U; 342} 343static inline u32 pbdma_config_ce_split_disable_f(void) 344{ 345 return 0x10U; 346} 347static inline u32 pbdma_config_auth_level_non_privileged_f(void) 348{ 349 return 0x0U; 350} 351static inline u32 pbdma_config_auth_level_privileged_f(void) 352{ 353 return 0x100U; 354} 355static inline u32 pbdma_config_userd_writeback_disable_f(void) 356{ 357 return 0x0U; 358} 359static inline u32 pbdma_config_userd_writeback_enable_f(void) 360{ 361 return 0x1000U; 362} 363static inline u32 pbdma_userd_hi_r(u32 i) 364{ 365 return 0x0004000cU + i*8192U; 366} 367static inline u32 pbdma_userd_hi_addr_f(u32 v) 368{ 369 return (v & 0xffU) << 0U; 370} 371static inline u32 pbdma_hce_ctrl_r(u32 i) 372{ 373 return 0x000400e4U + i*8192U; 374} 375static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) 376{ 377 return 0x20U; 378} 379static inline u32 pbdma_intr_0_r(u32 i) 380{ 381 return 0x00040108U + i*8192U; 382} 383static inline u32 
pbdma_intr_0_memreq_v(u32 r) 384{ 385 return (r >> 0U) & 0x1U; 386} 387static inline u32 pbdma_intr_0_memreq_pending_f(void) 388{ 389 return 0x1U; 390} 391static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) 392{ 393 return 0x2U; 394} 395static inline u32 pbdma_intr_0_memack_extra_pending_f(void) 396{ 397 return 0x4U; 398} 399static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) 400{ 401 return 0x8U; 402} 403static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) 404{ 405 return 0x10U; 406} 407static inline u32 pbdma_intr_0_memflush_pending_f(void) 408{ 409 return 0x20U; 410} 411static inline u32 pbdma_intr_0_memop_pending_f(void) 412{ 413 return 0x40U; 414} 415static inline u32 pbdma_intr_0_lbconnect_pending_f(void) 416{ 417 return 0x80U; 418} 419static inline u32 pbdma_intr_0_lbreq_pending_f(void) 420{ 421 return 0x100U; 422} 423static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) 424{ 425 return 0x200U; 426} 427static inline u32 pbdma_intr_0_lback_extra_pending_f(void) 428{ 429 return 0x400U; 430} 431static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) 432{ 433 return 0x800U; 434} 435static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) 436{ 437 return 0x1000U; 438} 439static inline u32 pbdma_intr_0_gpfifo_pending_f(void) 440{ 441 return 0x2000U; 442} 443static inline u32 pbdma_intr_0_gpptr_pending_f(void) 444{ 445 return 0x4000U; 446} 447static inline u32 pbdma_intr_0_gpentry_pending_f(void) 448{ 449 return 0x8000U; 450} 451static inline u32 pbdma_intr_0_gpcrc_pending_f(void) 452{ 453 return 0x10000U; 454} 455static inline u32 pbdma_intr_0_pbptr_pending_f(void) 456{ 457 return 0x20000U; 458} 459static inline u32 pbdma_intr_0_pbentry_pending_f(void) 460{ 461 return 0x40000U; 462} 463static inline u32 pbdma_intr_0_pbcrc_pending_f(void) 464{ 465 return 0x80000U; 466} 467static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void) 468{ 469 return 0x100000U; 470} 471static inline u32 pbdma_intr_0_method_pending_f(void) 
472{ 473 return 0x200000U; 474} 475static inline u32 pbdma_intr_0_methodcrc_pending_f(void) 476{ 477 return 0x400000U; 478} 479static inline u32 pbdma_intr_0_device_pending_f(void) 480{ 481 return 0x800000U; 482} 483static inline u32 pbdma_intr_0_eng_reset_pending_f(void) 484{ 485 return 0x1000000U; 486} 487static inline u32 pbdma_intr_0_semaphore_pending_f(void) 488{ 489 return 0x2000000U; 490} 491static inline u32 pbdma_intr_0_acquire_pending_f(void) 492{ 493 return 0x4000000U; 494} 495static inline u32 pbdma_intr_0_pri_pending_f(void) 496{ 497 return 0x8000000U; 498} 499static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) 500{ 501 return 0x20000000U; 502} 503static inline u32 pbdma_intr_0_pbseg_pending_f(void) 504{ 505 return 0x40000000U; 506} 507static inline u32 pbdma_intr_0_signature_pending_f(void) 508{ 509 return 0x80000000U; 510} 511static inline u32 pbdma_intr_1_r(u32 i) 512{ 513 return 0x00040148U + i*8192U; 514} 515static inline u32 pbdma_intr_1_ctxnotvalid_m(void) 516{ 517 return 0x1U << 31U; 518} 519static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void) 520{ 521 return 0x80000000U; 522} 523static inline u32 pbdma_intr_en_0_r(u32 i) 524{ 525 return 0x0004010cU + i*8192U; 526} 527static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) 528{ 529 return 0x100U; 530} 531static inline u32 pbdma_intr_en_1_r(u32 i) 532{ 533 return 0x0004014cU + i*8192U; 534} 535static inline u32 pbdma_intr_stall_r(u32 i) 536{ 537 return 0x0004013cU + i*8192U; 538} 539static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) 540{ 541 return 0x100U; 542} 543static inline u32 pbdma_intr_stall_1_r(u32 i) 544{ 545 return 0x00040140U + i*8192U; 546} 547static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void) 548{ 549 return 0x1U; 550} 551static inline u32 pbdma_udma_nop_r(void) 552{ 553 return 0x00000008U; 554} 555static inline u32 pbdma_runlist_timeslice_r(u32 i) 556{ 557 return 0x000400f8U + i*8192U; 558} 559static inline u32 
pbdma_runlist_timeslice_timeout_128_f(void) 560{ 561 return 0x80U; 562} 563static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) 564{ 565 return 0x3000U; 566} 567static inline u32 pbdma_runlist_timeslice_enable_true_f(void) 568{ 569 return 0x10000000U; 570} 571static inline u32 pbdma_target_r(u32 i) 572{ 573 return 0x000400acU + i*8192U; 574} 575static inline u32 pbdma_target_engine_sw_f(void) 576{ 577 return 0x1fU; 578} 579static inline u32 pbdma_target_eng_ctx_valid_true_f(void) 580{ 581 return 0x10000U; 582} 583static inline u32 pbdma_target_eng_ctx_valid_false_f(void) 584{ 585 return 0x0U; 586} 587static inline u32 pbdma_target_ce_ctx_valid_true_f(void) 588{ 589 return 0x20000U; 590} 591static inline u32 pbdma_target_ce_ctx_valid_false_f(void) 592{ 593 return 0x0U; 594} 595static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void) 600{ 601 return 0x1000000U; 602} 603static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void) 604{ 605 return 0x2000000U; 606} 607static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void) 608{ 609 return 0x3000000U; 610} 611static inline u32 pbdma_target_should_send_tsg_event_true_f(void) 612{ 613 return 0x20000000U; 614} 615static inline u32 pbdma_target_should_send_tsg_event_false_f(void) 616{ 617 return 0x0U; 618} 619static inline u32 pbdma_target_needs_host_tsg_event_true_f(void) 620{ 621 return 0x80000000U; 622} 623static inline u32 pbdma_target_needs_host_tsg_event_false_f(void) 624{ 625 return 0x0U; 626} 627static inline u32 pbdma_set_channel_info_r(u32 i) 628{ 629 return 0x000400fcU + i*8192U; 630} 631static inline u32 pbdma_set_channel_info_veid_f(u32 v) 632{ 633 return (v & 0x3fU) << 8U; 634} 635static inline u32 pbdma_timeout_r(u32 i) 636{ 637 return 0x0004012cU + i*8192U; 638} 639static inline u32 pbdma_timeout_period_m(void) 640{ 641 return 
0xffffffffU << 0U; 642} 643static inline u32 pbdma_timeout_period_max_f(void) 644{ 645 return 0xffffffffU; 646} 647static inline u32 pbdma_timeout_period_init_f(void) 648{ 649 return 0x10000U; 650} 651#endif
diff --git a/include/nvgpu/hw/gv100/hw_perf_gv100.h b/include/nvgpu/hw/gv100/hw_perf_gv100.h
deleted file mode 100644
index 40107ee..0000000
--- a/include/nvgpu/hw/gv100/hw_perf_gv100.h
+++ /dev/null
@@ -1,263 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_perf_gv100_h_ 57#define _hw_perf_gv100_h_ 58 59static inline u32 perf_pmmgpc_perdomain_offset_v(void) 60{ 61 return 0x00000200U; 62} 63static inline u32 perf_pmmsys_perdomain_offset_v(void) 64{ 65 return 0x00000200U; 66} 67static inline u32 perf_pmmgpc_base_v(void) 68{ 69 return 0x00180000U; 70} 71static inline u32 perf_pmmgpc_extent_v(void) 72{ 73 return 0x00183fffU; 74} 75static inline u32 perf_pmmsys_base_v(void) 76{ 77 return 0x00240000U; 78} 79static inline u32 perf_pmmsys_extent_v(void) 80{ 81 return 0x00243fffU; 82} 83static inline u32 perf_pmmfbp_base_v(void) 84{ 85 return 0x00200000U; 86} 87static inline u32 perf_pmasys_control_r(void) 88{ 89 return 0x0024a000U; 90} 91static inline u32 perf_pmasys_control_membuf_status_v(u32 r) 92{ 93 return (r >> 4U) & 0x1U; 94} 95static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) 100{ 101 return 0x10U; 102} 103static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) 104{ 105 return (v & 0x1U) << 5U; 106} 107static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) 108{ 109 return (r >> 5U) & 0x1U; 110} 111static inline u32 
perf_pmasys_control_membuf_clear_status_doit_v(void) 112{ 113 return 0x00000001U; 114} 115static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) 116{ 117 return 0x20U; 118} 119static inline u32 perf_pmasys_mem_block_r(void) 120{ 121 return 0x0024a070U; 122} 123static inline u32 perf_pmasys_mem_block_base_f(u32 v) 124{ 125 return (v & 0xfffffffU) << 0U; 126} 127static inline u32 perf_pmasys_mem_block_target_f(u32 v) 128{ 129 return (v & 0x3U) << 28U; 130} 131static inline u32 perf_pmasys_mem_block_target_v(u32 r) 132{ 133 return (r >> 28U) & 0x3U; 134} 135static inline u32 perf_pmasys_mem_block_target_lfb_v(void) 136{ 137 return 0x00000000U; 138} 139static inline u32 perf_pmasys_mem_block_target_lfb_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) 144{ 145 return 0x00000002U; 146} 147static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) 148{ 149 return 0x20000000U; 150} 151static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) 152{ 153 return 0x00000003U; 154} 155static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) 156{ 157 return 0x30000000U; 158} 159static inline u32 perf_pmasys_mem_block_valid_f(u32 v) 160{ 161 return (v & 0x1U) << 31U; 162} 163static inline u32 perf_pmasys_mem_block_valid_v(u32 r) 164{ 165 return (r >> 31U) & 0x1U; 166} 167static inline u32 perf_pmasys_mem_block_valid_true_v(void) 168{ 169 return 0x00000001U; 170} 171static inline u32 perf_pmasys_mem_block_valid_true_f(void) 172{ 173 return 0x80000000U; 174} 175static inline u32 perf_pmasys_mem_block_valid_false_v(void) 176{ 177 return 0x00000000U; 178} 179static inline u32 perf_pmasys_mem_block_valid_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 perf_pmasys_outbase_r(void) 184{ 185 return 0x0024a074U; 186} 187static inline u32 perf_pmasys_outbase_ptr_f(u32 v) 188{ 189 return (v & 0x7ffffffU) << 5U; 190} 191static inline u32 perf_pmasys_outbaseupper_r(void) 192{ 193 return 
0x0024a078U; 194} 195static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) 196{ 197 return (v & 0xffU) << 0U; 198} 199static inline u32 perf_pmasys_outsize_r(void) 200{ 201 return 0x0024a07cU; 202} 203static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) 204{ 205 return (v & 0x7ffffffU) << 5U; 206} 207static inline u32 perf_pmasys_mem_bytes_r(void) 208{ 209 return 0x0024a084U; 210} 211static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) 212{ 213 return (v & 0xfffffffU) << 4U; 214} 215static inline u32 perf_pmasys_mem_bump_r(void) 216{ 217 return 0x0024a088U; 218} 219static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) 220{ 221 return (v & 0xfffffffU) << 4U; 222} 223static inline u32 perf_pmasys_enginestatus_r(void) 224{ 225 return 0x0024a0a4U; 226} 227static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) 228{ 229 return (v & 0x1U) << 4U; 230} 231static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) 232{ 233 return 0x00000001U; 234} 235static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) 236{ 237 return 0x10U; 238} 239static inline u32 perf_pmmsys_engine_sel_r(u32 i) 240{ 241 return 0x0024006cU + i*512U; 242} 243static inline u32 perf_pmmsys_engine_sel__size_1_v(void) 244{ 245 return 0x00000020U; 246} 247static inline u32 perf_pmmfbp_engine_sel_r(u32 i) 248{ 249 return 0x0020006cU + i*512U; 250} 251static inline u32 perf_pmmfbp_engine_sel__size_1_v(void) 252{ 253 return 0x00000020U; 254} 255static inline u32 perf_pmmgpc_engine_sel_r(u32 i) 256{ 257 return 0x0018006cU + i*512U; 258} 259static inline u32 perf_pmmgpc_engine_sel__size_1_v(void) 260{ 261 return 0x00000020U; 262} 263#endif
diff --git a/include/nvgpu/hw/gv100/hw_pgsp_gv100.h b/include/nvgpu/hw/gv100/hw_pgsp_gv100.h
deleted file mode 100644
index 34d0eae..0000000
--- a/include/nvgpu/hw/gv100/hw_pgsp_gv100.h
+++ /dev/null
@@ -1,643 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pgsp_gv100_h_ 57#define _hw_pgsp_gv100_h_ 58 59static inline u32 pgsp_falcon_irqsset_r(void) 60{ 61 return 0x00110000U; 62} 63static inline u32 pgsp_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pgsp_falcon_irqsclr_r(void) 68{ 69 return 0x00110004U; 70} 71static inline u32 pgsp_falcon_irqstat_r(void) 72{ 73 return 0x00110008U; 74} 75static inline u32 pgsp_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pgsp_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pgsp_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pgsp_falcon_irqmode_r(void) 88{ 89 return 0x0011000cU; 90} 91static inline u32 pgsp_falcon_irqmset_r(void) 92{ 93 return 0x00110010U; 94} 95static inline u32 pgsp_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 pgsp_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 pgsp_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 pgsp_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 pgsp_falcon_irqmset_halt_f(u32 v) 112{ 113 return (v & 0x1U) << 4U; 114} 
115static inline u32 pgsp_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 pgsp_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 pgsp_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 pgsp_falcon_irqmclr_r(void) 128{ 129 return 0x00110014U; 130} 131static inline u32 pgsp_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 pgsp_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 pgsp_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 pgsp_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 pgsp_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 pgsp_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 pgsp_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 pgsp_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 pgsp_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 pgsp_falcon_irqmask_r(void) 168{ 169 return 0x00110018U; 170} 171static inline u32 pgsp_falcon_irqdest_r(void) 172{ 173 return 0x0011001cU; 174} 175static inline u32 pgsp_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 pgsp_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 pgsp_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 pgsp_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 pgsp_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 pgsp_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 
199static inline u32 pgsp_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 pgsp_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 pgsp_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 pgsp_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 pgsp_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 pgsp_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 pgsp_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 pgsp_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 pgsp_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 pgsp_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 pgsp_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 pgsp_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 pgsp_falcon_curctx_r(void) 248{ 249 return 0x00110050U; 250} 251static inline u32 pgsp_falcon_nxtctx_r(void) 252{ 253 return 0x00110054U; 254} 255static inline u32 pgsp_falcon_nxtctx_ctxptr_f(u32 v) 256{ 257 return (v & 0xfffffffU) << 0U; 258} 259static inline u32 pgsp_falcon_nxtctx_ctxtgt_fb_f(void) 260{ 261 return 0x0U; 262} 263static inline u32 pgsp_falcon_nxtctx_ctxtgt_sys_coh_f(void) 264{ 265 return 0x20000000U; 266} 267static inline u32 pgsp_falcon_nxtctx_ctxtgt_sys_ncoh_f(void) 268{ 269 return 0x30000000U; 270} 271static inline u32 pgsp_falcon_nxtctx_ctxvalid_f(u32 v) 272{ 273 return (v & 0x1U) << 30U; 274} 275static inline u32 pgsp_falcon_mailbox0_r(void) 276{ 277 return 0x00110040U; 278} 279static inline u32 
pgsp_falcon_mailbox1_r(void) 280{ 281 return 0x00110044U; 282} 283static inline u32 pgsp_falcon_itfen_r(void) 284{ 285 return 0x00110048U; 286} 287static inline u32 pgsp_falcon_itfen_ctxen_enable_f(void) 288{ 289 return 0x1U; 290} 291static inline u32 pgsp_falcon_idlestate_r(void) 292{ 293 return 0x0011004cU; 294} 295static inline u32 pgsp_falcon_idlestate_falcon_busy_v(u32 r) 296{ 297 return (r >> 0U) & 0x1U; 298} 299static inline u32 pgsp_falcon_idlestate_ext_busy_v(u32 r) 300{ 301 return (r >> 1U) & 0x7fffU; 302} 303static inline u32 pgsp_falcon_os_r(void) 304{ 305 return 0x00110080U; 306} 307static inline u32 pgsp_falcon_engctl_r(void) 308{ 309 return 0x001100a4U; 310} 311static inline u32 pgsp_falcon_engctl_switch_context_true_f(void) 312{ 313 return 0x8U; 314} 315static inline u32 pgsp_falcon_engctl_switch_context_false_f(void) 316{ 317 return 0x0U; 318} 319static inline u32 pgsp_falcon_cpuctl_r(void) 320{ 321 return 0x00110100U; 322} 323static inline u32 pgsp_falcon_cpuctl_startcpu_f(u32 v) 324{ 325 return (v & 0x1U) << 1U; 326} 327static inline u32 pgsp_falcon_cpuctl_halt_intr_f(u32 v) 328{ 329 return (v & 0x1U) << 4U; 330} 331static inline u32 pgsp_falcon_cpuctl_halt_intr_m(void) 332{ 333 return 0x1U << 4U; 334} 335static inline u32 pgsp_falcon_cpuctl_halt_intr_v(u32 r) 336{ 337 return (r >> 4U) & 0x1U; 338} 339static inline u32 pgsp_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 340{ 341 return (v & 0x1U) << 6U; 342} 343static inline u32 pgsp_falcon_cpuctl_cpuctl_alias_en_m(void) 344{ 345 return 0x1U << 6U; 346} 347static inline u32 pgsp_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 348{ 349 return (r >> 6U) & 0x1U; 350} 351static inline u32 pgsp_falcon_cpuctl_alias_r(void) 352{ 353 return 0x00110130U; 354} 355static inline u32 pgsp_falcon_cpuctl_alias_startcpu_f(u32 v) 356{ 357 return (v & 0x1U) << 1U; 358} 359static inline u32 pgsp_falcon_imemc_r(u32 i) 360{ 361 return 0x00110180U + i*16U; 362} 363static inline u32 pgsp_falcon_imemc_offs_f(u32 v) 364{ 365 return (v & 
0x3fU) << 2U; 366} 367static inline u32 pgsp_falcon_imemc_blk_f(u32 v) 368{ 369 return (v & 0xffU) << 8U; 370} 371static inline u32 pgsp_falcon_imemc_aincw_f(u32 v) 372{ 373 return (v & 0x1U) << 24U; 374} 375static inline u32 pgsp_falcon_imemd_r(u32 i) 376{ 377 return 0x00110184U + i*16U; 378} 379static inline u32 pgsp_falcon_imemt_r(u32 i) 380{ 381 return 0x00110188U + i*16U; 382} 383static inline u32 pgsp_falcon_sctl_r(void) 384{ 385 return 0x00110240U; 386} 387static inline u32 pgsp_falcon_mmu_phys_sec_r(void) 388{ 389 return 0x00100ce4U; 390} 391static inline u32 pgsp_falcon_bootvec_r(void) 392{ 393 return 0x00110104U; 394} 395static inline u32 pgsp_falcon_bootvec_vec_f(u32 v) 396{ 397 return (v & 0xffffffffU) << 0U; 398} 399static inline u32 pgsp_falcon_dmactl_r(void) 400{ 401 return 0x0011010cU; 402} 403static inline u32 pgsp_falcon_dmactl_dmem_scrubbing_m(void) 404{ 405 return 0x1U << 1U; 406} 407static inline u32 pgsp_falcon_dmactl_imem_scrubbing_m(void) 408{ 409 return 0x1U << 2U; 410} 411static inline u32 pgsp_falcon_dmactl_require_ctx_f(u32 v) 412{ 413 return (v & 0x1U) << 0U; 414} 415static inline u32 pgsp_falcon_hwcfg_r(void) 416{ 417 return 0x00110108U; 418} 419static inline u32 pgsp_falcon_hwcfg_imem_size_v(u32 r) 420{ 421 return (r >> 0U) & 0x1ffU; 422} 423static inline u32 pgsp_falcon_hwcfg_dmem_size_v(u32 r) 424{ 425 return (r >> 9U) & 0x1ffU; 426} 427static inline u32 pgsp_falcon_dmatrfbase_r(void) 428{ 429 return 0x00110110U; 430} 431static inline u32 pgsp_falcon_dmatrfbase1_r(void) 432{ 433 return 0x00110128U; 434} 435static inline u32 pgsp_falcon_dmatrfmoffs_r(void) 436{ 437 return 0x00110114U; 438} 439static inline u32 pgsp_falcon_dmatrfcmd_r(void) 440{ 441 return 0x00110118U; 442} 443static inline u32 pgsp_falcon_dmatrfcmd_imem_f(u32 v) 444{ 445 return (v & 0x1U) << 4U; 446} 447static inline u32 pgsp_falcon_dmatrfcmd_write_f(u32 v) 448{ 449 return (v & 0x1U) << 5U; 450} 451static inline u32 pgsp_falcon_dmatrfcmd_size_f(u32 v) 452{ 453 return 
(v & 0x7U) << 8U; 454} 455static inline u32 pgsp_falcon_dmatrfcmd_ctxdma_f(u32 v) 456{ 457 return (v & 0x7U) << 12U; 458} 459static inline u32 pgsp_falcon_dmatrffboffs_r(void) 460{ 461 return 0x0011011cU; 462} 463static inline u32 pgsp_falcon_exterraddr_r(void) 464{ 465 return 0x00110168U; 466} 467static inline u32 pgsp_falcon_exterrstat_r(void) 468{ 469 return 0x0011016cU; 470} 471static inline u32 pgsp_falcon_exterrstat_valid_m(void) 472{ 473 return 0x1U << 31U; 474} 475static inline u32 pgsp_falcon_exterrstat_valid_v(u32 r) 476{ 477 return (r >> 31U) & 0x1U; 478} 479static inline u32 pgsp_falcon_exterrstat_valid_true_v(void) 480{ 481 return 0x00000001U; 482} 483static inline u32 pgsp_sec2_falcon_icd_cmd_r(void) 484{ 485 return 0x00110200U; 486} 487static inline u32 pgsp_sec2_falcon_icd_cmd_opc_s(void) 488{ 489 return 4U; 490} 491static inline u32 pgsp_sec2_falcon_icd_cmd_opc_f(u32 v) 492{ 493 return (v & 0xfU) << 0U; 494} 495static inline u32 pgsp_sec2_falcon_icd_cmd_opc_m(void) 496{ 497 return 0xfU << 0U; 498} 499static inline u32 pgsp_sec2_falcon_icd_cmd_opc_v(u32 r) 500{ 501 return (r >> 0U) & 0xfU; 502} 503static inline u32 pgsp_sec2_falcon_icd_cmd_opc_rreg_f(void) 504{ 505 return 0x8U; 506} 507static inline u32 pgsp_sec2_falcon_icd_cmd_opc_rstat_f(void) 508{ 509 return 0xeU; 510} 511static inline u32 pgsp_sec2_falcon_icd_cmd_idx_f(u32 v) 512{ 513 return (v & 0x1fU) << 8U; 514} 515static inline u32 pgsp_sec2_falcon_icd_rdata_r(void) 516{ 517 return 0x0011020cU; 518} 519static inline u32 pgsp_falcon_dmemc_r(u32 i) 520{ 521 return 0x001101c0U + i*8U; 522} 523static inline u32 pgsp_falcon_dmemc_offs_f(u32 v) 524{ 525 return (v & 0x3fU) << 2U; 526} 527static inline u32 pgsp_falcon_dmemc_offs_m(void) 528{ 529 return 0x3fU << 2U; 530} 531static inline u32 pgsp_falcon_dmemc_blk_f(u32 v) 532{ 533 return (v & 0xffU) << 8U; 534} 535static inline u32 pgsp_falcon_dmemc_blk_m(void) 536{ 537 return 0xffU << 8U; 538} 539static inline u32 pgsp_falcon_dmemc_aincw_f(u32 v) 
540{ 541 return (v & 0x1U) << 24U; 542} 543static inline u32 pgsp_falcon_dmemc_aincr_f(u32 v) 544{ 545 return (v & 0x1U) << 25U; 546} 547static inline u32 pgsp_falcon_dmemd_r(u32 i) 548{ 549 return 0x001101c4U + i*8U; 550} 551static inline u32 pgsp_falcon_debug1_r(void) 552{ 553 return 0x00110090U; 554} 555static inline u32 pgsp_falcon_debug1_ctxsw_mode_s(void) 556{ 557 return 1U; 558} 559static inline u32 pgsp_falcon_debug1_ctxsw_mode_f(u32 v) 560{ 561 return (v & 0x1U) << 16U; 562} 563static inline u32 pgsp_falcon_debug1_ctxsw_mode_m(void) 564{ 565 return 0x1U << 16U; 566} 567static inline u32 pgsp_falcon_debug1_ctxsw_mode_v(u32 r) 568{ 569 return (r >> 16U) & 0x1U; 570} 571static inline u32 pgsp_falcon_debug1_ctxsw_mode_init_f(void) 572{ 573 return 0x0U; 574} 575static inline u32 pgsp_fbif_transcfg_r(u32 i) 576{ 577 return 0x00110600U + i*4U; 578} 579static inline u32 pgsp_fbif_transcfg_target_local_fb_f(void) 580{ 581 return 0x0U; 582} 583static inline u32 pgsp_fbif_transcfg_target_coherent_sysmem_f(void) 584{ 585 return 0x1U; 586} 587static inline u32 pgsp_fbif_transcfg_target_noncoherent_sysmem_f(void) 588{ 589 return 0x2U; 590} 591static inline u32 pgsp_fbif_transcfg_mem_type_s(void) 592{ 593 return 1U; 594} 595static inline u32 pgsp_fbif_transcfg_mem_type_f(u32 v) 596{ 597 return (v & 0x1U) << 2U; 598} 599static inline u32 pgsp_fbif_transcfg_mem_type_m(void) 600{ 601 return 0x1U << 2U; 602} 603static inline u32 pgsp_fbif_transcfg_mem_type_v(u32 r) 604{ 605 return (r >> 2U) & 0x1U; 606} 607static inline u32 pgsp_fbif_transcfg_mem_type_virtual_f(void) 608{ 609 return 0x0U; 610} 611static inline u32 pgsp_fbif_transcfg_mem_type_physical_f(void) 612{ 613 return 0x4U; 614} 615static inline u32 pgsp_falcon_engine_r(void) 616{ 617 return 0x001103c0U; 618} 619static inline u32 pgsp_falcon_engine_reset_true_f(void) 620{ 621 return 0x1U; 622} 623static inline u32 pgsp_falcon_engine_reset_false_f(void) 624{ 625 return 0x0U; 626} 627static inline u32 
pgsp_fbif_ctl_r(void) 628{ 629 return 0x00110624U; 630} 631static inline u32 pgsp_fbif_ctl_allow_phys_no_ctx_init_f(void) 632{ 633 return 0x0U; 634} 635static inline u32 pgsp_fbif_ctl_allow_phys_no_ctx_disallow_f(void) 636{ 637 return 0x0U; 638} 639static inline u32 pgsp_fbif_ctl_allow_phys_no_ctx_allow_f(void) 640{ 641 return 0x80U; 642} 643#endif
diff --git a/include/nvgpu/hw/gv100/hw_pram_gv100.h b/include/nvgpu/hw/gv100/hw_pram_gv100.h
deleted file mode 100644
index 8f005a2..0000000
--- a/include/nvgpu/hw/gv100/hw_pram_gv100.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pram_gv100_h_ 57#define _hw_pram_gv100_h_ 58 59static inline u32 pram_data032_r(u32 i) 60{ 61 return 0x00700000U + i*4U; 62} 63#endif
diff --git a/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h b/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h
deleted file mode 100644
index 5eca93c..0000000
--- a/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h
+++ /dev/null
@@ -1,167 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringmaster_gv100_h_ 57#define _hw_pri_ringmaster_gv100_h_ 58 59static inline u32 pri_ringmaster_command_r(void) 60{ 61 return 0x0012004cU; 62} 63static inline u32 pri_ringmaster_command_cmd_m(void) 64{ 65 return 0x3fU << 0U; 66} 67static inline u32 pri_ringmaster_command_cmd_v(u32 r) 68{ 69 return (r >> 0U) & 0x3fU; 70} 71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 pri_ringmaster_command_data_r(void) 92{ 93 return 0x00120048U; 94} 95static inline u32 pri_ringmaster_start_results_r(void) 96{ 97 return 0x00120050U; 98} 99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) 104{ 105 return 0x00000001U; 106} 107static inline u32 
pri_ringmaster_intr_status0_r(void) 108{ 109 return 0x00120058U; 110} 111static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) 112{ 113 return (r >> 0U) & 0x1U; 114} 115static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) 116{ 117 return (r >> 1U) & 0x1U; 118} 119static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) 120{ 121 return (r >> 2U) & 0x1U; 122} 123static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) 124{ 125 return (r >> 8U) & 0x1U; 126} 127static inline u32 pri_ringmaster_intr_status1_r(void) 128{ 129 return 0x0012005cU; 130} 131static inline u32 pri_ringmaster_global_ctl_r(void) 132{ 133 return 0x00120060U; 134} 135static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) 136{ 137 return 0x1U; 138} 139static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 pri_ringmaster_enum_fbp_r(void) 144{ 145 return 0x00120074U; 146} 147static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) 148{ 149 return (r >> 0U) & 0x1fU; 150} 151static inline u32 pri_ringmaster_enum_gpc_r(void) 152{ 153 return 0x00120078U; 154} 155static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) 156{ 157 return (r >> 0U) & 0x1fU; 158} 159static inline u32 pri_ringmaster_enum_ltc_r(void) 160{ 161 return 0x0012006cU; 162} 163static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) 164{ 165 return (r >> 0U) & 0x1fU; 166} 167#endif
diff --git a/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h b/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h
deleted file mode 100644
index fc522d5..0000000
--- a/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h
+++ /dev/null
@@ -1,79 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_gpc_gv100_h_ 57#define _hw_pri_ringstation_gpc_gv100_h_ 58 59static inline u32 pri_ringstation_gpc_master_config_r(u32 i) 60{ 61 return 0x00128300U + i*4U; 62} 63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) 64{ 65 return 0x00128120U; 66} 67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) 68{ 69 return 0x00128124U; 70} 71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) 72{ 73 return 0x00128128U; 74} 75static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) 76{ 77 return 0x0012812cU; 78} 79#endif
diff --git a/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h b/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h
deleted file mode 100644
index 885ea30..0000000
--- a/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_sys_gv100_h_ 57#define _hw_pri_ringstation_sys_gv100_h_ 58 59static inline u32 pri_ringstation_sys_master_config_r(u32 i) 60{ 61 return 0x00122300U + i*4U; 62} 63static inline u32 pri_ringstation_sys_decode_config_r(void) 64{ 65 return 0x00122204U; 66} 67static inline u32 pri_ringstation_sys_decode_config_ring_m(void) 68{ 69 return 0x7U << 0U; 70} 71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 pri_ringstation_sys_priv_error_adr_r(void) 76{ 77 return 0x00122120U; 78} 79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) 80{ 81 return 0x00122124U; 82} 83static inline u32 pri_ringstation_sys_priv_error_info_r(void) 84{ 85 return 0x00122128U; 86} 87static inline u32 pri_ringstation_sys_priv_error_code_r(void) 88{ 89 return 0x0012212cU; 90} 91#endif
diff --git a/include/nvgpu/hw/gv100/hw_proj_gv100.h b/include/nvgpu/hw/gv100/hw_proj_gv100.h
deleted file mode 100644
index f46eaa0..0000000
--- a/include/nvgpu/hw/gv100/hw_proj_gv100.h
+++ /dev/null
@@ -1,199 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_proj_gv100_h_ 57#define _hw_proj_gv100_h_ 58 59static inline u32 proj_gpc_base_v(void) 60{ 61 return 0x00500000U; 62} 63static inline u32 proj_gpc_shared_base_v(void) 64{ 65 return 0x00418000U; 66} 67static inline u32 proj_gpc_stride_v(void) 68{ 69 return 0x00008000U; 70} 71static inline u32 proj_gpc_priv_stride_v(void) 72{ 73 return 0x00000800U; 74} 75static inline u32 proj_ltc_stride_v(void) 76{ 77 return 0x00002000U; 78} 79static inline u32 proj_lts_stride_v(void) 80{ 81 return 0x00000200U; 82} 83static inline u32 proj_fbpa_base_v(void) 84{ 85 return 0x00900000U; 86} 87static inline u32 proj_fbpa_shared_base_v(void) 88{ 89 return 0x009a0000U; 90} 91static inline u32 proj_fbpa_stride_v(void) 92{ 93 return 0x00004000U; 94} 95static inline u32 proj_ppc_in_gpc_base_v(void) 96{ 97 return 0x00003000U; 98} 99static inline u32 proj_ppc_in_gpc_shared_base_v(void) 100{ 101 return 0x00003e00U; 102} 103static inline u32 proj_ppc_in_gpc_stride_v(void) 104{ 105 return 0x00000200U; 106} 107static inline u32 proj_rop_base_v(void) 108{ 109 return 0x00410000U; 110} 111static inline u32 proj_rop_shared_base_v(void) 112{ 113 return 0x00408800U; 114} 115static inline u32 proj_rop_stride_v(void) 116{ 117 return 0x00000400U; 118} 119static inline u32 
proj_tpc_in_gpc_base_v(void) 120{ 121 return 0x00004000U; 122} 123static inline u32 proj_tpc_in_gpc_stride_v(void) 124{ 125 return 0x00000800U; 126} 127static inline u32 proj_tpc_in_gpc_shared_base_v(void) 128{ 129 return 0x00001800U; 130} 131static inline u32 proj_smpc_base_v(void) 132{ 133 return 0x00000200U; 134} 135static inline u32 proj_smpc_shared_base_v(void) 136{ 137 return 0x00000300U; 138} 139static inline u32 proj_smpc_unique_base_v(void) 140{ 141 return 0x00000600U; 142} 143static inline u32 proj_smpc_stride_v(void) 144{ 145 return 0x00000100U; 146} 147static inline u32 proj_host_num_engines_v(void) 148{ 149 return 0x0000000fU; 150} 151static inline u32 proj_host_num_pbdma_v(void) 152{ 153 return 0x0000000eU; 154} 155static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) 156{ 157 return 0x00000007U; 158} 159static inline u32 proj_scal_litter_num_fbps_v(void) 160{ 161 return 0x00000008U; 162} 163static inline u32 proj_scal_litter_num_fbpas_v(void) 164{ 165 return 0x00000010U; 166} 167static inline u32 proj_scal_litter_num_gpcs_v(void) 168{ 169 return 0x00000006U; 170} 171static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) 172{ 173 return 0x00000003U; 174} 175static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) 176{ 177 return 0x00000003U; 178} 179static inline u32 proj_scal_litter_num_zcull_banks_v(void) 180{ 181 return 0x00000004U; 182} 183static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) 184{ 185 return 0x00000002U; 186} 187static inline u32 proj_scal_max_gpcs_v(void) 188{ 189 return 0x00000020U; 190} 191static inline u32 proj_scal_max_tpc_per_gpc_v(void) 192{ 193 return 0x00000008U; 194} 195static inline u32 proj_sm_stride_v(void) 196{ 197 return 0x00000080U; 198} 199#endif
diff --git a/include/nvgpu/hw/gv100/hw_pwr_gv100.h b/include/nvgpu/hw/gv100/hw_pwr_gv100.h
deleted file mode 100644
index c719226..0000000
--- a/include/nvgpu/hw/gv100/hw_pwr_gv100.h
+++ /dev/null
@@ -1,983 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pwr_gv100_h_ 57#define _hw_pwr_gv100_h_ 58 59static inline u32 pwr_falcon_irqsset_r(void) 60{ 61 return 0x0010a000U; 62} 63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pwr_falcon_irqsclr_r(void) 68{ 69 return 0x0010a004U; 70} 71static inline u32 pwr_falcon_irqstat_r(void) 72{ 73 return 0x0010a008U; 74} 75static inline u32 pwr_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pwr_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pwr_falcon_irqstat_ext_second_true_f(void) 88{ 89 return 0x800U; 90} 91static inline u32 pwr_falcon_irqmode_r(void) 92{ 93 return 0x0010a00cU; 94} 95static inline u32 pwr_falcon_irqmset_r(void) 96{ 97 return 0x0010a010U; 98} 99static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) 100{ 101 return (v & 0x1U) << 0U; 102} 103static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) 104{ 105 return (v & 0x1U) << 1U; 106} 107static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) 108{ 109 return (v & 0x1U) << 2U; 110} 111static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) 112{ 113 return (v & 0x1U) << 3U; 114} 115static inline u32 
pwr_falcon_irqmset_halt_f(u32 v) 116{ 117 return (v & 0x1U) << 4U; 118} 119static inline u32 pwr_falcon_irqmset_exterr_f(u32 v) 120{ 121 return (v & 0x1U) << 5U; 122} 123static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) 124{ 125 return (v & 0x1U) << 6U; 126} 127static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) 128{ 129 return (v & 0x1U) << 7U; 130} 131static inline u32 pwr_falcon_irqmset_ext_f(u32 v) 132{ 133 return (v & 0xffU) << 8U; 134} 135static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v) 136{ 137 return (v & 0x1U) << 8U; 138} 139static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v) 140{ 141 return (v & 0x1U) << 9U; 142} 143static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v) 144{ 145 return (v & 0x1U) << 11U; 146} 147static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v) 148{ 149 return (v & 0x1U) << 12U; 150} 151static inline u32 pwr_falcon_irqmset_ext_miscio_f(u32 v) 152{ 153 return (v & 0x1U) << 13U; 154} 155static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v) 156{ 157 return (v & 0x1U) << 14U; 158} 159static inline u32 pwr_falcon_irqmclr_r(void) 160{ 161 return 0x0010a014U; 162} 163static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) 164{ 165 return (v & 0x1U) << 0U; 166} 167static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) 168{ 169 return (v & 0x1U) << 1U; 170} 171static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) 172{ 173 return (v & 0x1U) << 2U; 174} 175static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) 176{ 177 return (v & 0x1U) << 3U; 178} 179static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) 180{ 181 return (v & 0x1U) << 4U; 182} 183static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) 184{ 185 return (v & 0x1U) << 5U; 186} 187static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) 188{ 189 return (v & 0x1U) << 6U; 190} 191static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) 192{ 193 return (v & 0x1U) << 7U; 194} 195static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) 196{ 197 return (v & 0xffU) << 8U; 198} 199static inline u32 
pwr_falcon_irqmclr_ext_ctxe_f(u32 v) 200{ 201 return (v & 0x1U) << 8U; 202} 203static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v) 204{ 205 return (v & 0x1U) << 9U; 206} 207static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v) 208{ 209 return (v & 0x1U) << 11U; 210} 211static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v) 212{ 213 return (v & 0x1U) << 12U; 214} 215static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v) 216{ 217 return (v & 0x1U) << 13U; 218} 219static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v) 220{ 221 return (v & 0x1U) << 14U; 222} 223static inline u32 pwr_falcon_irqmask_r(void) 224{ 225 return 0x0010a018U; 226} 227static inline u32 pwr_falcon_irqdest_r(void) 228{ 229 return 0x0010a01cU; 230} 231static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) 232{ 233 return (v & 0x1U) << 0U; 234} 235static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) 236{ 237 return (v & 0x1U) << 1U; 238} 239static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) 240{ 241 return (v & 0x1U) << 2U; 242} 243static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) 244{ 245 return (v & 0x1U) << 3U; 246} 247static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) 248{ 249 return (v & 0x1U) << 4U; 250} 251static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) 252{ 253 return (v & 0x1U) << 5U; 254} 255static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v) 256{ 257 return (v & 0x1U) << 6U; 258} 259static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) 260{ 261 return (v & 0x1U) << 7U; 262} 263static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) 264{ 265 return (v & 0xffU) << 8U; 266} 267static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v) 268{ 269 return (v & 0x1U) << 8U; 270} 271static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v) 272{ 273 return (v & 0x1U) << 9U; 274} 275static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v) 276{ 277 return (v & 0x1U) << 11U; 278} 279static inline u32 
pwr_falcon_irqdest_host_ext_therm_f(u32 v) 280{ 281 return (v & 0x1U) << 12U; 282} 283static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v) 284{ 285 return (v & 0x1U) << 13U; 286} 287static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v) 288{ 289 return (v & 0x1U) << 14U; 290} 291static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 292{ 293 return (v & 0x1U) << 16U; 294} 295static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) 296{ 297 return (v & 0x1U) << 17U; 298} 299static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) 300{ 301 return (v & 0x1U) << 18U; 302} 303static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) 304{ 305 return (v & 0x1U) << 19U; 306} 307static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) 308{ 309 return (v & 0x1U) << 20U; 310} 311static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) 312{ 313 return (v & 0x1U) << 21U; 314} 315static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) 316{ 317 return (v & 0x1U) << 22U; 318} 319static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) 320{ 321 return (v & 0x1U) << 23U; 322} 323static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) 324{ 325 return (v & 0xffU) << 24U; 326} 327static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v) 328{ 329 return (v & 0x1U) << 24U; 330} 331static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v) 332{ 333 return (v & 0x1U) << 25U; 334} 335static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v) 336{ 337 return (v & 0x1U) << 27U; 338} 339static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v) 340{ 341 return (v & 0x1U) << 28U; 342} 343static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v) 344{ 345 return (v & 0x1U) << 29U; 346} 347static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v) 348{ 349 return (v & 0x1U) << 30U; 350} 351static inline u32 pwr_falcon_curctx_r(void) 352{ 353 return 0x0010a050U; 354} 355static inline u32 pwr_falcon_nxtctx_r(void) 356{ 357 return 
0x0010a054U; 358} 359static inline u32 pwr_falcon_mailbox0_r(void) 360{ 361 return 0x0010a040U; 362} 363static inline u32 pwr_falcon_mailbox1_r(void) 364{ 365 return 0x0010a044U; 366} 367static inline u32 pwr_falcon_itfen_r(void) 368{ 369 return 0x0010a048U; 370} 371static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) 372{ 373 return 0x1U; 374} 375static inline u32 pwr_falcon_idlestate_r(void) 376{ 377 return 0x0010a04cU; 378} 379static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) 380{ 381 return (r >> 0U) & 0x1U; 382} 383static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) 384{ 385 return (r >> 1U) & 0x7fffU; 386} 387static inline u32 pwr_falcon_os_r(void) 388{ 389 return 0x0010a080U; 390} 391static inline u32 pwr_falcon_engctl_r(void) 392{ 393 return 0x0010a0a4U; 394} 395static inline u32 pwr_falcon_cpuctl_r(void) 396{ 397 return 0x0010a100U; 398} 399static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) 400{ 401 return (v & 0x1U) << 1U; 402} 403static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) 404{ 405 return (v & 0x1U) << 4U; 406} 407static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) 408{ 409 return 0x1U << 4U; 410} 411static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) 412{ 413 return (r >> 4U) & 0x1U; 414} 415static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 416{ 417 return (v & 0x1U) << 6U; 418} 419static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void) 420{ 421 return 0x1U << 6U; 422} 423static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 424{ 425 return (r >> 6U) & 0x1U; 426} 427static inline u32 pwr_falcon_cpuctl_alias_r(void) 428{ 429 return 0x0010a130U; 430} 431static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) 432{ 433 return (v & 0x1U) << 1U; 434} 435static inline u32 pwr_pmu_scpctl_stat_r(void) 436{ 437 return 0x0010ac08U; 438} 439static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) 440{ 441 return (v & 0x1U) << 20U; 442} 443static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) 444{ 
445 return 0x1U << 20U; 446} 447static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) 448{ 449 return (r >> 20U) & 0x1U; 450} 451static inline u32 pwr_falcon_imemc_r(u32 i) 452{ 453 return 0x0010a180U + i*16U; 454} 455static inline u32 pwr_falcon_imemc_offs_f(u32 v) 456{ 457 return (v & 0x3fU) << 2U; 458} 459static inline u32 pwr_falcon_imemc_blk_f(u32 v) 460{ 461 return (v & 0xffU) << 8U; 462} 463static inline u32 pwr_falcon_imemc_aincw_f(u32 v) 464{ 465 return (v & 0x1U) << 24U; 466} 467static inline u32 pwr_falcon_imemd_r(u32 i) 468{ 469 return 0x0010a184U + i*16U; 470} 471static inline u32 pwr_falcon_imemt_r(u32 i) 472{ 473 return 0x0010a188U + i*16U; 474} 475static inline u32 pwr_falcon_sctl_r(void) 476{ 477 return 0x0010a240U; 478} 479static inline u32 pwr_falcon_mmu_phys_sec_r(void) 480{ 481 return 0x00100ce4U; 482} 483static inline u32 pwr_falcon_bootvec_r(void) 484{ 485 return 0x0010a104U; 486} 487static inline u32 pwr_falcon_bootvec_vec_f(u32 v) 488{ 489 return (v & 0xffffffffU) << 0U; 490} 491static inline u32 pwr_falcon_dmactl_r(void) 492{ 493 return 0x0010a10cU; 494} 495static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) 496{ 497 return 0x1U << 1U; 498} 499static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) 500{ 501 return 0x1U << 2U; 502} 503static inline u32 pwr_falcon_hwcfg_r(void) 504{ 505 return 0x0010a108U; 506} 507static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) 508{ 509 return (r >> 0U) & 0x1ffU; 510} 511static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) 512{ 513 return (r >> 9U) & 0x1ffU; 514} 515static inline u32 pwr_falcon_dmatrfbase_r(void) 516{ 517 return 0x0010a110U; 518} 519static inline u32 pwr_falcon_dmatrfbase1_r(void) 520{ 521 return 0x0010a128U; 522} 523static inline u32 pwr_falcon_dmatrfmoffs_r(void) 524{ 525 return 0x0010a114U; 526} 527static inline u32 pwr_falcon_dmatrfcmd_r(void) 528{ 529 return 0x0010a118U; 530} 531static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) 532{ 533 return (v & 0x1U) << 4U; 
534} 535static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) 536{ 537 return (v & 0x1U) << 5U; 538} 539static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) 540{ 541 return (v & 0x7U) << 8U; 542} 543static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) 544{ 545 return (v & 0x7U) << 12U; 546} 547static inline u32 pwr_falcon_dmatrffboffs_r(void) 548{ 549 return 0x0010a11cU; 550} 551static inline u32 pwr_falcon_exterraddr_r(void) 552{ 553 return 0x0010a168U; 554} 555static inline u32 pwr_falcon_exterrstat_r(void) 556{ 557 return 0x0010a16cU; 558} 559static inline u32 pwr_falcon_exterrstat_valid_m(void) 560{ 561 return 0x1U << 31U; 562} 563static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) 564{ 565 return (r >> 31U) & 0x1U; 566} 567static inline u32 pwr_falcon_exterrstat_valid_true_v(void) 568{ 569 return 0x00000001U; 570} 571static inline u32 pwr_pmu_falcon_icd_cmd_r(void) 572{ 573 return 0x0010a200U; 574} 575static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) 576{ 577 return 4U; 578} 579static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) 580{ 581 return (v & 0xfU) << 0U; 582} 583static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) 584{ 585 return 0xfU << 0U; 586} 587static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) 588{ 589 return (r >> 0U) & 0xfU; 590} 591static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) 592{ 593 return 0x8U; 594} 595static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) 596{ 597 return 0xeU; 598} 599static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) 600{ 601 return (v & 0x1fU) << 8U; 602} 603static inline u32 pwr_pmu_falcon_icd_rdata_r(void) 604{ 605 return 0x0010a20cU; 606} 607static inline u32 pwr_falcon_dmemc_r(u32 i) 608{ 609 return 0x0010a1c0U + i*8U; 610} 611static inline u32 pwr_falcon_dmemc_offs_f(u32 v) 612{ 613 return (v & 0x3fU) << 2U; 614} 615static inline u32 pwr_falcon_dmemc_offs_m(void) 616{ 617 return 0x3fU << 2U; 618} 619static inline u32 pwr_falcon_dmemc_blk_f(u32 v) 620{ 621 return (v & 0xffU) << 8U; 
622} 623static inline u32 pwr_falcon_dmemc_blk_m(void) 624{ 625 return 0xffU << 8U; 626} 627static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) 628{ 629 return (v & 0x1U) << 24U; 630} 631static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) 632{ 633 return (v & 0x1U) << 25U; 634} 635static inline u32 pwr_falcon_dmemd_r(u32 i) 636{ 637 return 0x0010a1c4U + i*8U; 638} 639static inline u32 pwr_pmu_new_instblk_r(void) 640{ 641 return 0x0010a480U; 642} 643static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) 644{ 645 return (v & 0xfffffffU) << 0U; 646} 647static inline u32 pwr_pmu_new_instblk_target_fb_f(void) 648{ 649 return 0x0U; 650} 651static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) 652{ 653 return 0x20000000U; 654} 655static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) 656{ 657 return 0x30000000U; 658} 659static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) 660{ 661 return (v & 0x1U) << 30U; 662} 663static inline u32 pwr_pmu_mutex_id_r(void) 664{ 665 return 0x0010a488U; 666} 667static inline u32 pwr_pmu_mutex_id_value_v(u32 r) 668{ 669 return (r >> 0U) & 0xffU; 670} 671static inline u32 pwr_pmu_mutex_id_value_init_v(void) 672{ 673 return 0x00000000U; 674} 675static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) 676{ 677 return 0x000000ffU; 678} 679static inline u32 pwr_pmu_mutex_id_release_r(void) 680{ 681 return 0x0010a48cU; 682} 683static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) 684{ 685 return (v & 0xffU) << 0U; 686} 687static inline u32 pwr_pmu_mutex_id_release_value_m(void) 688{ 689 return 0xffU << 0U; 690} 691static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) 692{ 693 return 0x00000000U; 694} 695static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) 696{ 697 return 0x0U; 698} 699static inline u32 pwr_pmu_mutex_r(u32 i) 700{ 701 return 0x0010a580U + i*4U; 702} 703static inline u32 pwr_pmu_mutex__size_1_v(void) 704{ 705 return 0x00000010U; 706} 707static inline u32 pwr_pmu_mutex_value_f(u32 v) 708{ 709 return (v & 
0xffU) << 0U; 710} 711static inline u32 pwr_pmu_mutex_value_v(u32 r) 712{ 713 return (r >> 0U) & 0xffU; 714} 715static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) 716{ 717 return 0x0U; 718} 719static inline u32 pwr_pmu_queue_head_r(u32 i) 720{ 721 return 0x0010a800U + i*4U; 722} 723static inline u32 pwr_pmu_queue_head__size_1_v(void) 724{ 725 return 0x00000008U; 726} 727static inline u32 pwr_pmu_queue_head_address_f(u32 v) 728{ 729 return (v & 0xffffffffU) << 0U; 730} 731static inline u32 pwr_pmu_queue_head_address_v(u32 r) 732{ 733 return (r >> 0U) & 0xffffffffU; 734} 735static inline u32 pwr_pmu_queue_tail_r(u32 i) 736{ 737 return 0x0010a820U + i*4U; 738} 739static inline u32 pwr_pmu_queue_tail__size_1_v(void) 740{ 741 return 0x00000008U; 742} 743static inline u32 pwr_pmu_queue_tail_address_f(u32 v) 744{ 745 return (v & 0xffffffffU) << 0U; 746} 747static inline u32 pwr_pmu_queue_tail_address_v(u32 r) 748{ 749 return (r >> 0U) & 0xffffffffU; 750} 751static inline u32 pwr_pmu_msgq_head_r(void) 752{ 753 return 0x0010a4c8U; 754} 755static inline u32 pwr_pmu_msgq_head_val_f(u32 v) 756{ 757 return (v & 0xffffffffU) << 0U; 758} 759static inline u32 pwr_pmu_msgq_head_val_v(u32 r) 760{ 761 return (r >> 0U) & 0xffffffffU; 762} 763static inline u32 pwr_pmu_msgq_tail_r(void) 764{ 765 return 0x0010a4ccU; 766} 767static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) 768{ 769 return (v & 0xffffffffU) << 0U; 770} 771static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) 772{ 773 return (r >> 0U) & 0xffffffffU; 774} 775static inline u32 pwr_pmu_idle_mask_r(u32 i) 776{ 777 return 0x0010a504U + i*16U; 778} 779static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) 780{ 781 return 0x1U; 782} 783static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) 784{ 785 return 0x200000U; 786} 787static inline u32 pwr_pmu_idle_count_r(u32 i) 788{ 789 return 0x0010a508U + i*16U; 790} 791static inline u32 pwr_pmu_idle_count_value_f(u32 v) 792{ 793 return (v & 0x7fffffffU) << 0U; 794} 795static inline 
u32 pwr_pmu_idle_count_value_v(u32 r) 796{ 797 return (r >> 0U) & 0x7fffffffU; 798} 799static inline u32 pwr_pmu_idle_count_reset_f(u32 v) 800{ 801 return (v & 0x1U) << 31U; 802} 803static inline u32 pwr_pmu_idle_ctrl_r(u32 i) 804{ 805 return 0x0010a50cU + i*16U; 806} 807static inline u32 pwr_pmu_idle_ctrl_value_m(void) 808{ 809 return 0x3U << 0U; 810} 811static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) 812{ 813 return 0x2U; 814} 815static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) 816{ 817 return 0x3U; 818} 819static inline u32 pwr_pmu_idle_ctrl_filter_m(void) 820{ 821 return 0x1U << 2U; 822} 823static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) 824{ 825 return 0x0U; 826} 827static inline u32 pwr_pmu_idle_threshold_r(u32 i) 828{ 829 return 0x0010a8a0U + i*4U; 830} 831static inline u32 pwr_pmu_idle_threshold_value_f(u32 v) 832{ 833 return (v & 0x7fffffffU) << 0U; 834} 835static inline u32 pwr_pmu_idle_intr_r(void) 836{ 837 return 0x0010a9e8U; 838} 839static inline u32 pwr_pmu_idle_intr_en_f(u32 v) 840{ 841 return (v & 0x1U) << 0U; 842} 843static inline u32 pwr_pmu_idle_intr_en_disabled_v(void) 844{ 845 return 0x00000000U; 846} 847static inline u32 pwr_pmu_idle_intr_en_enabled_v(void) 848{ 849 return 0x00000001U; 850} 851static inline u32 pwr_pmu_idle_intr_status_r(void) 852{ 853 return 0x0010a9ecU; 854} 855static inline u32 pwr_pmu_idle_intr_status_intr_f(u32 v) 856{ 857 return (v & 0x1U) << 0U; 858} 859static inline u32 pwr_pmu_idle_intr_status_intr_m(void) 860{ 861 return U32(0x1U) << 0U; 862} 863static inline u32 pwr_pmu_idle_intr_status_intr_v(u32 r) 864{ 865 return (r >> 0U) & 0x1U; 866} 867static inline u32 pwr_pmu_idle_intr_status_intr_pending_v(void) 868{ 869 return 0x00000001U; 870} 871static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void) 872{ 873 return 0x00000001U; 874} 875static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) 876{ 877 return 0x0010a9f0U + i*8U; 878} 879static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) 880{ 881 
return 0x0010a9f4U + i*8U; 882} 883static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) 884{ 885 return 0x0010aa30U + i*8U; 886} 887static inline u32 pwr_pmu_debug_r(u32 i) 888{ 889 return 0x0010a5c0U + i*4U; 890} 891static inline u32 pwr_pmu_debug__size_1_v(void) 892{ 893 return 0x00000004U; 894} 895static inline u32 pwr_pmu_mailbox_r(u32 i) 896{ 897 return 0x0010a450U + i*4U; 898} 899static inline u32 pwr_pmu_mailbox__size_1_v(void) 900{ 901 return 0x0000000cU; 902} 903static inline u32 pwr_pmu_bar0_addr_r(void) 904{ 905 return 0x0010a7a0U; 906} 907static inline u32 pwr_pmu_bar0_data_r(void) 908{ 909 return 0x0010a7a4U; 910} 911static inline u32 pwr_pmu_bar0_ctl_r(void) 912{ 913 return 0x0010a7acU; 914} 915static inline u32 pwr_pmu_bar0_timeout_r(void) 916{ 917 return 0x0010a7a8U; 918} 919static inline u32 pwr_pmu_bar0_fecs_error_r(void) 920{ 921 return 0x0010a988U; 922} 923static inline u32 pwr_pmu_bar0_error_status_r(void) 924{ 925 return 0x0010a7b0U; 926} 927static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) 928{ 929 return 0x0010a6c0U + i*4U; 930} 931static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) 932{ 933 return 0x0010a6e8U + i*4U; 934} 935static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) 936{ 937 return 0x0010a710U + i*4U; 938} 939static inline u32 pwr_pmu_pg_intren_r(u32 i) 940{ 941 return 0x0010a760U + i*4U; 942} 943static inline u32 pwr_fbif_transcfg_r(u32 i) 944{ 945 return 0x0010ae00U + i*4U; 946} 947static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) 948{ 949 return 0x0U; 950} 951static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) 952{ 953 return 0x1U; 954} 955static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) 956{ 957 return 0x2U; 958} 959static inline u32 pwr_fbif_transcfg_mem_type_s(void) 960{ 961 return 1U; 962} 963static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) 964{ 965 return (v & 0x1U) << 2U; 966} 967static inline u32 pwr_fbif_transcfg_mem_type_m(void) 968{ 969 return 0x1U << 2U; 970} 971static inline 
u32 pwr_fbif_transcfg_mem_type_v(u32 r) 972{ 973 return (r >> 2U) & 0x1U; 974} 975static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) 976{ 977 return 0x0U; 978} 979static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) 980{ 981 return 0x4U; 982} 983#endif
diff --git a/include/nvgpu/hw/gv100/hw_ram_gv100.h b/include/nvgpu/hw/gv100/hw_ram_gv100.h
deleted file mode 100644
index 55aa25f..0000000
--- a/include/nvgpu/hw/gv100/hw_ram_gv100.h
+++ /dev/null
@@ -1,791 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ram_gv100_h_ 57#define _hw_ram_gv100_h_ 58 59static inline u32 ram_in_ramfc_s(void) 60{ 61 return 4096U; 62} 63static inline u32 ram_in_ramfc_w(void) 64{ 65 return 0U; 66} 67static inline u32 ram_in_page_dir_base_target_f(u32 v) 68{ 69 return (v & 0x3U) << 0U; 70} 71static inline u32 ram_in_page_dir_base_target_w(void) 72{ 73 return 128U; 74} 75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 ram_in_page_dir_base_vol_w(void) 88{ 89 return 128U; 90} 91static inline u32 ram_in_page_dir_base_vol_true_f(void) 92{ 93 return 0x4U; 94} 95static inline u32 ram_in_page_dir_base_vol_false_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v) 100{ 101 return (v & 0x1U) << 4U; 102} 103static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void) 104{ 105 return 0x1U << 4U; 106} 107static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void) 108{ 109 return 128U; 110} 111static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void) 112{ 113 return 
0x10U; 114} 115static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void) 120{ 121 return 0x1U << 5U; 122} 123static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void) 124{ 125 return 128U; 126} 127static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void) 128{ 129 return 0x20U; 130} 131static inline u32 ram_in_use_ver2_pt_format_f(u32 v) 132{ 133 return (v & 0x1U) << 10U; 134} 135static inline u32 ram_in_use_ver2_pt_format_m(void) 136{ 137 return 0x1U << 10U; 138} 139static inline u32 ram_in_use_ver2_pt_format_w(void) 140{ 141 return 128U; 142} 143static inline u32 ram_in_use_ver2_pt_format_true_f(void) 144{ 145 return 0x400U; 146} 147static inline u32 ram_in_use_ver2_pt_format_false_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 ram_in_big_page_size_f(u32 v) 152{ 153 return (v & 0x1U) << 11U; 154} 155static inline u32 ram_in_big_page_size_m(void) 156{ 157 return 0x1U << 11U; 158} 159static inline u32 ram_in_big_page_size_w(void) 160{ 161 return 128U; 162} 163static inline u32 ram_in_big_page_size_128kb_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 ram_in_big_page_size_64kb_f(void) 168{ 169 return 0x800U; 170} 171static inline u32 ram_in_page_dir_base_lo_f(u32 v) 172{ 173 return (v & 0xfffffU) << 12U; 174} 175static inline u32 ram_in_page_dir_base_lo_w(void) 176{ 177 return 128U; 178} 179static inline u32 ram_in_page_dir_base_hi_f(u32 v) 180{ 181 return (v & 0xffffffffU) << 0U; 182} 183static inline u32 ram_in_page_dir_base_hi_w(void) 184{ 185 return 129U; 186} 187static inline u32 ram_in_engine_cs_w(void) 188{ 189 return 132U; 190} 191static inline u32 ram_in_engine_cs_wfi_v(void) 192{ 193 return 0x00000000U; 194} 195static inline u32 ram_in_engine_cs_wfi_f(void) 196{ 197 return 0x0U; 198} 199static inline u32 ram_in_engine_cs_fg_v(void) 200{ 201 return 0x00000001U; 202} 203static inline u32 ram_in_engine_cs_fg_f(void) 
204{ 205 return 0x8U; 206} 207static inline u32 ram_in_engine_wfi_mode_f(u32 v) 208{ 209 return (v & 0x1U) << 2U; 210} 211static inline u32 ram_in_engine_wfi_mode_w(void) 212{ 213 return 132U; 214} 215static inline u32 ram_in_engine_wfi_mode_physical_v(void) 216{ 217 return 0x00000000U; 218} 219static inline u32 ram_in_engine_wfi_mode_virtual_v(void) 220{ 221 return 0x00000001U; 222} 223static inline u32 ram_in_engine_wfi_target_f(u32 v) 224{ 225 return (v & 0x3U) << 0U; 226} 227static inline u32 ram_in_engine_wfi_target_w(void) 228{ 229 return 132U; 230} 231static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void) 232{ 233 return 0x00000002U; 234} 235static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void) 236{ 237 return 0x00000003U; 238} 239static inline u32 ram_in_engine_wfi_target_local_mem_v(void) 240{ 241 return 0x00000000U; 242} 243static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v) 244{ 245 return (v & 0xfffffU) << 12U; 246} 247static inline u32 ram_in_engine_wfi_ptr_lo_w(void) 248{ 249 return 132U; 250} 251static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v) 252{ 253 return (v & 0xffU) << 0U; 254} 255static inline u32 ram_in_engine_wfi_ptr_hi_w(void) 256{ 257 return 133U; 258} 259static inline u32 ram_in_engine_wfi_veid_f(u32 v) 260{ 261 return (v & 0x3fU) << 0U; 262} 263static inline u32 ram_in_engine_wfi_veid_w(void) 264{ 265 return 134U; 266} 267static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v) 268{ 269 return (v & 0xffffffffU) << 0U; 270} 271static inline u32 ram_in_eng_method_buffer_addr_lo_w(void) 272{ 273 return 136U; 274} 275static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v) 276{ 277 return (v & 0x1ffffU) << 0U; 278} 279static inline u32 ram_in_eng_method_buffer_addr_hi_w(void) 280{ 281 return 137U; 282} 283static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i) 284{ 285 return (v & 0x3U) << (0U + i*0U); 286} 287static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void) 288{ 289 return 0x00000040U; 290} 
291static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void) 292{ 293 return 0x00000000U; 294} 295static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void) 296{ 297 return 0x00000001U; 298} 299static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void) 300{ 301 return 0x00000002U; 302} 303static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void) 304{ 305 return 0x00000003U; 306} 307static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i) 308{ 309 return (v & 0x1U) << (2U + i*0U); 310} 311static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void) 312{ 313 return 0x00000040U; 314} 315static inline u32 ram_in_sc_page_dir_base_vol_true_v(void) 316{ 317 return 0x00000001U; 318} 319static inline u32 ram_in_sc_page_dir_base_vol_false_v(void) 320{ 321 return 0x00000000U; 322} 323static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i) 324{ 325 return (v & 0x1U) << (4U + i*0U); 326} 327static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void) 328{ 329 return 0x00000040U; 330} 331static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void) 332{ 333 return 0x00000001U; 334} 335static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void) 336{ 337 return 0x00000000U; 338} 339static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i) 340{ 341 return (v & 0x1U) << (5U + i*0U); 342} 343static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void) 344{ 345 return 0x00000040U; 346} 347static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void) 348{ 349 return 0x00000001U; 350} 351static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void) 352{ 353 return 0x00000000U; 354} 355static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i) 356{ 357 return (v & 0x1U) << (10U + i*0U); 358} 359static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void) 360{ 361 return 0x00000040U; 362} 363static inline u32 
ram_in_sc_use_ver2_pt_format_false_v(void) 364{ 365 return 0x00000000U; 366} 367static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void) 368{ 369 return 0x00000001U; 370} 371static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i) 372{ 373 return (v & 0x1U) << (11U + i*0U); 374} 375static inline u32 ram_in_sc_big_page_size__size_1_v(void) 376{ 377 return 0x00000040U; 378} 379static inline u32 ram_in_sc_big_page_size_64kb_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i) 384{ 385 return (v & 0xfffffU) << (12U + i*0U); 386} 387static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void) 388{ 389 return 0x00000040U; 390} 391static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i) 392{ 393 return (v & 0xffffffffU) << (0U + i*0U); 394} 395static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void) 396{ 397 return 0x00000040U; 398} 399static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v) 400{ 401 return (v & 0x3U) << 0U; 402} 403static inline u32 ram_in_sc_page_dir_base_target_0_w(void) 404{ 405 return 168U; 406} 407static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v) 408{ 409 return (v & 0x1U) << 2U; 410} 411static inline u32 ram_in_sc_page_dir_base_vol_0_w(void) 412{ 413 return 168U; 414} 415static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v) 416{ 417 return (v & 0x1U) << 4U; 418} 419static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void) 420{ 421 return 168U; 422} 423static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v) 424{ 425 return (v & 0x1U) << 5U; 426} 427static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void) 428{ 429 return 168U; 430} 431static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v) 432{ 433 return (v & 0x1U) << 10U; 434} 435static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void) 436{ 437 return 168U; 438} 439static inline u32 ram_in_sc_big_page_size_0_f(u32 v) 440{ 441 return (v & 0x1U) << 11U; 442} 443static 
inline u32 ram_in_sc_big_page_size_0_w(void) 444{ 445 return 168U; 446} 447static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v) 448{ 449 return (v & 0xfffffU) << 12U; 450} 451static inline u32 ram_in_sc_page_dir_base_lo_0_w(void) 452{ 453 return 168U; 454} 455static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v) 456{ 457 return (v & 0xffffffffU) << 0U; 458} 459static inline u32 ram_in_sc_page_dir_base_hi_0_w(void) 460{ 461 return 169U; 462} 463static inline u32 ram_in_base_shift_v(void) 464{ 465 return 0x0000000cU; 466} 467static inline u32 ram_in_alloc_size_v(void) 468{ 469 return 0x00001000U; 470} 471static inline u32 ram_fc_size_val_v(void) 472{ 473 return 0x00000200U; 474} 475static inline u32 ram_fc_gp_put_w(void) 476{ 477 return 0U; 478} 479static inline u32 ram_fc_userd_w(void) 480{ 481 return 2U; 482} 483static inline u32 ram_fc_userd_hi_w(void) 484{ 485 return 3U; 486} 487static inline u32 ram_fc_signature_w(void) 488{ 489 return 4U; 490} 491static inline u32 ram_fc_gp_get_w(void) 492{ 493 return 5U; 494} 495static inline u32 ram_fc_pb_get_w(void) 496{ 497 return 6U; 498} 499static inline u32 ram_fc_pb_get_hi_w(void) 500{ 501 return 7U; 502} 503static inline u32 ram_fc_pb_top_level_get_w(void) 504{ 505 return 8U; 506} 507static inline u32 ram_fc_pb_top_level_get_hi_w(void) 508{ 509 return 9U; 510} 511static inline u32 ram_fc_acquire_w(void) 512{ 513 return 12U; 514} 515static inline u32 ram_fc_sem_addr_hi_w(void) 516{ 517 return 14U; 518} 519static inline u32 ram_fc_sem_addr_lo_w(void) 520{ 521 return 15U; 522} 523static inline u32 ram_fc_sem_payload_lo_w(void) 524{ 525 return 16U; 526} 527static inline u32 ram_fc_sem_payload_hi_w(void) 528{ 529 return 39U; 530} 531static inline u32 ram_fc_sem_execute_w(void) 532{ 533 return 17U; 534} 535static inline u32 ram_fc_gp_base_w(void) 536{ 537 return 18U; 538} 539static inline u32 ram_fc_gp_base_hi_w(void) 540{ 541 return 19U; 542} 543static inline u32 ram_fc_gp_fetch_w(void) 544{ 545 return 20U; 546} 
547static inline u32 ram_fc_pb_fetch_w(void) 548{ 549 return 21U; 550} 551static inline u32 ram_fc_pb_fetch_hi_w(void) 552{ 553 return 22U; 554} 555static inline u32 ram_fc_pb_put_w(void) 556{ 557 return 23U; 558} 559static inline u32 ram_fc_pb_put_hi_w(void) 560{ 561 return 24U; 562} 563static inline u32 ram_fc_pb_header_w(void) 564{ 565 return 33U; 566} 567static inline u32 ram_fc_pb_count_w(void) 568{ 569 return 34U; 570} 571static inline u32 ram_fc_subdevice_w(void) 572{ 573 return 37U; 574} 575static inline u32 ram_fc_target_w(void) 576{ 577 return 43U; 578} 579static inline u32 ram_fc_hce_ctrl_w(void) 580{ 581 return 57U; 582} 583static inline u32 ram_fc_chid_w(void) 584{ 585 return 58U; 586} 587static inline u32 ram_fc_chid_id_f(u32 v) 588{ 589 return (v & 0xfffU) << 0U; 590} 591static inline u32 ram_fc_chid_id_w(void) 592{ 593 return 0U; 594} 595static inline u32 ram_fc_config_w(void) 596{ 597 return 61U; 598} 599static inline u32 ram_fc_runlist_timeslice_w(void) 600{ 601 return 62U; 602} 603static inline u32 ram_fc_set_channel_info_w(void) 604{ 605 return 63U; 606} 607static inline u32 ram_userd_base_shift_v(void) 608{ 609 return 0x00000009U; 610} 611static inline u32 ram_userd_chan_size_v(void) 612{ 613 return 0x00000200U; 614} 615static inline u32 ram_userd_put_w(void) 616{ 617 return 16U; 618} 619static inline u32 ram_userd_get_w(void) 620{ 621 return 17U; 622} 623static inline u32 ram_userd_ref_w(void) 624{ 625 return 18U; 626} 627static inline u32 ram_userd_put_hi_w(void) 628{ 629 return 19U; 630} 631static inline u32 ram_userd_ref_threshold_w(void) 632{ 633 return 20U; 634} 635static inline u32 ram_userd_top_level_get_w(void) 636{ 637 return 22U; 638} 639static inline u32 ram_userd_top_level_get_hi_w(void) 640{ 641 return 23U; 642} 643static inline u32 ram_userd_get_hi_w(void) 644{ 645 return 24U; 646} 647static inline u32 ram_userd_gp_get_w(void) 648{ 649 return 34U; 650} 651static inline u32 ram_userd_gp_put_w(void) 652{ 653 return 35U; 654} 
655static inline u32 ram_userd_gp_top_level_get_w(void) 656{ 657 return 22U; 658} 659static inline u32 ram_userd_gp_top_level_get_hi_w(void) 660{ 661 return 23U; 662} 663static inline u32 ram_rl_entry_size_v(void) 664{ 665 return 0x00000010U; 666} 667static inline u32 ram_rl_entry_type_f(u32 v) 668{ 669 return (v & 0x1U) << 0U; 670} 671static inline u32 ram_rl_entry_type_channel_v(void) 672{ 673 return 0x00000000U; 674} 675static inline u32 ram_rl_entry_type_tsg_v(void) 676{ 677 return 0x00000001U; 678} 679static inline u32 ram_rl_entry_id_f(u32 v) 680{ 681 return (v & 0xfffU) << 0U; 682} 683static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v) 684{ 685 return (v & 0x1U) << 1U; 686} 687static inline u32 ram_rl_entry_chan_inst_target_f(u32 v) 688{ 689 return (v & 0x3U) << 4U; 690} 691static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void) 692{ 693 return 0x00000003U; 694} 695static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void) 696{ 697 return 0x00000002U; 698} 699static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void) 700{ 701 return 0x00000000U; 702} 703static inline u32 ram_rl_entry_chan_userd_target_f(u32 v) 704{ 705 return (v & 0x3U) << 6U; 706} 707static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void) 708{ 709 return 0x00000000U; 710} 711static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void) 712{ 713 return 0x00000001U; 714} 715static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void) 716{ 717 return 0x00000002U; 718} 719static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void) 720{ 721 return 0x00000003U; 722} 723static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v) 724{ 725 return (v & 0xffffffU) << 8U; 726} 727static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v) 728{ 729 return (v & 0xffffffffU) << 0U; 730} 731static inline u32 ram_rl_entry_chid_f(u32 v) 732{ 733 return (v & 0xfffU) << 0U; 734} 735static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 
v) 736{ 737 return (v & 0xfffffU) << 12U; 738} 739static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v) 740{ 741 return (v & 0xffffffffU) << 0U; 742} 743static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v) 744{ 745 return (v & 0xfU) << 16U; 746} 747static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void) 748{ 749 return 0x00000003U; 750} 751static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v) 752{ 753 return (v & 0xffU) << 24U; 754} 755static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void) 756{ 757 return 0x00000080U; 758} 759static inline u32 ram_rl_entry_tsg_length_f(u32 v) 760{ 761 return (v & 0xffU) << 0U; 762} 763static inline u32 ram_rl_entry_tsg_length_init_v(void) 764{ 765 return 0x00000000U; 766} 767static inline u32 ram_rl_entry_tsg_length_min_v(void) 768{ 769 return 0x00000001U; 770} 771static inline u32 ram_rl_entry_tsg_length_max_v(void) 772{ 773 return 0x00000080U; 774} 775static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v) 776{ 777 return (v & 0xfffU) << 0U; 778} 779static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void) 780{ 781 return 0x00000008U; 782} 783static inline u32 ram_rl_entry_chan_userd_align_shift_v(void) 784{ 785 return 0x00000008U; 786} 787static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void) 788{ 789 return 0x0000000cU; 790} 791#endif
diff --git a/include/nvgpu/hw/gv100/hw_therm_gv100.h b/include/nvgpu/hw/gv100/hw_therm_gv100.h
deleted file mode 100644
index 2ea71ef..0000000
--- a/include/nvgpu/hw/gv100/hw_therm_gv100.h
+++ /dev/null
@@ -1,299 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_therm_gv100_h_ 57#define _hw_therm_gv100_h_ 58 59static inline u32 therm_weight_1_r(void) 60{ 61 return 0x00020024U; 62} 63static inline u32 therm_config1_r(void) 64{ 65 return 0x00020050U; 66} 67static inline u32 therm_config2_r(void) 68{ 69 return 0x00020130U; 70} 71static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) 72{ 73 return (v & 0x1U) << 24U; 74} 75static inline u32 therm_config2_grad_enable_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 therm_gate_ctrl_r(u32 i) 80{ 81 return 0x00020200U + i*4U; 82} 83static inline u32 therm_gate_ctrl_eng_clk_m(void) 84{ 85 return 0x3U << 0U; 86} 87static inline u32 therm_gate_ctrl_eng_clk_run_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) 92{ 93 return 0x1U; 94} 95static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) 96{ 97 return 0x2U; 98} 99static inline u32 therm_gate_ctrl_blk_clk_m(void) 100{ 101 return 0x3U << 2U; 102} 103static inline u32 therm_gate_ctrl_blk_clk_run_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) 108{ 109 return 0x4U; 110} 111static inline u32 therm_gate_ctrl_idle_holdoff_m(void) 112{ 113 return 0x1U << 4U; 114} 115static inline u32 
therm_gate_ctrl_idle_holdoff_off_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void) 120{ 121 return 0x10U; 122} 123static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) 124{ 125 return (v & 0x1fU) << 8U; 126} 127static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) 128{ 129 return 0x1fU << 8U; 130} 131static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) 132{ 133 return (v & 0x7U) << 13U; 134} 135static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) 136{ 137 return 0x7U << 13U; 138} 139static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) 140{ 141 return (v & 0xfU) << 16U; 142} 143static inline u32 therm_gate_ctrl_eng_delay_before_m(void) 144{ 145 return 0xfU << 16U; 146} 147static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) 148{ 149 return (v & 0xfU) << 20U; 150} 151static inline u32 therm_gate_ctrl_eng_delay_after_m(void) 152{ 153 return 0xfU << 20U; 154} 155static inline u32 therm_fecs_idle_filter_r(void) 156{ 157 return 0x00020288U; 158} 159static inline u32 therm_fecs_idle_filter_value_m(void) 160{ 161 return 0xffffffffU << 0U; 162} 163static inline u32 therm_hubmmu_idle_filter_r(void) 164{ 165 return 0x0002028cU; 166} 167static inline u32 therm_hubmmu_idle_filter_value_m(void) 168{ 169 return 0xffffffffU << 0U; 170} 171static inline u32 therm_clk_slowdown_r(u32 i) 172{ 173 return 0x00020160U + i*4U; 174} 175static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) 176{ 177 return (v & 0x3fU) << 16U; 178} 179static inline u32 therm_clk_slowdown_idle_factor_m(void) 180{ 181 return 0x3fU << 16U; 182} 183static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) 184{ 185 return (r >> 16U) & 0x3fU; 186} 187static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) 188{ 189 return 0x0U; 190} 191static inline u32 therm_grad_stepping_table_r(u32 i) 192{ 193 return 0x000202c8U + i*4U; 194} 195static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) 196{ 197 return 
(v & 0x3fU) << 0U; 198} 199static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) 200{ 201 return 0x3fU << 0U; 202} 203static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) 204{ 205 return 0x1U; 206} 207static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) 208{ 209 return 0x2U; 210} 211static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) 212{ 213 return 0x6U; 214} 215static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) 216{ 217 return 0xeU; 218} 219static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) 220{ 221 return (v & 0x3fU) << 6U; 222} 223static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) 224{ 225 return 0x3fU << 6U; 226} 227static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) 228{ 229 return (v & 0x3fU) << 12U; 230} 231static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) 232{ 233 return 0x3fU << 12U; 234} 235static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v) 236{ 237 return (v & 0x3fU) << 18U; 238} 239static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) 240{ 241 return 0x3fU << 18U; 242} 243static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v) 244{ 245 return (v & 0x3fU) << 24U; 246} 247static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) 248{ 249 return 0x3fU << 24U; 250} 251static inline u32 therm_grad_stepping0_r(void) 252{ 253 return 0x000202c0U; 254} 255static inline u32 therm_grad_stepping0_feature_s(void) 256{ 257 return 1U; 258} 259static inline u32 therm_grad_stepping0_feature_f(u32 v) 260{ 261 return (v & 0x1U) << 0U; 262} 263static inline u32 therm_grad_stepping0_feature_m(void) 264{ 265 return 0x1U << 0U; 266} 267static inline u32 therm_grad_stepping0_feature_v(u32 r) 268{ 269 return (r >> 0U) & 0x1U; 270} 271static inline u32 therm_grad_stepping0_feature_enable_f(void) 272{ 273 return 0x1U; 274} 
275static inline u32 therm_grad_stepping1_r(void) 276{ 277 return 0x000202c4U; 278} 279static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) 280{ 281 return (v & 0x1ffffU) << 0U; 282} 283static inline u32 therm_clk_timing_r(u32 i) 284{ 285 return 0x000203c0U + i*4U; 286} 287static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) 288{ 289 return (v & 0x1U) << 16U; 290} 291static inline u32 therm_clk_timing_grad_slowdown_m(void) 292{ 293 return 0x1U << 16U; 294} 295static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) 296{ 297 return 0x10000U; 298} 299#endif
diff --git a/include/nvgpu/hw/gv100/hw_timer_gv100.h b/include/nvgpu/hw/gv100/hw_timer_gv100.h
deleted file mode 100644
index 9d76e24..0000000
--- a/include/nvgpu/hw/gv100/hw_timer_gv100.h
+++ /dev/null
@@ -1,115 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_timer_gv100_h_ 57#define _hw_timer_gv100_h_ 58 59static inline u32 timer_pri_timeout_r(void) 60{ 61 return 0x00009080U; 62} 63static inline u32 timer_pri_timeout_period_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 timer_pri_timeout_period_m(void) 68{ 69 return 0xffffffU << 0U; 70} 71static inline u32 timer_pri_timeout_period_v(u32 r) 72{ 73 return (r >> 0U) & 0xffffffU; 74} 75static inline u32 timer_pri_timeout_en_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 timer_pri_timeout_en_m(void) 80{ 81 return 0x1U << 31U; 82} 83static inline u32 timer_pri_timeout_en_v(u32 r) 84{ 85 return (r >> 31U) & 0x1U; 86} 87static inline u32 timer_pri_timeout_en_en_enabled_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 timer_pri_timeout_en_en_disabled_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 timer_pri_timeout_save_0_r(void) 96{ 97 return 0x00009084U; 98} 99static inline u32 timer_pri_timeout_save_1_r(void) 100{ 101 return 0x00009088U; 102} 103static inline u32 timer_pri_timeout_fecs_errcode_r(void) 104{ 105 return 0x0000908cU; 106} 107static inline u32 timer_time_0_r(void) 108{ 109 return 0x00009400U; 110} 111static inline u32 timer_time_1_r(void) 112{ 113 return 0x00009410U; 114} 
115#endif
diff --git a/include/nvgpu/hw/gv100/hw_top_gv100.h b/include/nvgpu/hw/gv100/hw_top_gv100.h
deleted file mode 100644
index 506a818..0000000
--- a/include/nvgpu/hw/gv100/hw_top_gv100.h
+++ /dev/null
@@ -1,343 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_top_gv100_h_ 57#define _hw_top_gv100_h_ 58 59static inline u32 top_num_gpcs_r(void) 60{ 61 return 0x00022430U; 62} 63static inline u32 top_num_gpcs_value_v(u32 r) 64{ 65 return (r >> 0U) & 0x1fU; 66} 67static inline u32 top_tpc_per_gpc_r(void) 68{ 69 return 0x00022434U; 70} 71static inline u32 top_tpc_per_gpc_value_v(u32 r) 72{ 73 return (r >> 0U) & 0x1fU; 74} 75static inline u32 top_num_fbps_r(void) 76{ 77 return 0x00022438U; 78} 79static inline u32 top_num_fbps_value_v(u32 r) 80{ 81 return (r >> 0U) & 0x1fU; 82} 83static inline u32 top_num_fbpas_r(void) 84{ 85 return 0x0002243cU; 86} 87static inline u32 top_num_fbpas_value_v(u32 r) 88{ 89 return (r >> 0U) & 0x1fU; 90} 91static inline u32 top_ltc_per_fbp_r(void) 92{ 93 return 0x00022450U; 94} 95static inline u32 top_ltc_per_fbp_value_v(u32 r) 96{ 97 return (r >> 0U) & 0x1fU; 98} 99static inline u32 top_slices_per_ltc_r(void) 100{ 101 return 0x0002245cU; 102} 103static inline u32 top_slices_per_ltc_value_v(u32 r) 104{ 105 return (r >> 0U) & 0x1fU; 106} 107static inline u32 top_num_ltcs_r(void) 108{ 109 return 0x00022454U; 110} 111static inline u32 top_num_ces_r(void) 112{ 113 return 0x00022444U; 114} 115static inline u32 top_num_ces_value_v(u32 r) 116{ 117 return (r >> 0U) & 
0x1fU; 118} 119static inline u32 top_device_info_r(u32 i) 120{ 121 return 0x00022700U + i*4U; 122} 123static inline u32 top_device_info__size_1_v(void) 124{ 125 return 0x00000040U; 126} 127static inline u32 top_device_info_chain_v(u32 r) 128{ 129 return (r >> 31U) & 0x1U; 130} 131static inline u32 top_device_info_chain_enable_v(void) 132{ 133 return 0x00000001U; 134} 135static inline u32 top_device_info_engine_enum_v(u32 r) 136{ 137 return (r >> 26U) & 0xfU; 138} 139static inline u32 top_device_info_runlist_enum_v(u32 r) 140{ 141 return (r >> 21U) & 0xfU; 142} 143static inline u32 top_device_info_intr_enum_v(u32 r) 144{ 145 return (r >> 15U) & 0x1fU; 146} 147static inline u32 top_device_info_reset_enum_v(u32 r) 148{ 149 return (r >> 9U) & 0x1fU; 150} 151static inline u32 top_device_info_type_enum_v(u32 r) 152{ 153 return (r >> 2U) & 0x1fffffffU; 154} 155static inline u32 top_device_info_type_enum_graphics_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 top_device_info_type_enum_graphics_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 top_device_info_type_enum_copy2_v(void) 164{ 165 return 0x00000003U; 166} 167static inline u32 top_device_info_type_enum_copy2_f(void) 168{ 169 return 0xcU; 170} 171static inline u32 top_device_info_type_enum_lce_v(void) 172{ 173 return 0x00000013U; 174} 175static inline u32 top_device_info_type_enum_lce_f(void) 176{ 177 return 0x4cU; 178} 179static inline u32 top_device_info_type_enum_ioctrl_v(void) 180{ 181 return 0x00000012U; 182} 183static inline u32 top_device_info_type_enum_ioctrl_f(void) 184{ 185 return 0x48U; 186} 187static inline u32 top_device_info_engine_v(u32 r) 188{ 189 return (r >> 5U) & 0x1U; 190} 191static inline u32 top_device_info_runlist_v(u32 r) 192{ 193 return (r >> 4U) & 0x1U; 194} 195static inline u32 top_device_info_intr_v(u32 r) 196{ 197 return (r >> 3U) & 0x1U; 198} 199static inline u32 top_device_info_reset_v(u32 r) 200{ 201 return (r >> 2U) & 0x1U; 202} 203static inline u32 
top_device_info_entry_v(u32 r) 204{ 205 return (r >> 0U) & 0x3U; 206} 207static inline u32 top_device_info_entry_not_valid_v(void) 208{ 209 return 0x00000000U; 210} 211static inline u32 top_device_info_entry_enum_v(void) 212{ 213 return 0x00000002U; 214} 215static inline u32 top_device_info_entry_data_v(void) 216{ 217 return 0x00000001U; 218} 219static inline u32 top_device_info_entry_engine_type_v(void) 220{ 221 return 0x00000003U; 222} 223static inline u32 top_device_info_data_type_v(u32 r) 224{ 225 return (r >> 30U) & 0x1U; 226} 227static inline u32 top_device_info_data_type_enum2_v(void) 228{ 229 return 0x00000000U; 230} 231static inline u32 top_device_info_data_inst_id_v(u32 r) 232{ 233 return (r >> 26U) & 0xfU; 234} 235static inline u32 top_device_info_data_pri_base_v(u32 r) 236{ 237 return (r >> 12U) & 0xfffU; 238} 239static inline u32 top_device_info_data_pri_base_align_v(void) 240{ 241 return 0x0000000cU; 242} 243static inline u32 top_device_info_data_fault_id_enum_v(u32 r) 244{ 245 return (r >> 3U) & 0x7fU; 246} 247static inline u32 top_device_info_data_fault_id_v(u32 r) 248{ 249 return (r >> 2U) & 0x1U; 250} 251static inline u32 top_device_info_data_fault_id_valid_v(void) 252{ 253 return 0x00000001U; 254} 255static inline u32 top_nvhsclk_ctrl_r(void) 256{ 257 return 0x00022424U; 258} 259static inline u32 top_nvhsclk_ctrl_e_clk_nvl_f(u32 v) 260{ 261 return (v & 0x7U) << 0U; 262} 263static inline u32 top_nvhsclk_ctrl_e_clk_nvl_m(void) 264{ 265 return 0x7U << 0U; 266} 267static inline u32 top_nvhsclk_ctrl_e_clk_nvl_v(u32 r) 268{ 269 return (r >> 0U) & 0x7U; 270} 271static inline u32 top_nvhsclk_ctrl_e_clk_pcie_f(u32 v) 272{ 273 return (v & 0x1U) << 3U; 274} 275static inline u32 top_nvhsclk_ctrl_e_clk_pcie_m(void) 276{ 277 return 0x1U << 3U; 278} 279static inline u32 top_nvhsclk_ctrl_e_clk_pcie_v(u32 r) 280{ 281 return (r >> 3U) & 0x1U; 282} 283static inline u32 top_nvhsclk_ctrl_e_clk_core_f(u32 v) 284{ 285 return (v & 0x1U) << 4U; 286} 287static inline u32 
top_nvhsclk_ctrl_e_clk_core_m(void) 288{ 289 return 0x1U << 4U; 290} 291static inline u32 top_nvhsclk_ctrl_e_clk_core_v(u32 r) 292{ 293 return (r >> 4U) & 0x1U; 294} 295static inline u32 top_nvhsclk_ctrl_rfu_f(u32 v) 296{ 297 return (v & 0xfU) << 5U; 298} 299static inline u32 top_nvhsclk_ctrl_rfu_m(void) 300{ 301 return 0xfU << 5U; 302} 303static inline u32 top_nvhsclk_ctrl_rfu_v(u32 r) 304{ 305 return (r >> 5U) & 0xfU; 306} 307static inline u32 top_nvhsclk_ctrl_swap_clk_nvl_f(u32 v) 308{ 309 return (v & 0x7U) << 10U; 310} 311static inline u32 top_nvhsclk_ctrl_swap_clk_nvl_m(void) 312{ 313 return 0x7U << 10U; 314} 315static inline u32 top_nvhsclk_ctrl_swap_clk_nvl_v(u32 r) 316{ 317 return (r >> 10U) & 0x7U; 318} 319static inline u32 top_nvhsclk_ctrl_swap_clk_pcie_f(u32 v) 320{ 321 return (v & 0x1U) << 9U; 322} 323static inline u32 top_nvhsclk_ctrl_swap_clk_pcie_m(void) 324{ 325 return 0x1U << 9U; 326} 327static inline u32 top_nvhsclk_ctrl_swap_clk_pcie_v(u32 r) 328{ 329 return (r >> 9U) & 0x1U; 330} 331static inline u32 top_nvhsclk_ctrl_swap_clk_core_f(u32 v) 332{ 333 return (v & 0x1U) << 13U; 334} 335static inline u32 top_nvhsclk_ctrl_swap_clk_core_m(void) 336{ 337 return 0x1U << 13U; 338} 339static inline u32 top_nvhsclk_ctrl_swap_clk_core_v(u32 r) 340{ 341 return (r >> 13U) & 0x1U; 342} 343#endif
diff --git a/include/nvgpu/hw/gv100/hw_trim_gv100.h b/include/nvgpu/hw/gv100/hw_trim_gv100.h
deleted file mode 100644
index f1b6da2..0000000
--- a/include/nvgpu/hw/gv100/hw_trim_gv100.h
+++ /dev/null
@@ -1,247 +0,0 @@ 1/* 2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_trim_gv100_h_ 57#define _hw_trim_gv100_h_ 58 59static inline u32 trim_sys_nvlink_uphy_cfg_r(void) 60{ 61 return 0x00132410U; 62} 63static inline u32 trim_sys_nvlink_uphy_cfg_lockdect_wait_dly_length_f(u32 v) 64{ 65 return (v & 0x3ffU) << 0U; 66} 67static inline u32 trim_sys_nvlink_uphy_cfg_lockdect_wait_dly_length_m(void) 68{ 69 return 0x3ffU << 0U; 70} 71static inline u32 trim_sys_nvlink_uphy_cfg_lockdect_wait_dly_length_v(u32 r) 72{ 73 return (r >> 0U) & 0x3ffU; 74} 75static inline u32 trim_sys_nvlink_uphy_cfg_phy2clks_use_lockdet_f(u32 v) 76{ 77 return (v & 0x1U) << 12U; 78} 79static inline u32 trim_sys_nvlink_uphy_cfg_phy2clks_use_lockdet_m(void) 80{ 81 return 0x1U << 12U; 82} 83static inline u32 trim_sys_nvlink_uphy_cfg_phy2clks_use_lockdet_v(u32 r) 84{ 85 return (r >> 12U) & 0x1U; 86} 87static inline u32 trim_sys_nvlink_uphy_cfg_nvlink_wait_dly_f(u32 v) 88{ 89 return (v & 0xffU) << 16U; 90} 91static inline u32 trim_sys_nvlink_uphy_cfg_nvlink_wait_dly_m(void) 92{ 93 return 0xffU << 16U; 94} 95static inline u32 trim_sys_nvlink_uphy_cfg_nvlink_wait_dly_v(u32 r) 96{ 97 return (r >> 16U) & 0xffU; 98} 99static inline u32 trim_sys_nvlink0_ctrl_r(void) 100{ 101 return 0x00132420U; 102} 103static inline u32 
trim_sys_nvlink0_ctrl_unit2clks_pll_turn_off_f(u32 v) 104{ 105 return (v & 0x1U) << 0U; 106} 107static inline u32 trim_sys_nvlink0_ctrl_unit2clks_pll_turn_off_m(void) 108{ 109 return 0x1U << 0U; 110} 111static inline u32 trim_sys_nvlink0_ctrl_unit2clks_pll_turn_off_v(u32 r) 112{ 113 return (r >> 0U) & 0x1U; 114} 115static inline u32 trim_sys_nvlink0_status_r(void) 116{ 117 return 0x00132424U; 118} 119static inline u32 trim_sys_nvlink0_status_pll_off_f(u32 v) 120{ 121 return (v & 0x1U) << 5U; 122} 123static inline u32 trim_sys_nvlink0_status_pll_off_m(void) 124{ 125 return 0x1U << 5U; 126} 127static inline u32 trim_sys_nvlink0_status_pll_off_v(u32 r) 128{ 129 return (r >> 5U) & 0x1U; 130} 131static inline u32 trim_sys_nvl_common_clk_alt_switch_r(void) 132{ 133 return 0x001371c4U; 134} 135static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_f(u32 v) 136{ 137 return (v & 0x3U) << 16U; 138} 139static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_m(void) 140{ 141 return 0x3U << 16U; 142} 143static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_v(u32 r) 144{ 145 return (r >> 16U) & 0x3U; 146} 147static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal4x_v(void) 148{ 149 return 0x00000003U; 150} 151static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal4x_f(void) 152{ 153 return 0x30000U; 154} 155static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal_in_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal_in_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_f(u32 v) 164{ 165 return (v & 0x3U) << 0U; 166} 167static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_m(void) 168{ 169 return 0x3U << 0U; 170} 171static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_v(u32 r) 172{ 173 return (r >> 0U) & 0x3U; 174} 175static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_slowclk_v(void) 176{ 177 return 
0x00000000U; 178} 179static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_slowclk_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_miscclk_v(void) 184{ 185 return 0x00000002U; 186} 187static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_miscclk_f(void) 188{ 189 return 0x2U; 190} 191static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_onesrcclk_v(void) 192{ 193 return 0x00000003U; 194} 195static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_onesrcclk_f(void) 196{ 197 return 0x3U; 198} 199static inline u32 trim_gpc_bcast_fr_clk_cntr_ncgpcclk_cfg_r(void) 200{ 201 return 0x00132a70U; 202} 203static inline u32 trim_gpc_bcast_fr_clk_cntr_ncgpcclk_cfg_source_gpcclk_f(void) 204{ 205 return 0x10000000U; 206} 207static inline u32 trim_gpc_bcast_fr_clk_cntr_ncgpcclk_cnt0_r(void) 208{ 209 return 0x00132a74U; 210} 211static inline u32 trim_gpc_bcast_fr_clk_cntr_ncgpcclk_cnt1_r(void) 212{ 213 return 0x00132a78U; 214} 215static inline u32 trim_sys_nafll_fr_clk_cntr_xbarclk_cfg_r(void) 216{ 217 return 0x00136470U; 218} 219static inline u32 trim_sys_nafll_fr_clk_cntr_xbarclk_cfg_source_xbarclk_f(void) 220{ 221 return 0x10000000U; 222} 223static inline u32 trim_sys_nafll_fr_clk_cntr_xbarclk_cntr0_r(void) 224{ 225 return 0x00136474U; 226} 227static inline u32 trim_sys_nafll_fr_clk_cntr_xbarclk_cntr1_r(void) 228{ 229 return 0x00136478U; 230} 231static inline u32 trim_sys_fr_clk_cntr_sysclk_cfg_r(void) 232{ 233 return 0x0013762cU; 234} 235static inline u32 trim_sys_fr_clk_cntr_sysclk_cfg_source_sysclk_f(void) 236{ 237 return 0x20000000U; 238} 239static inline u32 trim_sys_fr_clk_cntr_sysclk_cntr0_r(void) 240{ 241 return 0x00137630U; 242} 243static inline u32 trim_sys_fr_clk_cntr_sysclk_cntr1_r(void) 244{ 245 return 0x00137634U; 246} 247#endif
diff --git a/include/nvgpu/hw/gv100/hw_usermode_gv100.h b/include/nvgpu/hw/gv100/hw_usermode_gv100.h
deleted file mode 100644
index 7b1d861..0000000
--- a/include/nvgpu/hw/gv100/hw_usermode_gv100.h
+++ /dev/null
@@ -1,95 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_usermode_gv100_h_ 57#define _hw_usermode_gv100_h_ 58 59static inline u32 usermode_cfg0_r(void) 60{ 61 return 0x00810000U; 62} 63static inline u32 usermode_cfg0_class_id_f(u32 v) 64{ 65 return (v & 0xffffU) << 0U; 66} 67static inline u32 usermode_cfg0_class_id_value_v(void) 68{ 69 return 0x0000c361U; 70} 71static inline u32 usermode_time_0_r(void) 72{ 73 return 0x00810080U; 74} 75static inline u32 usermode_time_0_nsec_f(u32 v) 76{ 77 return (v & 0x7ffffffU) << 5U; 78} 79static inline u32 usermode_time_1_r(void) 80{ 81 return 0x00810084U; 82} 83static inline u32 usermode_time_1_nsec_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 0U; 86} 87static inline u32 usermode_notify_channel_pending_r(void) 88{ 89 return 0x00810090U; 90} 91static inline u32 usermode_notify_channel_pending_id_f(u32 v) 92{ 93 return (v & 0xffffffffU) << 0U; 94} 95#endif
diff --git a/include/nvgpu/hw/gv100/hw_xp_gv100.h b/include/nvgpu/hw/gv100/hw_xp_gv100.h
deleted file mode 100644
index 4296e04..0000000
--- a/include/nvgpu/hw/gv100/hw_xp_gv100.h
+++ /dev/null
@@ -1,143 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_xp_gv100_h_ 57#define _hw_xp_gv100_h_ 58 59static inline u32 xp_dl_mgr_r(u32 i) 60{ 61 return 0x0008b8c0U + i*4U; 62} 63static inline u32 xp_dl_mgr_safe_timing_f(u32 v) 64{ 65 return (v & 0x1U) << 2U; 66} 67static inline u32 xp_pl_link_config_r(u32 i) 68{ 69 return 0x0008c040U + i*4U; 70} 71static inline u32 xp_pl_link_config_ltssm_status_f(u32 v) 72{ 73 return (v & 0x1U) << 4U; 74} 75static inline u32 xp_pl_link_config_ltssm_status_idle_v(void) 76{ 77 return 0x00000000U; 78} 79static inline u32 xp_pl_link_config_ltssm_directive_f(u32 v) 80{ 81 return (v & 0xfU) << 0U; 82} 83static inline u32 xp_pl_link_config_ltssm_directive_m(void) 84{ 85 return 0xfU << 0U; 86} 87static inline u32 xp_pl_link_config_ltssm_directive_normal_operations_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 xp_pl_link_config_ltssm_directive_change_speed_v(void) 92{ 93 return 0x00000001U; 94} 95static inline u32 xp_pl_link_config_max_link_rate_f(u32 v) 96{ 97 return (v & 0x3U) << 18U; 98} 99static inline u32 xp_pl_link_config_max_link_rate_m(void) 100{ 101 return 0x3U << 18U; 102} 103static inline u32 xp_pl_link_config_max_link_rate_2500_mtps_v(void) 104{ 105 return 0x00000002U; 106} 107static inline u32 
xp_pl_link_config_max_link_rate_5000_mtps_v(void) 108{ 109 return 0x00000001U; 110} 111static inline u32 xp_pl_link_config_max_link_rate_8000_mtps_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 xp_pl_link_config_target_tx_width_f(u32 v) 116{ 117 return (v & 0x7U) << 20U; 118} 119static inline u32 xp_pl_link_config_target_tx_width_m(void) 120{ 121 return 0x7U << 20U; 122} 123static inline u32 xp_pl_link_config_target_tx_width_x1_v(void) 124{ 125 return 0x00000007U; 126} 127static inline u32 xp_pl_link_config_target_tx_width_x2_v(void) 128{ 129 return 0x00000006U; 130} 131static inline u32 xp_pl_link_config_target_tx_width_x4_v(void) 132{ 133 return 0x00000005U; 134} 135static inline u32 xp_pl_link_config_target_tx_width_x8_v(void) 136{ 137 return 0x00000004U; 138} 139static inline u32 xp_pl_link_config_target_tx_width_x16_v(void) 140{ 141 return 0x00000000U; 142} 143#endif
diff --git a/include/nvgpu/hw/gv100/hw_xve_gv100.h b/include/nvgpu/hw/gv100/hw_xve_gv100.h
deleted file mode 100644
index fc7aa72..0000000
--- a/include/nvgpu/hw/gv100/hw_xve_gv100.h
+++ /dev/null
@@ -1,207 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_xve_gv100_h_ 57#define _hw_xve_gv100_h_ 58 59static inline u32 xve_rom_ctrl_r(void) 60{ 61 return 0x00000050U; 62} 63static inline u32 xve_rom_ctrl_rom_shadow_f(u32 v) 64{ 65 return (v & 0x1U) << 0U; 66} 67static inline u32 xve_rom_ctrl_rom_shadow_disabled_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 xve_rom_ctrl_rom_shadow_enabled_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 xve_link_control_status_r(void) 76{ 77 return 0x00000088U; 78} 79static inline u32 xve_link_control_status_link_speed_m(void) 80{ 81 return 0xfU << 16U; 82} 83static inline u32 xve_link_control_status_link_speed_v(u32 r) 84{ 85 return (r >> 16U) & 0xfU; 86} 87static inline u32 xve_link_control_status_link_speed_link_speed_2p5_v(void) 88{ 89 return 0x00000001U; 90} 91static inline u32 xve_link_control_status_link_speed_link_speed_5p0_v(void) 92{ 93 return 0x00000002U; 94} 95static inline u32 xve_link_control_status_link_speed_link_speed_8p0_v(void) 96{ 97 return 0x00000003U; 98} 99static inline u32 xve_link_control_status_link_width_m(void) 100{ 101 return 0x3fU << 20U; 102} 103static inline u32 xve_link_control_status_link_width_v(u32 r) 104{ 105 return (r >> 20U) & 0x3fU; 106} 107static inline u32 xve_link_control_status_link_width_x1_v(void) 
108{ 109 return 0x00000001U; 110} 111static inline u32 xve_link_control_status_link_width_x2_v(void) 112{ 113 return 0x00000002U; 114} 115static inline u32 xve_link_control_status_link_width_x4_v(void) 116{ 117 return 0x00000004U; 118} 119static inline u32 xve_link_control_status_link_width_x8_v(void) 120{ 121 return 0x00000008U; 122} 123static inline u32 xve_link_control_status_link_width_x16_v(void) 124{ 125 return 0x00000010U; 126} 127static inline u32 xve_priv_xv_r(void) 128{ 129 return 0x00000150U; 130} 131static inline u32 xve_priv_xv_cya_l0s_enable_f(u32 v) 132{ 133 return (v & 0x1U) << 7U; 134} 135static inline u32 xve_priv_xv_cya_l0s_enable_m(void) 136{ 137 return 0x1U << 7U; 138} 139static inline u32 xve_priv_xv_cya_l0s_enable_v(u32 r) 140{ 141 return (r >> 7U) & 0x1U; 142} 143static inline u32 xve_priv_xv_cya_l1_enable_f(u32 v) 144{ 145 return (v & 0x1U) << 8U; 146} 147static inline u32 xve_priv_xv_cya_l1_enable_m(void) 148{ 149 return 0x1U << 8U; 150} 151static inline u32 xve_priv_xv_cya_l1_enable_v(u32 r) 152{ 153 return (r >> 8U) & 0x1U; 154} 155static inline u32 xve_cya_2_r(void) 156{ 157 return 0x00000704U; 158} 159static inline u32 xve_reset_r(void) 160{ 161 return 0x00000718U; 162} 163static inline u32 xve_reset_reset_m(void) 164{ 165 return 0x1U << 0U; 166} 167static inline u32 xve_reset_gpu_on_sw_reset_m(void) 168{ 169 return 0x1U << 1U; 170} 171static inline u32 xve_reset_counter_en_m(void) 172{ 173 return 0x1U << 2U; 174} 175static inline u32 xve_reset_counter_val_f(u32 v) 176{ 177 return (v & 0x7ffU) << 4U; 178} 179static inline u32 xve_reset_counter_val_m(void) 180{ 181 return 0x7ffU << 4U; 182} 183static inline u32 xve_reset_counter_val_v(u32 r) 184{ 185 return (r >> 4U) & 0x7ffU; 186} 187static inline u32 xve_reset_clock_on_sw_reset_m(void) 188{ 189 return 0x1U << 15U; 190} 191static inline u32 xve_reset_clock_counter_en_m(void) 192{ 193 return 0x1U << 16U; 194} 195static inline u32 xve_reset_clock_counter_val_f(u32 v) 196{ 197 return (v & 
0x7ffU) << 17U; 198} 199static inline u32 xve_reset_clock_counter_val_m(void) 200{ 201 return 0x7ffU << 17U; 202} 203static inline u32 xve_reset_clock_counter_val_v(u32 r) 204{ 205 return (r >> 17U) & 0x7ffU; 206} 207#endif
diff --git a/include/nvgpu/hw/gv11b/hw_bus_gv11b.h b/include/nvgpu/hw/gv11b/hw_bus_gv11b.h
deleted file mode 100644
index d1d9b34..0000000
--- a/include/nvgpu/hw/gv11b/hw_bus_gv11b.h
+++ /dev/null
@@ -1,223 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_bus_gv11b_h_ 57#define _hw_bus_gv11b_h_ 58 59static inline u32 bus_bar0_window_r(void) 60{ 61 return 0x00001700U; 62} 63static inline u32 bus_bar0_window_base_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 bus_bar0_window_target_vid_mem_f(void) 68{ 69 return 0x0U; 70} 71static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) 72{ 73 return 0x2000000U; 74} 75static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) 76{ 77 return 0x3000000U; 78} 79static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) 80{ 81 return 0x00000010U; 82} 83static inline u32 bus_bar1_block_r(void) 84{ 85 return 0x00001704U; 86} 87static inline u32 bus_bar1_block_ptr_f(u32 v) 88{ 89 return (v & 0xfffffffU) << 0U; 90} 91static inline u32 bus_bar1_block_target_vid_mem_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) 96{ 97 return 0x20000000U; 98} 99static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) 100{ 101 return 0x30000000U; 102} 103static inline u32 bus_bar1_block_mode_virtual_f(void) 104{ 105 return 0x80000000U; 106} 107static inline u32 bus_bar2_block_r(void) 108{ 109 return 0x00001714U; 110} 111static inline u32 bus_bar2_block_ptr_f(u32 v) 
112{ 113 return (v & 0xfffffffU) << 0U; 114} 115static inline u32 bus_bar2_block_target_vid_mem_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void) 124{ 125 return 0x30000000U; 126} 127static inline u32 bus_bar2_block_mode_virtual_f(void) 128{ 129 return 0x80000000U; 130} 131static inline u32 bus_bar1_block_ptr_shift_v(void) 132{ 133 return 0x0000000cU; 134} 135static inline u32 bus_bar2_block_ptr_shift_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 bus_bind_status_r(void) 140{ 141 return 0x00001710U; 142} 143static inline u32 bus_bind_status_bar1_pending_v(u32 r) 144{ 145 return (r >> 0U) & 0x1U; 146} 147static inline u32 bus_bind_status_bar1_pending_empty_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 bus_bind_status_bar1_pending_busy_f(void) 152{ 153 return 0x1U; 154} 155static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) 156{ 157 return (r >> 1U) & 0x1U; 158} 159static inline u32 bus_bind_status_bar1_outstanding_false_f(void) 160{ 161 return 0x0U; 162} 163static inline u32 bus_bind_status_bar1_outstanding_true_f(void) 164{ 165 return 0x2U; 166} 167static inline u32 bus_bind_status_bar2_pending_v(u32 r) 168{ 169 return (r >> 2U) & 0x1U; 170} 171static inline u32 bus_bind_status_bar2_pending_empty_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 bus_bind_status_bar2_pending_busy_f(void) 176{ 177 return 0x4U; 178} 179static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) 180{ 181 return (r >> 3U) & 0x1U; 182} 183static inline u32 bus_bind_status_bar2_outstanding_false_f(void) 184{ 185 return 0x0U; 186} 187static inline u32 bus_bind_status_bar2_outstanding_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 bus_intr_0_r(void) 192{ 193 return 0x00001100U; 194} 195static inline u32 bus_intr_0_pri_squash_m(void) 196{ 197 return 0x1U << 1U; 198} 199static inline u32 
bus_intr_0_pri_fecserr_m(void) 200{ 201 return 0x1U << 2U; 202} 203static inline u32 bus_intr_0_pri_timeout_m(void) 204{ 205 return 0x1U << 3U; 206} 207static inline u32 bus_intr_en_0_r(void) 208{ 209 return 0x00001140U; 210} 211static inline u32 bus_intr_en_0_pri_squash_m(void) 212{ 213 return 0x1U << 1U; 214} 215static inline u32 bus_intr_en_0_pri_fecserr_m(void) 216{ 217 return 0x1U << 2U; 218} 219static inline u32 bus_intr_en_0_pri_timeout_m(void) 220{ 221 return 0x1U << 3U; 222} 223#endif
diff --git a/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h b/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h
deleted file mode 100644
index e21a473..0000000
--- a/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ccsr_gv11b_h_ 57#define _hw_ccsr_gv11b_h_ 58 59static inline u32 ccsr_channel_inst_r(u32 i) 60{ 61 return 0x00800000U + i*8U; 62} 63static inline u32 ccsr_channel_inst__size_1_v(void) 64{ 65 return 0x00000200U; 66} 67static inline u32 ccsr_channel_inst_ptr_f(u32 v) 68{ 69 return (v & 0xfffffffU) << 0U; 70} 71static inline u32 ccsr_channel_inst_target_vid_mem_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) 76{ 77 return 0x20000000U; 78} 79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) 80{ 81 return 0x30000000U; 82} 83static inline u32 ccsr_channel_inst_bind_false_f(void) 84{ 85 return 0x0U; 86} 87static inline u32 ccsr_channel_inst_bind_true_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 ccsr_channel_r(u32 i) 92{ 93 return 0x00800004U + i*8U; 94} 95static inline u32 ccsr_channel__size_1_v(void) 96{ 97 return 0x00000200U; 98} 99static inline u32 ccsr_channel_enable_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 ccsr_channel_enable_set_f(u32 v) 104{ 105 return (v & 0x1U) << 10U; 106} 107static inline u32 ccsr_channel_enable_set_true_f(void) 108{ 109 return 0x400U; 110} 111static inline u32 ccsr_channel_enable_clr_true_f(void) 112{ 113 
return 0x800U; 114} 115static inline u32 ccsr_channel_status_v(u32 r) 116{ 117 return (r >> 24U) & 0xfU; 118} 119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) 120{ 121 return 0x00000002U; 122} 123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) 124{ 125 return 0x00000004U; 126} 127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) 128{ 129 return 0x0000000aU; 130} 131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) 132{ 133 return 0x0000000bU; 134} 135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) 136{ 137 return 0x0000000cU; 138} 139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) 140{ 141 return 0x0000000dU; 142} 143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) 144{ 145 return 0x0000000eU; 146} 147static inline u32 ccsr_channel_next_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 ccsr_channel_next_true_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 ccsr_channel_force_ctx_reload_true_f(void) 156{ 157 return 0x100U; 158} 159static inline u32 ccsr_channel_pbdma_faulted_f(u32 v) 160{ 161 return (v & 0x1U) << 22U; 162} 163static inline u32 ccsr_channel_pbdma_faulted_reset_f(void) 164{ 165 return 0x400000U; 166} 167static inline u32 ccsr_channel_eng_faulted_f(u32 v) 168{ 169 return (v & 0x1U) << 23U; 170} 171static inline u32 ccsr_channel_eng_faulted_v(u32 r) 172{ 173 return (r >> 23U) & 0x1U; 174} 175static inline u32 ccsr_channel_eng_faulted_reset_f(void) 176{ 177 return 0x800000U; 178} 179static inline u32 ccsr_channel_eng_faulted_true_v(void) 180{ 181 return 0x00000001U; 182} 183static inline u32 ccsr_channel_busy_v(u32 r) 184{ 185 return (r >> 28U) & 0x1U; 186} 187#endif
diff --git a/include/nvgpu/hw/gv11b/hw_ce_gv11b.h b/include/nvgpu/hw/gv11b/hw_ce_gv11b.h
deleted file mode 100644
index 57a76e6..0000000
--- a/include/nvgpu/hw/gv11b/hw_ce_gv11b.h
+++ /dev/null
@@ -1,115 +0,0 @@ 1/* 2 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ce_gv11b_h_ 57#define _hw_ce_gv11b_h_ 58 59static inline u32 ce_intr_status_r(u32 i) 60{ 61 return 0x00104410U + i*128U; 62} 63static inline u32 ce_intr_status_blockpipe_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 ce_intr_status_blockpipe_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 ce_intr_status_nonblockpipe_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 ce_intr_status_nonblockpipe_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 ce_intr_status_launcherr_pending_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 ce_intr_status_launcherr_reset_f(void) 84{ 85 return 0x4U; 86} 87static inline u32 ce_intr_status_invalid_config_pending_f(void) 88{ 89 return 0x8U; 90} 91static inline u32 ce_intr_status_invalid_config_reset_f(void) 92{ 93 return 0x8U; 94} 95static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void) 96{ 97 return 0x10U; 98} 99static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void) 100{ 101 return 0x10U; 102} 103static inline u32 ce_pce_map_r(void) 104{ 105 return 0x00104028U; 106} 107static inline u32 ce_lce_opt_r(u32 i) 108{ 109 return 0x00104414U + i*128U; 110} 111static inline u32 ce_lce_opt_force_barriers_npl__prod_f(void) 112{ 113 return 0x8U; 
114} 115#endif
diff --git a/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h b/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h
deleted file mode 100644
index 8b095b1..0000000
--- a/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h
+++ /dev/null
@@ -1,463 +0,0 @@ 1/* 2 * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ctxsw_prog_gv11b_h_ 57#define _hw_ctxsw_prog_gv11b_h_ 58 59static inline u32 ctxsw_prog_fecs_header_v(void) 60{ 61 return 0x00000100U; 62} 63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) 64{ 65 return 0x00000008U; 66} 67static inline u32 ctxsw_prog_main_image_ctl_o(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v) 72{ 73 return (v & 0x3fU) << 0U; 74} 75static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void) 76{ 77 return 0x00000000U; 78} 79static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void) 80{ 81 return 0x00000008U; 82} 83static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void) 84{ 85 return 0x00000010U; 86} 87static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void) 88{ 89 return 0x00000011U; 90} 91static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void) 92{ 93 return 0x00000012U; 94} 95static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void) 96{ 97 return 0x00000020U; 98} 99static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void) 100{ 101 return 0x00000021U; 102} 103static inline u32 ctxsw_prog_main_image_patch_count_o(void) 104{ 105 return 0x00000010U; 106} 107static inline u32 
ctxsw_prog_main_image_context_id_o(void) 108{ 109 return 0x000000f0U; 110} 111static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) 112{ 113 return 0x00000014U; 114} 115static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) 116{ 117 return 0x00000018U; 118} 119static inline u32 ctxsw_prog_main_image_zcull_o(void) 120{ 121 return 0x0000001cU; 122} 123static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) 124{ 125 return 0x00000001U; 126} 127static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) 128{ 129 return 0x00000002U; 130} 131static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) 132{ 133 return 0x00000020U; 134} 135static inline u32 ctxsw_prog_main_image_pm_o(void) 136{ 137 return 0x00000028U; 138} 139static inline u32 ctxsw_prog_main_image_pm_mode_m(void) 140{ 141 return 0x7U << 0U; 142} 143static inline u32 ctxsw_prog_main_image_pm_mode_ctxsw_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f(void) 152{ 153 return 0x2U; 154} 155static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) 156{ 157 return 0x7U << 3U; 158} 159static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) 160{ 161 return 0x8U; 162} 163static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) 168{ 169 return 0x0000002cU; 170} 171static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) 172{ 173 return 0x000000f4U; 174} 175static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) 176{ 177 return 0x000000d0U; 178} 179static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) 180{ 181 return 0x000000d4U; 182} 183static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) 184{ 185 return 0x000000d8U; 186} 187static inline u32 
ctxsw_prog_main_image_num_cilp_save_ops_o(void) 188{ 189 return 0x000000dcU; 190} 191static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) 192{ 193 return 0x000000f8U; 194} 195static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void) 196{ 197 return 0x00000060U; 198} 199static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v) 200{ 201 return (v & 0x1ffffU) << 0U; 202} 203static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void) 204{ 205 return 0x00000094U; 206} 207static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void) 208{ 209 return 0x00000064U; 210} 211static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v) 212{ 213 return (v & 0x1ffffU) << 0U; 214} 215static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) 216{ 217 return 0x00000068U; 218} 219static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v) 220{ 221 return (v & 0xffffffffU) << 0U; 222} 223static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void) 224{ 225 return 0x00000070U; 226} 227static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v) 228{ 229 return (v & 0x1ffffU) << 0U; 230} 231static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void) 232{ 233 return 0x00000074U; 234} 235static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v) 236{ 237 return (v & 0xffffffffU) << 0U; 238} 239static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void) 240{ 241 return 0x00000078U; 242} 243static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v) 244{ 245 return (v & 0x1ffffU) << 0U; 246} 247static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void) 248{ 249 return 0x0000007cU; 250} 251static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v) 252{ 253 return (v & 0xffffffffU) << 0U; 254} 255static inline u32 ctxsw_prog_main_image_magic_value_o(void) 256{ 257 return 0x000000fcU; 258} 259static inline u32 
ctxsw_prog_main_image_magic_value_v_value_v(void) 260{ 261 return 0x600dc0deU; 262} 263static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) 264{ 265 return 0x0000000cU; 266} 267static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) 268{ 269 return (r >> 0U) & 0xffffU; 270} 271static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void) 272{ 273 return 0x000000b8U; 274} 275static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v) 276{ 277 return (v & 0xffffffffU) << 0U; 278} 279static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void) 280{ 281 return 0x000000bcU; 282} 283static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v) 284{ 285 return (v & 0x1ffffU) << 0U; 286} 287static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void) 288{ 289 return 0x000000c0U; 290} 291static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v) 292{ 293 return (v & 0xffffffffU) << 0U; 294} 295static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void) 296{ 297 return 0x000000c4U; 298} 299static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v) 300{ 301 return (v & 0x1ffffU) << 0U; 302} 303static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void) 304{ 305 return 0x000000c8U; 306} 307static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v) 308{ 309 return (v & 0xffffffffU) << 0U; 310} 311static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void) 312{ 313 return 0x000000ccU; 314} 315static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v) 316{ 317 return (v & 0x1ffffU) << 0U; 318} 319static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void) 320{ 321 return 0x000000e0U; 322} 323static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v) 324{ 325 return (v & 0xffffffffU) << 0U; 326} 327static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void) 328{ 329 return 0x000000e4U; 330} 331static 
inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v) 332{ 333 return (v & 0x1ffffU) << 0U; 334} 335static inline u32 ctxsw_prog_local_image_ppc_info_o(void) 336{ 337 return 0x000000f4U; 338} 339static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) 340{ 341 return (r >> 0U) & 0xffffU; 342} 343static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) 344{ 345 return (r >> 16U) & 0xffffU; 346} 347static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) 348{ 349 return 0x000000f8U; 350} 351static inline u32 ctxsw_prog_local_magic_value_o(void) 352{ 353 return 0x000000fcU; 354} 355static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) 356{ 357 return 0xad0becabU; 358} 359static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) 360{ 361 return 0x000000ecU; 362} 363static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) 364{ 365 return (r >> 0U) & 0xffffU; 366} 367static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) 368{ 369 return (r >> 16U) & 0xffU; 370} 371static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) 372{ 373 return 0x00000100U; 374} 375static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) 376{ 377 return 0x00000004U; 378} 379static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) 380{ 381 return 0x00000000U; 382} 383static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) 384{ 385 return 0x00000002U; 386} 387static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) 388{ 389 return 0x000000a0U; 390} 391static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) 392{ 393 return 2U; 394} 395static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) 396{ 397 return (v & 0x3U) << 0U; 398} 399static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) 400{ 401 return 0x3U << 0U; 402} 403static inline u32 
ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) 404{ 405 return (r >> 0U) & 0x3U; 406} 407static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) 408{ 409 return 0x0U; 410} 411static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) 412{ 413 return 0x2U; 414} 415static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) 416{ 417 return 0x000000a4U; 418} 419static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) 420{ 421 return 0x000000a8U; 422} 423static inline u32 ctxsw_prog_main_image_misc_options_o(void) 424{ 425 return 0x0000003cU; 426} 427static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) 428{ 429 return 0x1U << 3U; 430} 431static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) 432{ 433 return 0x0U; 434} 435static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) 436{ 437 return 0x00000080U; 438} 439static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) 440{ 441 return (v & 0x3U) << 0U; 442} 443static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) 444{ 445 return 0x1U; 446} 447static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) 448{ 449 return 0x00000084U; 450} 451static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) 452{ 453 return (v & 0x3U) << 0U; 454} 455static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) 456{ 457 return 0x1U; 458} 459static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) 460{ 461 return 0x2U; 462} 463#endif
diff --git a/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h b/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h
deleted file mode 100644
index 31e883e..0000000
--- a/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h
+++ /dev/null
@@ -1,603 +0,0 @@ 1/* 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_falcon_gv11b_h_ 57#define _hw_falcon_gv11b_h_ 58 59static inline u32 falcon_falcon_irqsset_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 falcon_falcon_irqsclr_r(void) 68{ 69 return 0x00000004U; 70} 71static inline u32 falcon_falcon_irqstat_r(void) 72{ 73 return 0x00000008U; 74} 75static inline u32 falcon_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 falcon_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 falcon_falcon_irqmode_r(void) 88{ 89 return 0x0000000cU; 90} 91static inline u32 falcon_falcon_irqmset_r(void) 92{ 93 return 0x00000010U; 94} 95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) 96{ 97 return (v & 0x1U) << 0U; 98} 99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) 100{ 101 return (v & 0x1U) << 1U; 102} 103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) 104{ 105 return (v & 0x1U) << 2U; 106} 107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) 108{ 109 return (v & 0x1U) << 3U; 110} 111static inline u32 falcon_falcon_irqmset_halt_f(u32 v) 112{ 113 
return (v & 0x1U) << 4U; 114} 115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) 120{ 121 return (v & 0x1U) << 6U; 122} 123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) 124{ 125 return (v & 0x1U) << 7U; 126} 127static inline u32 falcon_falcon_irqmclr_r(void) 128{ 129 return 0x00000014U; 130} 131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) 132{ 133 return (v & 0x1U) << 0U; 134} 135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) 136{ 137 return (v & 0x1U) << 1U; 138} 139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) 140{ 141 return (v & 0x1U) << 2U; 142} 143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) 144{ 145 return (v & 0x1U) << 3U; 146} 147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) 148{ 149 return (v & 0x1U) << 4U; 150} 151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) 152{ 153 return (v & 0x1U) << 5U; 154} 155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) 156{ 157 return (v & 0x1U) << 6U; 158} 159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) 160{ 161 return (v & 0x1U) << 7U; 162} 163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) 164{ 165 return (v & 0xffU) << 8U; 166} 167static inline u32 falcon_falcon_irqmask_r(void) 168{ 169 return 0x00000018U; 170} 171static inline u32 falcon_falcon_irqdest_r(void) 172{ 173 return 0x0000001cU; 174} 175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) 176{ 177 return (v & 0x1U) << 0U; 178} 179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) 180{ 181 return (v & 0x1U) << 1U; 182} 183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) 184{ 185 return (v & 0x1U) << 2U; 186} 187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) 188{ 189 return (v & 0x1U) << 3U; 190} 191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) 192{ 193 return (v & 0x1U) << 4U; 194} 195static inline u32 
falcon_falcon_irqdest_host_exterr_f(u32 v) 196{ 197 return (v & 0x1U) << 5U; 198} 199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) 200{ 201 return (v & 0x1U) << 6U; 202} 203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) 204{ 205 return (v & 0x1U) << 7U; 206} 207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) 208{ 209 return (v & 0xffU) << 8U; 210} 211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) 212{ 213 return (v & 0x1U) << 16U; 214} 215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) 216{ 217 return (v & 0x1U) << 17U; 218} 219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) 220{ 221 return (v & 0x1U) << 18U; 222} 223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) 224{ 225 return (v & 0x1U) << 19U; 226} 227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) 228{ 229 return (v & 0x1U) << 20U; 230} 231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) 232{ 233 return (v & 0x1U) << 21U; 234} 235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) 236{ 237 return (v & 0x1U) << 22U; 238} 239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) 240{ 241 return (v & 0x1U) << 23U; 242} 243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) 244{ 245 return (v & 0xffU) << 24U; 246} 247static inline u32 falcon_falcon_curctx_r(void) 248{ 249 return 0x00000050U; 250} 251static inline u32 falcon_falcon_nxtctx_r(void) 252{ 253 return 0x00000054U; 254} 255static inline u32 falcon_falcon_mailbox0_r(void) 256{ 257 return 0x00000040U; 258} 259static inline u32 falcon_falcon_mailbox1_r(void) 260{ 261 return 0x00000044U; 262} 263static inline u32 falcon_falcon_itfen_r(void) 264{ 265 return 0x00000048U; 266} 267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) 268{ 269 return 0x1U; 270} 271static inline u32 falcon_falcon_idlestate_r(void) 272{ 273 return 0x0000004cU; 274} 275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) 
276{ 277 return (r >> 0U) & 0x1U; 278} 279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) 280{ 281 return (r >> 1U) & 0x7fffU; 282} 283static inline u32 falcon_falcon_os_r(void) 284{ 285 return 0x00000080U; 286} 287static inline u32 falcon_falcon_engctl_r(void) 288{ 289 return 0x000000a4U; 290} 291static inline u32 falcon_falcon_cpuctl_r(void) 292{ 293 return 0x00000100U; 294} 295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) 296{ 297 return (v & 0x1U) << 1U; 298} 299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) 300{ 301 return (v & 0x1U) << 2U; 302} 303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) 304{ 305 return (v & 0x1U) << 3U; 306} 307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) 308{ 309 return (v & 0x1U) << 4U; 310} 311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) 312{ 313 return 0x1U << 4U; 314} 315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) 316{ 317 return (r >> 4U) & 0x1U; 318} 319static inline u32 falcon_falcon_cpuctl_stopped_m(void) 320{ 321 return 0x1U << 5U; 322} 323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 324{ 325 return (v & 0x1U) << 6U; 326} 327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void) 328{ 329 return 0x1U << 6U; 330} 331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 332{ 333 return (r >> 6U) & 0x1U; 334} 335static inline u32 falcon_falcon_cpuctl_alias_r(void) 336{ 337 return 0x00000130U; 338} 339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) 340{ 341 return (v & 0x1U) << 1U; 342} 343static inline u32 falcon_falcon_imemc_r(u32 i) 344{ 345 return 0x00000180U + i*16U; 346} 347static inline u32 falcon_falcon_imemc_offs_f(u32 v) 348{ 349 return (v & 0x3fU) << 2U; 350} 351static inline u32 falcon_falcon_imemc_blk_f(u32 v) 352{ 353 return (v & 0xffU) << 8U; 354} 355static inline u32 falcon_falcon_imemc_aincw_f(u32 v) 356{ 357 return (v & 0x1U) << 24U; 358} 359static inline u32 
falcon_falcon_imemc_secure_f(u32 v) 360{ 361 return (v & 0x1U) << 28U; 362} 363static inline u32 falcon_falcon_imemd_r(u32 i) 364{ 365 return 0x00000184U + i*16U; 366} 367static inline u32 falcon_falcon_imemt_r(u32 i) 368{ 369 return 0x00000188U + i*16U; 370} 371static inline u32 falcon_falcon_sctl_r(void) 372{ 373 return 0x00000240U; 374} 375static inline u32 falcon_falcon_mmu_phys_sec_r(void) 376{ 377 return 0x00100ce4U; 378} 379static inline u32 falcon_falcon_bootvec_r(void) 380{ 381 return 0x00000104U; 382} 383static inline u32 falcon_falcon_bootvec_vec_f(u32 v) 384{ 385 return (v & 0xffffffffU) << 0U; 386} 387static inline u32 falcon_falcon_dmactl_r(void) 388{ 389 return 0x0000010cU; 390} 391static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) 392{ 393 return 0x1U << 1U; 394} 395static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) 396{ 397 return 0x1U << 2U; 398} 399static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) 400{ 401 return (v & 0x1U) << 0U; 402} 403static inline u32 falcon_falcon_hwcfg_r(void) 404{ 405 return 0x00000108U; 406} 407static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) 408{ 409 return (r >> 0U) & 0x1ffU; 410} 411static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) 412{ 413 return (r >> 9U) & 0x1ffU; 414} 415static inline u32 falcon_falcon_dmatrfbase_r(void) 416{ 417 return 0x00000110U; 418} 419static inline u32 falcon_falcon_dmatrfbase1_r(void) 420{ 421 return 0x00000128U; 422} 423static inline u32 falcon_falcon_dmatrfmoffs_r(void) 424{ 425 return 0x00000114U; 426} 427static inline u32 falcon_falcon_dmatrfcmd_r(void) 428{ 429 return 0x00000118U; 430} 431static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) 432{ 433 return (v & 0x1U) << 4U; 434} 435static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) 436{ 437 return (v & 0x1U) << 5U; 438} 439static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) 440{ 441 return (v & 0x7U) << 8U; 442} 443static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) 444{ 
445 return (v & 0x7U) << 12U; 446} 447static inline u32 falcon_falcon_dmatrffboffs_r(void) 448{ 449 return 0x0000011cU; 450} 451static inline u32 falcon_falcon_imctl_debug_r(void) 452{ 453 return 0x0000015cU; 454} 455static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) 456{ 457 return (v & 0xffffffU) << 0U; 458} 459static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) 460{ 461 return (v & 0x7U) << 24U; 462} 463static inline u32 falcon_falcon_imstat_r(void) 464{ 465 return 0x00000144U; 466} 467static inline u32 falcon_falcon_traceidx_r(void) 468{ 469 return 0x00000148U; 470} 471static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) 472{ 473 return (r >> 16U) & 0xffU; 474} 475static inline u32 falcon_falcon_traceidx_idx_f(u32 v) 476{ 477 return (v & 0xffU) << 0U; 478} 479static inline u32 falcon_falcon_tracepc_r(void) 480{ 481 return 0x0000014cU; 482} 483static inline u32 falcon_falcon_tracepc_pc_v(u32 r) 484{ 485 return (r >> 0U) & 0xffffffU; 486} 487static inline u32 falcon_falcon_exterraddr_r(void) 488{ 489 return 0x00000168U; 490} 491static inline u32 falcon_falcon_exterrstat_r(void) 492{ 493 return 0x0000016cU; 494} 495static inline u32 falcon_falcon_exterrstat_valid_m(void) 496{ 497 return 0x1U << 31U; 498} 499static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) 500{ 501 return (r >> 31U) & 0x1U; 502} 503static inline u32 falcon_falcon_exterrstat_valid_true_v(void) 504{ 505 return 0x00000001U; 506} 507static inline u32 falcon_falcon_icd_cmd_r(void) 508{ 509 return 0x00000200U; 510} 511static inline u32 falcon_falcon_icd_cmd_opc_s(void) 512{ 513 return 4U; 514} 515static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 516{ 517 return (v & 0xfU) << 0U; 518} 519static inline u32 falcon_falcon_icd_cmd_opc_m(void) 520{ 521 return 0xfU << 0U; 522} 523static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) 524{ 525 return (r >> 0U) & 0xfU; 526} 527static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) 528{ 529 return 0x8U; 530} 531static inline u32 
falcon_falcon_icd_cmd_opc_rstat_f(void) 532{ 533 return 0xeU; 534} 535static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) 536{ 537 return (v & 0x1fU) << 8U; 538} 539static inline u32 falcon_falcon_icd_rdata_r(void) 540{ 541 return 0x0000020cU; 542} 543static inline u32 falcon_falcon_dmemc_r(u32 i) 544{ 545 return 0x000001c0U + i*8U; 546} 547static inline u32 falcon_falcon_dmemc_offs_f(u32 v) 548{ 549 return (v & 0x3fU) << 2U; 550} 551static inline u32 falcon_falcon_dmemc_offs_m(void) 552{ 553 return 0x3fU << 2U; 554} 555static inline u32 falcon_falcon_dmemc_blk_f(u32 v) 556{ 557 return (v & 0xffU) << 8U; 558} 559static inline u32 falcon_falcon_dmemc_blk_m(void) 560{ 561 return 0xffU << 8U; 562} 563static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) 564{ 565 return (v & 0x1U) << 24U; 566} 567static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) 568{ 569 return (v & 0x1U) << 25U; 570} 571static inline u32 falcon_falcon_dmemd_r(u32 i) 572{ 573 return 0x000001c4U + i*8U; 574} 575static inline u32 falcon_falcon_debug1_r(void) 576{ 577 return 0x00000090U; 578} 579static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) 580{ 581 return 1U; 582} 583static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) 584{ 585 return (v & 0x1U) << 16U; 586} 587static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) 588{ 589 return 0x1U << 16U; 590} 591static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) 592{ 593 return (r >> 16U) & 0x1U; 594} 595static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 falcon_falcon_debuginfo_r(void) 600{ 601 return 0x00000094U; 602} 603#endif
diff --git a/include/nvgpu/hw/gv11b/hw_fb_gv11b.h b/include/nvgpu/hw/gv11b/hw_fb_gv11b.h
deleted file mode 100644
index 767fc5a..0000000
--- a/include/nvgpu/hw/gv11b/hw_fb_gv11b.h
+++ /dev/null
@@ -1,1867 +0,0 @@ 1/* 2 * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fb_gv11b_h_ 57#define _hw_fb_gv11b_h_ 58 59static inline u32 fb_fbhub_num_active_ltcs_r(void) 60{ 61 return 0x00100800U; 62} 63static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_m(void) 64{ 65 return 0x1U << 25U; 66} 67static inline u32 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_f(void) 68{ 69 return 0x2000000U; 70} 71static inline u32 fb_fbhub_num_active_ltcs_hub_sys_ncoh_atomic_mode_m(void) 72{ 73 return 0x1U << 26U; 74} 75static inline u32 fb_fbhub_num_active_ltcs_hub_sys_ncoh_atomic_mode_use_read_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 fb_mmu_ctrl_r(void) 80{ 81 return 0x00100c80U; 82} 83static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) 84{ 85 return (r >> 15U) & 0x1U; 86} 87static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) 92{ 93 return (r >> 16U) & 0xffU; 94} 95static inline u32 fb_mmu_ctrl_atomic_capability_mode_m(void) 96{ 97 return 0x3U << 24U; 98} 99static inline u32 fb_mmu_ctrl_atomic_capability_mode_l2_f(void) 100{ 101 return 0x0U; 102} 103static inline u32 fb_mmu_ctrl_atomic_capability_mode_rmw_f(void) 104{ 105 return 0x2000000U; 106} 107static inline u32 
fb_mmu_ctrl_atomic_capability_sys_ncoh_mode_m(void) 108{ 109 return 0x1U << 27U; 110} 111static inline u32 fb_mmu_ctrl_atomic_capability_sys_ncoh_mode_l2_f(void) 112{ 113 return 0x0U; 114} 115static inline u32 fb_hshub_num_active_ltcs_r(void) 116{ 117 return 0x001fbc20U; 118} 119static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_m(void) 120{ 121 return 0x1U << 25U; 122} 123static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_read_f(void) 124{ 125 return 0x0U; 126} 127static inline u32 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_f(void) 128{ 129 return 0x2000000U; 130} 131static inline u32 fb_priv_mmu_phy_secure_r(void) 132{ 133 return 0x00100ce4U; 134} 135static inline u32 fb_mmu_invalidate_pdb_r(void) 136{ 137 return 0x00100cb8U; 138} 139static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) 144{ 145 return 0x2U; 146} 147static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) 148{ 149 return (v & 0xfffffffU) << 4U; 150} 151static inline u32 fb_mmu_invalidate_r(void) 152{ 153 return 0x00100cbcU; 154} 155static inline u32 fb_mmu_invalidate_all_va_true_f(void) 156{ 157 return 0x1U; 158} 159static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 fb_mmu_invalidate_hubtlb_only_s(void) 164{ 165 return 1U; 166} 167static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v) 168{ 169 return (v & 0x1U) << 2U; 170} 171static inline u32 fb_mmu_invalidate_hubtlb_only_m(void) 172{ 173 return 0x1U << 2U; 174} 175static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r) 176{ 177 return (r >> 2U) & 0x1U; 178} 179static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void) 180{ 181 return 0x4U; 182} 183static inline u32 fb_mmu_invalidate_replay_s(void) 184{ 185 return 3U; 186} 187static inline u32 fb_mmu_invalidate_replay_f(u32 v) 188{ 189 return (v & 0x7U) << 3U; 190} 191static inline u32 
fb_mmu_invalidate_replay_m(void) 192{ 193 return 0x7U << 3U; 194} 195static inline u32 fb_mmu_invalidate_replay_v(u32 r) 196{ 197 return (r >> 3U) & 0x7U; 198} 199static inline u32 fb_mmu_invalidate_replay_none_f(void) 200{ 201 return 0x0U; 202} 203static inline u32 fb_mmu_invalidate_replay_start_f(void) 204{ 205 return 0x8U; 206} 207static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void) 208{ 209 return 0x10U; 210} 211static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void) 212{ 213 return 0x20U; 214} 215static inline u32 fb_mmu_invalidate_sys_membar_s(void) 216{ 217 return 1U; 218} 219static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v) 220{ 221 return (v & 0x1U) << 6U; 222} 223static inline u32 fb_mmu_invalidate_sys_membar_m(void) 224{ 225 return 0x1U << 6U; 226} 227static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r) 228{ 229 return (r >> 6U) & 0x1U; 230} 231static inline u32 fb_mmu_invalidate_sys_membar_true_f(void) 232{ 233 return 0x40U; 234} 235static inline u32 fb_mmu_invalidate_ack_s(void) 236{ 237 return 2U; 238} 239static inline u32 fb_mmu_invalidate_ack_f(u32 v) 240{ 241 return (v & 0x3U) << 7U; 242} 243static inline u32 fb_mmu_invalidate_ack_m(void) 244{ 245 return 0x3U << 7U; 246} 247static inline u32 fb_mmu_invalidate_ack_v(u32 r) 248{ 249 return (r >> 7U) & 0x3U; 250} 251static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void) 252{ 253 return 0x0U; 254} 255static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void) 256{ 257 return 0x100U; 258} 259static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void) 260{ 261 return 0x80U; 262} 263static inline u32 fb_mmu_invalidate_cancel_client_id_s(void) 264{ 265 return 6U; 266} 267static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v) 268{ 269 return (v & 0x3fU) << 9U; 270} 271static inline u32 fb_mmu_invalidate_cancel_client_id_m(void) 272{ 273 return 0x3fU << 9U; 274} 275static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r) 276{ 277 return (r >> 9U) & 
0x3fU; 278} 279static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void) 280{ 281 return 5U; 282} 283static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v) 284{ 285 return (v & 0x1fU) << 15U; 286} 287static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void) 288{ 289 return 0x1fU << 15U; 290} 291static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r) 292{ 293 return (r >> 15U) & 0x1fU; 294} 295static inline u32 fb_mmu_invalidate_cancel_client_type_s(void) 296{ 297 return 1U; 298} 299static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v) 300{ 301 return (v & 0x1U) << 20U; 302} 303static inline u32 fb_mmu_invalidate_cancel_client_type_m(void) 304{ 305 return 0x1U << 20U; 306} 307static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r) 308{ 309 return (r >> 20U) & 0x1U; 310} 311static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void) 312{ 313 return 0x0U; 314} 315static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void) 316{ 317 return 0x100000U; 318} 319static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void) 320{ 321 return 3U; 322} 323static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v) 324{ 325 return (v & 0x7U) << 24U; 326} 327static inline u32 fb_mmu_invalidate_cancel_cache_level_m(void) 328{ 329 return 0x7U << 24U; 330} 331static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r) 332{ 333 return (r >> 24U) & 0x7U; 334} 335static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void) 336{ 337 return 0x0U; 338} 339static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void) 340{ 341 return 0x1000000U; 342} 343static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void) 344{ 345 return 0x2000000U; 346} 347static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void) 348{ 349 return 0x3000000U; 350} 351static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void) 352{ 353 return 0x4000000U; 354} 355static inline u32 
fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void) 356{ 357 return 0x5000000U; 358} 359static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void) 360{ 361 return 0x6000000U; 362} 363static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void) 364{ 365 return 0x7000000U; 366} 367static inline u32 fb_mmu_invalidate_trigger_s(void) 368{ 369 return 1U; 370} 371static inline u32 fb_mmu_invalidate_trigger_f(u32 v) 372{ 373 return (v & 0x1U) << 31U; 374} 375static inline u32 fb_mmu_invalidate_trigger_m(void) 376{ 377 return 0x1U << 31U; 378} 379static inline u32 fb_mmu_invalidate_trigger_v(u32 r) 380{ 381 return (r >> 31U) & 0x1U; 382} 383static inline u32 fb_mmu_invalidate_trigger_true_f(void) 384{ 385 return 0x80000000U; 386} 387static inline u32 fb_mmu_debug_wr_r(void) 388{ 389 return 0x00100cc8U; 390} 391static inline u32 fb_mmu_debug_wr_aperture_s(void) 392{ 393 return 2U; 394} 395static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) 396{ 397 return (v & 0x3U) << 0U; 398} 399static inline u32 fb_mmu_debug_wr_aperture_m(void) 400{ 401 return 0x3U << 0U; 402} 403static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) 404{ 405 return (r >> 0U) & 0x3U; 406} 407static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void) 408{ 409 return 0x0U; 410} 411static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) 412{ 413 return 0x2U; 414} 415static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) 416{ 417 return 0x3U; 418} 419static inline u32 fb_mmu_debug_wr_vol_false_f(void) 420{ 421 return 0x0U; 422} 423static inline u32 fb_mmu_debug_wr_vol_true_v(void) 424{ 425 return 0x00000001U; 426} 427static inline u32 fb_mmu_debug_wr_vol_true_f(void) 428{ 429 return 0x4U; 430} 431static inline u32 fb_mmu_debug_wr_addr_f(u32 v) 432{ 433 return (v & 0xfffffffU) << 4U; 434} 435static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) 436{ 437 return 0x0000000cU; 438} 439static inline u32 fb_mmu_debug_rd_r(void) 440{ 441 return 0x00100cccU; 442} 
443static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) 444{ 445 return 0x0U; 446} 447static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) 448{ 449 return 0x2U; 450} 451static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) 452{ 453 return 0x3U; 454} 455static inline u32 fb_mmu_debug_rd_vol_false_f(void) 456{ 457 return 0x0U; 458} 459static inline u32 fb_mmu_debug_rd_addr_f(u32 v) 460{ 461 return (v & 0xfffffffU) << 4U; 462} 463static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) 464{ 465 return 0x0000000cU; 466} 467static inline u32 fb_mmu_debug_ctrl_r(void) 468{ 469 return 0x00100cc4U; 470} 471static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) 472{ 473 return (r >> 16U) & 0x1U; 474} 475static inline u32 fb_mmu_debug_ctrl_debug_m(void) 476{ 477 return 0x1U << 16U; 478} 479static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) 480{ 481 return 0x00000001U; 482} 483static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) 484{ 485 return 0x00000000U; 486} 487static inline u32 fb_mmu_vpr_info_r(void) 488{ 489 return 0x00100cd0U; 490} 491static inline u32 fb_mmu_vpr_info_fetch_v(u32 r) 492{ 493 return (r >> 2U) & 0x1U; 494} 495static inline u32 fb_mmu_vpr_info_fetch_false_v(void) 496{ 497 return 0x00000000U; 498} 499static inline u32 fb_mmu_vpr_info_fetch_true_v(void) 500{ 501 return 0x00000001U; 502} 503static inline u32 fb_mmu_l2tlb_ecc_status_r(void) 504{ 505 return 0x00100e70U; 506} 507static inline u32 fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m(void) 508{ 509 return 0x1U << 0U; 510} 511static inline u32 fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m(void) 512{ 513 return 0x1U << 1U; 514} 515static inline u32 fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m(void) 516{ 517 return 0x1U << 16U; 518} 519static inline u32 fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m(void) 520{ 521 return 0x1U << 18U; 522} 523static inline u32 fb_mmu_l2tlb_ecc_status_reset_f(u32 v) 524{ 525 return (v & 
0x1U) << 30U; 526} 527static inline u32 fb_mmu_l2tlb_ecc_status_reset_clear_f(void) 528{ 529 return 0x40000000U; 530} 531static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_r(void) 532{ 533 return 0x00100e74U; 534} 535static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_s(void) 536{ 537 return 16U; 538} 539static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_f(u32 v) 540{ 541 return (v & 0xffffU) << 0U; 542} 543static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_m(void) 544{ 545 return 0xffffU << 0U; 546} 547static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_v(u32 r) 548{ 549 return (r >> 0U) & 0xffffU; 550} 551static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_r(void) 552{ 553 return 0x00100e78U; 554} 555static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s(void) 556{ 557 return 16U; 558} 559static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_f(u32 v) 560{ 561 return (v & 0xffffU) << 0U; 562} 563static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_m(void) 564{ 565 return 0xffffU << 0U; 566} 567static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(u32 r) 568{ 569 return (r >> 0U) & 0xffffU; 570} 571static inline u32 fb_mmu_l2tlb_ecc_address_r(void) 572{ 573 return 0x00100e7cU; 574} 575static inline u32 fb_mmu_l2tlb_ecc_address_index_s(void) 576{ 577 return 32U; 578} 579static inline u32 fb_mmu_l2tlb_ecc_address_index_f(u32 v) 580{ 581 return (v & 0xffffffffU) << 0U; 582} 583static inline u32 fb_mmu_l2tlb_ecc_address_index_m(void) 584{ 585 return 0xffffffffU << 0U; 586} 587static inline u32 fb_mmu_l2tlb_ecc_address_index_v(u32 r) 588{ 589 return (r >> 0U) & 0xffffffffU; 590} 591static inline u32 fb_mmu_hubtlb_ecc_status_r(void) 592{ 593 return 0x00100e84U; 594} 595static inline u32 fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m(void) 596{ 597 return 0x1U << 0U; 598} 599static inline u32 fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m(void) 600{ 601 return 0x1U << 1U; 602} 
603static inline u32 fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m(void) 604{ 605 return 0x1U << 16U; 606} 607static inline u32 fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m(void) 608{ 609 return 0x1U << 18U; 610} 611static inline u32 fb_mmu_hubtlb_ecc_status_reset_f(u32 v) 612{ 613 return (v & 0x1U) << 30U; 614} 615static inline u32 fb_mmu_hubtlb_ecc_status_reset_clear_f(void) 616{ 617 return 0x40000000U; 618} 619static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_r(void) 620{ 621 return 0x00100e88U; 622} 623static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_s(void) 624{ 625 return 16U; 626} 627static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_f(u32 v) 628{ 629 return (v & 0xffffU) << 0U; 630} 631static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_m(void) 632{ 633 return 0xffffU << 0U; 634} 635static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_v(u32 r) 636{ 637 return (r >> 0U) & 0xffffU; 638} 639static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_r(void) 640{ 641 return 0x00100e8cU; 642} 643static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s(void) 644{ 645 return 16U; 646} 647static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_f(u32 v) 648{ 649 return (v & 0xffffU) << 0U; 650} 651static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_m(void) 652{ 653 return 0xffffU << 0U; 654} 655static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(u32 r) 656{ 657 return (r >> 0U) & 0xffffU; 658} 659static inline u32 fb_mmu_hubtlb_ecc_address_r(void) 660{ 661 return 0x00100e90U; 662} 663static inline u32 fb_mmu_hubtlb_ecc_address_index_s(void) 664{ 665 return 32U; 666} 667static inline u32 fb_mmu_hubtlb_ecc_address_index_f(u32 v) 668{ 669 return (v & 0xffffffffU) << 0U; 670} 671static inline u32 fb_mmu_hubtlb_ecc_address_index_m(void) 672{ 673 return 0xffffffffU << 0U; 674} 675static inline u32 fb_mmu_hubtlb_ecc_address_index_v(u32 r) 676{ 
677 return (r >> 0U) & 0xffffffffU; 678} 679static inline u32 fb_mmu_fillunit_ecc_status_r(void) 680{ 681 return 0x00100e98U; 682} 683static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m(void) 684{ 685 return 0x1U << 0U; 686} 687static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m(void) 688{ 689 return 0x1U << 1U; 690} 691static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m(void) 692{ 693 return 0x1U << 2U; 694} 695static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m(void) 696{ 697 return 0x1U << 3U; 698} 699static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m(void) 700{ 701 return 0x1U << 16U; 702} 703static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m(void) 704{ 705 return 0x1U << 18U; 706} 707static inline u32 fb_mmu_fillunit_ecc_status_reset_f(u32 v) 708{ 709 return (v & 0x1U) << 30U; 710} 711static inline u32 fb_mmu_fillunit_ecc_status_reset_clear_f(void) 712{ 713 return 0x40000000U; 714} 715static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_r(void) 716{ 717 return 0x00100e9cU; 718} 719static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_s(void) 720{ 721 return 16U; 722} 723static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_f(u32 v) 724{ 725 return (v & 0xffffU) << 0U; 726} 727static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_m(void) 728{ 729 return 0xffffU << 0U; 730} 731static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_v(u32 r) 732{ 733 return (r >> 0U) & 0xffffU; 734} 735static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_r(void) 736{ 737 return 0x00100ea0U; 738} 739static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_s(void) 740{ 741 return 16U; 742} 743static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_f(u32 v) 744{ 745 return (v & 0xffffU) << 0U; 746} 747static inline u32 
fb_mmu_fillunit_ecc_uncorrected_err_count_total_m(void) 748{ 749 return 0xffffU << 0U; 750} 751static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(u32 r) 752{ 753 return (r >> 0U) & 0xffffU; 754} 755static inline u32 fb_mmu_fillunit_ecc_address_r(void) 756{ 757 return 0x00100ea4U; 758} 759static inline u32 fb_mmu_fillunit_ecc_address_index_s(void) 760{ 761 return 32U; 762} 763static inline u32 fb_mmu_fillunit_ecc_address_index_f(u32 v) 764{ 765 return (v & 0xffffffffU) << 0U; 766} 767static inline u32 fb_mmu_fillunit_ecc_address_index_m(void) 768{ 769 return 0xffffffffU << 0U; 770} 771static inline u32 fb_mmu_fillunit_ecc_address_index_v(u32 r) 772{ 773 return (r >> 0U) & 0xffffffffU; 774} 775static inline u32 fb_niso_flush_sysmem_addr_r(void) 776{ 777 return 0x00100c10U; 778} 779static inline u32 fb_niso_intr_r(void) 780{ 781 return 0x00100a20U; 782} 783static inline u32 fb_niso_intr_hub_access_counter_notify_m(void) 784{ 785 return 0x1U << 0U; 786} 787static inline u32 fb_niso_intr_hub_access_counter_notify_pending_f(void) 788{ 789 return 0x1U; 790} 791static inline u32 fb_niso_intr_hub_access_counter_error_m(void) 792{ 793 return 0x1U << 1U; 794} 795static inline u32 fb_niso_intr_hub_access_counter_error_pending_f(void) 796{ 797 return 0x2U; 798} 799static inline u32 fb_niso_intr_mmu_replayable_fault_notify_m(void) 800{ 801 return 0x1U << 27U; 802} 803static inline u32 fb_niso_intr_mmu_replayable_fault_notify_pending_f(void) 804{ 805 return 0x8000000U; 806} 807static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_m(void) 808{ 809 return 0x1U << 28U; 810} 811static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_pending_f(void) 812{ 813 return 0x10000000U; 814} 815static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_m(void) 816{ 817 return 0x1U << 29U; 818} 819static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f(void) 820{ 821 return 0x20000000U; 822} 823static inline u32 
fb_niso_intr_mmu_nonreplayable_fault_overflow_m(void) 824{ 825 return 0x1U << 30U; 826} 827static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f(void) 828{ 829 return 0x40000000U; 830} 831static inline u32 fb_niso_intr_mmu_other_fault_notify_m(void) 832{ 833 return 0x1U << 31U; 834} 835static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void) 836{ 837 return 0x80000000U; 838} 839static inline u32 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m(void) 840{ 841 return 0x1U << 26U; 842} 843static inline u32 fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f(void) 844{ 845 return 0x4000000U; 846} 847static inline u32 fb_niso_intr_en_r(u32 i) 848{ 849 return 0x00100a24U + i*4U; 850} 851static inline u32 fb_niso_intr_en__size_1_v(void) 852{ 853 return 0x00000002U; 854} 855static inline u32 fb_niso_intr_en_hub_access_counter_notify_f(u32 v) 856{ 857 return (v & 0x1U) << 0U; 858} 859static inline u32 fb_niso_intr_en_hub_access_counter_notify_enabled_f(void) 860{ 861 return 0x1U; 862} 863static inline u32 fb_niso_intr_en_hub_access_counter_error_f(u32 v) 864{ 865 return (v & 0x1U) << 1U; 866} 867static inline u32 fb_niso_intr_en_hub_access_counter_error_enabled_f(void) 868{ 869 return 0x2U; 870} 871static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_f(u32 v) 872{ 873 return (v & 0x1U) << 27U; 874} 875static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_enabled_f(void) 876{ 877 return 0x8000000U; 878} 879static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_f(u32 v) 880{ 881 return (v & 0x1U) << 28U; 882} 883static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_enabled_f(void) 884{ 885 return 0x10000000U; 886} 887static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_f(u32 v) 888{ 889 return (v & 0x1U) << 29U; 890} 891static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_enabled_f(void) 892{ 893 return 0x20000000U; 894} 895static inline u32 
fb_niso_intr_en_mmu_nonreplayable_fault_overflow_f(u32 v) 896{ 897 return (v & 0x1U) << 30U; 898} 899static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_enabled_f(void) 900{ 901 return 0x40000000U; 902} 903static inline u32 fb_niso_intr_en_mmu_other_fault_notify_f(u32 v) 904{ 905 return (v & 0x1U) << 31U; 906} 907static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void) 908{ 909 return 0x80000000U; 910} 911static inline u32 fb_niso_intr_en_mmu_ecc_uncorrected_error_notify_f(u32 v) 912{ 913 return (v & 0x1U) << 26U; 914} 915static inline u32 fb_niso_intr_en_mmu_ecc_uncorrected_error_notify_enabled_f(void) 916{ 917 return 0x4000000U; 918} 919static inline u32 fb_niso_intr_en_set_r(u32 i) 920{ 921 return 0x00100a2cU + i*4U; 922} 923static inline u32 fb_niso_intr_en_set__size_1_v(void) 924{ 925 return 0x00000002U; 926} 927static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_m(void) 928{ 929 return 0x1U << 0U; 930} 931static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_set_f(void) 932{ 933 return 0x1U; 934} 935static inline u32 fb_niso_intr_en_set_hub_access_counter_error_m(void) 936{ 937 return 0x1U << 1U; 938} 939static inline u32 fb_niso_intr_en_set_hub_access_counter_error_set_f(void) 940{ 941 return 0x2U; 942} 943static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_m(void) 944{ 945 return 0x1U << 27U; 946} 947static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_set_f(void) 948{ 949 return 0x8000000U; 950} 951static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m(void) 952{ 953 return 0x1U << 28U; 954} 955static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_set_f(void) 956{ 957 return 0x10000000U; 958} 959static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m(void) 960{ 961 return 0x1U << 29U; 962} 963static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_set_f(void) 964{ 965 return 0x20000000U; 966} 967static inline u32 
fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m(void) 968{ 969 return 0x1U << 30U; 970} 971static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_set_f(void) 972{ 973 return 0x40000000U; 974} 975static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_m(void) 976{ 977 return 0x1U << 31U; 978} 979static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void) 980{ 981 return 0x80000000U; 982} 983static inline u32 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m(void) 984{ 985 return 0x1U << 26U; 986} 987static inline u32 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f(void) 988{ 989 return 0x4000000U; 990} 991static inline u32 fb_niso_intr_en_clr_r(u32 i) 992{ 993 return 0x00100a34U + i*4U; 994} 995static inline u32 fb_niso_intr_en_clr__size_1_v(void) 996{ 997 return 0x00000002U; 998} 999static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_m(void) 1000{ 1001 return 0x1U << 0U; 1002} 1003static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_set_f(void) 1004{ 1005 return 0x1U; 1006} 1007static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_m(void) 1008{ 1009 return 0x1U << 1U; 1010} 1011static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_set_f(void) 1012{ 1013 return 0x2U; 1014} 1015static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m(void) 1016{ 1017 return 0x1U << 27U; 1018} 1019static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_set_f(void) 1020{ 1021 return 0x8000000U; 1022} 1023static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m(void) 1024{ 1025 return 0x1U << 28U; 1026} 1027static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_set_f(void) 1028{ 1029 return 0x10000000U; 1030} 1031static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m(void) 1032{ 1033 return 0x1U << 29U; 1034} 1035static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_set_f(void) 1036{ 1037 return 0x20000000U; 1038} 
1039static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m(void) 1040{ 1041 return 0x1U << 30U; 1042} 1043static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_set_f(void) 1044{ 1045 return 0x40000000U; 1046} 1047static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_m(void) 1048{ 1049 return 0x1U << 31U; 1050} 1051static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_set_f(void) 1052{ 1053 return 0x80000000U; 1054} 1055static inline u32 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m(void) 1056{ 1057 return 0x1U << 26U; 1058} 1059static inline u32 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f(void) 1060{ 1061 return 0x4000000U; 1062} 1063static inline u32 fb_niso_intr_en_clr_mmu_non_replay_fault_buffer_v(void) 1064{ 1065 return 0x00000000U; 1066} 1067static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void) 1068{ 1069 return 0x00000001U; 1070} 1071static inline u32 fb_mmu_fault_buffer_lo_r(u32 i) 1072{ 1073 return 0x00100e24U + i*20U; 1074} 1075static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void) 1076{ 1077 return 0x00000002U; 1078} 1079static inline u32 fb_mmu_fault_buffer_lo_addr_mode_f(u32 v) 1080{ 1081 return (v & 0x1U) << 0U; 1082} 1083static inline u32 fb_mmu_fault_buffer_lo_addr_mode_v(u32 r) 1084{ 1085 return (r >> 0U) & 0x1U; 1086} 1087static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_v(void) 1088{ 1089 return 0x00000000U; 1090} 1091static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_f(void) 1092{ 1093 return 0x0U; 1094} 1095static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_v(void) 1096{ 1097 return 0x00000001U; 1098} 1099static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_f(void) 1100{ 1101 return 0x1U; 1102} 1103static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_f(u32 v) 1104{ 1105 return (v & 0x3U) << 1U; 1106} 1107static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_v(u32 r) 1108{ 1109 return (r >> 1U) & 0x3U; 1110} 1111static 
inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_v(void) 1112{ 1113 return 0x00000002U; 1114} 1115static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_f(void) 1116{ 1117 return 0x4U; 1118} 1119static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_v(void) 1120{ 1121 return 0x00000003U; 1122} 1123static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_f(void) 1124{ 1125 return 0x6U; 1126} 1127static inline u32 fb_mmu_fault_buffer_lo_phys_vol_f(u32 v) 1128{ 1129 return (v & 0x1U) << 3U; 1130} 1131static inline u32 fb_mmu_fault_buffer_lo_phys_vol_v(u32 r) 1132{ 1133 return (r >> 3U) & 0x1U; 1134} 1135static inline u32 fb_mmu_fault_buffer_lo_addr_f(u32 v) 1136{ 1137 return (v & 0xfffffU) << 12U; 1138} 1139static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r) 1140{ 1141 return (r >> 12U) & 0xfffffU; 1142} 1143static inline u32 fb_mmu_fault_buffer_lo_addr_b(void) 1144{ 1145 return 12U; 1146} 1147static inline u32 fb_mmu_fault_buffer_hi_r(u32 i) 1148{ 1149 return 0x00100e28U + i*20U; 1150} 1151static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void) 1152{ 1153 return 0x00000002U; 1154} 1155static inline u32 fb_mmu_fault_buffer_hi_addr_f(u32 v) 1156{ 1157 return (v & 0xffffffffU) << 0U; 1158} 1159static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r) 1160{ 1161 return (r >> 0U) & 0xffffffffU; 1162} 1163static inline u32 fb_mmu_fault_buffer_get_r(u32 i) 1164{ 1165 return 0x00100e2cU + i*20U; 1166} 1167static inline u32 fb_mmu_fault_buffer_get__size_1_v(void) 1168{ 1169 return 0x00000002U; 1170} 1171static inline u32 fb_mmu_fault_buffer_get_ptr_f(u32 v) 1172{ 1173 return (v & 0xfffffU) << 0U; 1174} 1175static inline u32 fb_mmu_fault_buffer_get_ptr_m(void) 1176{ 1177 return 0xfffffU << 0U; 1178} 1179static inline u32 fb_mmu_fault_buffer_get_ptr_v(u32 r) 1180{ 1181 return (r >> 0U) & 0xfffffU; 1182} 1183static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_f(u32 v) 1184{ 1185 return (v & 0x1U) << 30U; 1186} 1187static inline u32 
fb_mmu_fault_buffer_get_getptr_corrupted_m(void) 1188{ 1189 return 0x1U << 30U; 1190} 1191static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_v(void) 1192{ 1193 return 0x00000001U; 1194} 1195static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f(void) 1196{ 1197 return 0x40000000U; 1198} 1199static inline u32 fb_mmu_fault_buffer_get_overflow_f(u32 v) 1200{ 1201 return (v & 0x1U) << 31U; 1202} 1203static inline u32 fb_mmu_fault_buffer_get_overflow_m(void) 1204{ 1205 return 0x1U << 31U; 1206} 1207static inline u32 fb_mmu_fault_buffer_get_overflow_clear_v(void) 1208{ 1209 return 0x00000001U; 1210} 1211static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void) 1212{ 1213 return 0x80000000U; 1214} 1215static inline u32 fb_mmu_fault_buffer_put_r(u32 i) 1216{ 1217 return 0x00100e30U + i*20U; 1218} 1219static inline u32 fb_mmu_fault_buffer_put__size_1_v(void) 1220{ 1221 return 0x00000002U; 1222} 1223static inline u32 fb_mmu_fault_buffer_put_ptr_f(u32 v) 1224{ 1225 return (v & 0xfffffU) << 0U; 1226} 1227static inline u32 fb_mmu_fault_buffer_put_ptr_v(u32 r) 1228{ 1229 return (r >> 0U) & 0xfffffU; 1230} 1231static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_f(u32 v) 1232{ 1233 return (v & 0x1U) << 30U; 1234} 1235static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_v(u32 r) 1236{ 1237 return (r >> 30U) & 0x1U; 1238} 1239static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_v(void) 1240{ 1241 return 0x00000001U; 1242} 1243static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_f(void) 1244{ 1245 return 0x40000000U; 1246} 1247static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_v(void) 1248{ 1249 return 0x00000000U; 1250} 1251static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_f(void) 1252{ 1253 return 0x0U; 1254} 1255static inline u32 fb_mmu_fault_buffer_put_overflow_f(u32 v) 1256{ 1257 return (v & 0x1U) << 31U; 1258} 1259static inline u32 fb_mmu_fault_buffer_put_overflow_v(u32 r) 1260{ 1261 return 
(r >> 31U) & 0x1U; 1262} 1263static inline u32 fb_mmu_fault_buffer_put_overflow_yes_v(void) 1264{ 1265 return 0x00000001U; 1266} 1267static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void) 1268{ 1269 return 0x80000000U; 1270} 1271static inline u32 fb_mmu_fault_buffer_size_r(u32 i) 1272{ 1273 return 0x00100e34U + i*20U; 1274} 1275static inline u32 fb_mmu_fault_buffer_size__size_1_v(void) 1276{ 1277 return 0x00000002U; 1278} 1279static inline u32 fb_mmu_fault_buffer_size_val_f(u32 v) 1280{ 1281 return (v & 0xfffffU) << 0U; 1282} 1283static inline u32 fb_mmu_fault_buffer_size_val_v(u32 r) 1284{ 1285 return (r >> 0U) & 0xfffffU; 1286} 1287static inline u32 fb_mmu_fault_buffer_size_overflow_intr_f(u32 v) 1288{ 1289 return (v & 0x1U) << 29U; 1290} 1291static inline u32 fb_mmu_fault_buffer_size_overflow_intr_v(u32 r) 1292{ 1293 return (r >> 29U) & 0x1U; 1294} 1295static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_v(void) 1296{ 1297 return 0x00000001U; 1298} 1299static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_f(void) 1300{ 1301 return 0x20000000U; 1302} 1303static inline u32 fb_mmu_fault_buffer_size_set_default_f(u32 v) 1304{ 1305 return (v & 0x1U) << 30U; 1306} 1307static inline u32 fb_mmu_fault_buffer_size_set_default_v(u32 r) 1308{ 1309 return (r >> 30U) & 0x1U; 1310} 1311static inline u32 fb_mmu_fault_buffer_size_set_default_yes_v(void) 1312{ 1313 return 0x00000001U; 1314} 1315static inline u32 fb_mmu_fault_buffer_size_set_default_yes_f(void) 1316{ 1317 return 0x40000000U; 1318} 1319static inline u32 fb_mmu_fault_buffer_size_enable_f(u32 v) 1320{ 1321 return (v & 0x1U) << 31U; 1322} 1323static inline u32 fb_mmu_fault_buffer_size_enable_m(void) 1324{ 1325 return 0x1U << 31U; 1326} 1327static inline u32 fb_mmu_fault_buffer_size_enable_v(u32 r) 1328{ 1329 return (r >> 31U) & 0x1U; 1330} 1331static inline u32 fb_mmu_fault_buffer_size_enable_true_v(void) 1332{ 1333 return 0x00000001U; 1334} 1335static inline u32 
fb_mmu_fault_buffer_size_enable_true_f(void) 1336{ 1337 return 0x80000000U; 1338} 1339static inline u32 fb_mmu_fault_addr_lo_r(void) 1340{ 1341 return 0x00100e4cU; 1342} 1343static inline u32 fb_mmu_fault_addr_lo_phys_aperture_f(u32 v) 1344{ 1345 return (v & 0x3U) << 0U; 1346} 1347static inline u32 fb_mmu_fault_addr_lo_phys_aperture_v(u32 r) 1348{ 1349 return (r >> 0U) & 0x3U; 1350} 1351static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_v(void) 1352{ 1353 return 0x00000002U; 1354} 1355static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_f(void) 1356{ 1357 return 0x2U; 1358} 1359static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_v(void) 1360{ 1361 return 0x00000003U; 1362} 1363static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_f(void) 1364{ 1365 return 0x3U; 1366} 1367static inline u32 fb_mmu_fault_addr_lo_addr_f(u32 v) 1368{ 1369 return (v & 0xfffffU) << 12U; 1370} 1371static inline u32 fb_mmu_fault_addr_lo_addr_v(u32 r) 1372{ 1373 return (r >> 12U) & 0xfffffU; 1374} 1375static inline u32 fb_mmu_fault_addr_lo_addr_b(void) 1376{ 1377 return 12U; 1378} 1379static inline u32 fb_mmu_fault_addr_hi_r(void) 1380{ 1381 return 0x00100e50U; 1382} 1383static inline u32 fb_mmu_fault_addr_hi_addr_f(u32 v) 1384{ 1385 return (v & 0xffffffffU) << 0U; 1386} 1387static inline u32 fb_mmu_fault_addr_hi_addr_v(u32 r) 1388{ 1389 return (r >> 0U) & 0xffffffffU; 1390} 1391static inline u32 fb_mmu_fault_inst_lo_r(void) 1392{ 1393 return 0x00100e54U; 1394} 1395static inline u32 fb_mmu_fault_inst_lo_engine_id_v(u32 r) 1396{ 1397 return (r >> 0U) & 0x1ffU; 1398} 1399static inline u32 fb_mmu_fault_inst_lo_aperture_v(u32 r) 1400{ 1401 return (r >> 10U) & 0x3U; 1402} 1403static inline u32 fb_mmu_fault_inst_lo_aperture_sys_coh_v(void) 1404{ 1405 return 0x00000002U; 1406} 1407static inline u32 fb_mmu_fault_inst_lo_aperture_sys_nocoh_v(void) 1408{ 1409 return 0x00000003U; 1410} 1411static inline u32 fb_mmu_fault_inst_lo_addr_f(u32 v) 1412{ 1413 return (v & 
0xfffffU) << 12U; 1414} 1415static inline u32 fb_mmu_fault_inst_lo_addr_v(u32 r) 1416{ 1417 return (r >> 12U) & 0xfffffU; 1418} 1419static inline u32 fb_mmu_fault_inst_lo_addr_b(void) 1420{ 1421 return 12U; 1422} 1423static inline u32 fb_mmu_fault_inst_hi_r(void) 1424{ 1425 return 0x00100e58U; 1426} 1427static inline u32 fb_mmu_fault_inst_hi_addr_v(u32 r) 1428{ 1429 return (r >> 0U) & 0xffffffffU; 1430} 1431static inline u32 fb_mmu_fault_info_r(void) 1432{ 1433 return 0x00100e5cU; 1434} 1435static inline u32 fb_mmu_fault_info_fault_type_v(u32 r) 1436{ 1437 return (r >> 0U) & 0x1fU; 1438} 1439static inline u32 fb_mmu_fault_info_replayable_fault_v(u32 r) 1440{ 1441 return (r >> 7U) & 0x1U; 1442} 1443static inline u32 fb_mmu_fault_info_client_v(u32 r) 1444{ 1445 return (r >> 8U) & 0x7fU; 1446} 1447static inline u32 fb_mmu_fault_info_access_type_v(u32 r) 1448{ 1449 return (r >> 16U) & 0xfU; 1450} 1451static inline u32 fb_mmu_fault_info_client_type_v(u32 r) 1452{ 1453 return (r >> 20U) & 0x1U; 1454} 1455static inline u32 fb_mmu_fault_info_gpc_id_v(u32 r) 1456{ 1457 return (r >> 24U) & 0x1fU; 1458} 1459static inline u32 fb_mmu_fault_info_protected_mode_v(u32 r) 1460{ 1461 return (r >> 29U) & 0x1U; 1462} 1463static inline u32 fb_mmu_fault_info_replayable_fault_en_v(u32 r) 1464{ 1465 return (r >> 30U) & 0x1U; 1466} 1467static inline u32 fb_mmu_fault_info_valid_v(u32 r) 1468{ 1469 return (r >> 31U) & 0x1U; 1470} 1471static inline u32 fb_mmu_fault_status_r(void) 1472{ 1473 return 0x00100e60U; 1474} 1475static inline u32 fb_mmu_fault_status_dropped_bar1_phys_m(void) 1476{ 1477 return 0x1U << 0U; 1478} 1479static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_v(void) 1480{ 1481 return 0x00000001U; 1482} 1483static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_f(void) 1484{ 1485 return 0x1U; 1486} 1487static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_v(void) 1488{ 1489 return 0x00000001U; 1490} 1491static inline u32 
fb_mmu_fault_status_dropped_bar1_phys_clear_f(void) 1492{ 1493 return 0x1U; 1494} 1495static inline u32 fb_mmu_fault_status_dropped_bar1_virt_m(void) 1496{ 1497 return 0x1U << 1U; 1498} 1499static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_v(void) 1500{ 1501 return 0x00000001U; 1502} 1503static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_f(void) 1504{ 1505 return 0x2U; 1506} 1507static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_v(void) 1508{ 1509 return 0x00000001U; 1510} 1511static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_f(void) 1512{ 1513 return 0x2U; 1514} 1515static inline u32 fb_mmu_fault_status_dropped_bar2_phys_m(void) 1516{ 1517 return 0x1U << 2U; 1518} 1519static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_v(void) 1520{ 1521 return 0x00000001U; 1522} 1523static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_f(void) 1524{ 1525 return 0x4U; 1526} 1527static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_v(void) 1528{ 1529 return 0x00000001U; 1530} 1531static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_f(void) 1532{ 1533 return 0x4U; 1534} 1535static inline u32 fb_mmu_fault_status_dropped_bar2_virt_m(void) 1536{ 1537 return 0x1U << 3U; 1538} 1539static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_v(void) 1540{ 1541 return 0x00000001U; 1542} 1543static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_f(void) 1544{ 1545 return 0x8U; 1546} 1547static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_v(void) 1548{ 1549 return 0x00000001U; 1550} 1551static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_f(void) 1552{ 1553 return 0x8U; 1554} 1555static inline u32 fb_mmu_fault_status_dropped_ifb_phys_m(void) 1556{ 1557 return 0x1U << 4U; 1558} 1559static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_v(void) 1560{ 1561 return 0x00000001U; 1562} 1563static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_f(void) 1564{ 1565 return 0x10U; 1566} 1567static 
inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_v(void) 1568{ 1569 return 0x00000001U; 1570} 1571static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_f(void) 1572{ 1573 return 0x10U; 1574} 1575static inline u32 fb_mmu_fault_status_dropped_ifb_virt_m(void) 1576{ 1577 return 0x1U << 5U; 1578} 1579static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_v(void) 1580{ 1581 return 0x00000001U; 1582} 1583static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_f(void) 1584{ 1585 return 0x20U; 1586} 1587static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_v(void) 1588{ 1589 return 0x00000001U; 1590} 1591static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_f(void) 1592{ 1593 return 0x20U; 1594} 1595static inline u32 fb_mmu_fault_status_dropped_other_phys_m(void) 1596{ 1597 return 0x1U << 6U; 1598} 1599static inline u32 fb_mmu_fault_status_dropped_other_phys_set_v(void) 1600{ 1601 return 0x00000001U; 1602} 1603static inline u32 fb_mmu_fault_status_dropped_other_phys_set_f(void) 1604{ 1605 return 0x40U; 1606} 1607static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_v(void) 1608{ 1609 return 0x00000001U; 1610} 1611static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_f(void) 1612{ 1613 return 0x40U; 1614} 1615static inline u32 fb_mmu_fault_status_dropped_other_virt_m(void) 1616{ 1617 return 0x1U << 7U; 1618} 1619static inline u32 fb_mmu_fault_status_dropped_other_virt_set_v(void) 1620{ 1621 return 0x00000001U; 1622} 1623static inline u32 fb_mmu_fault_status_dropped_other_virt_set_f(void) 1624{ 1625 return 0x80U; 1626} 1627static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_v(void) 1628{ 1629 return 0x00000001U; 1630} 1631static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_f(void) 1632{ 1633 return 0x80U; 1634} 1635static inline u32 fb_mmu_fault_status_replayable_m(void) 1636{ 1637 return 0x1U << 8U; 1638} 1639static inline u32 fb_mmu_fault_status_replayable_set_v(void) 1640{ 1641 return 0x00000001U; 
1642} 1643static inline u32 fb_mmu_fault_status_replayable_set_f(void) 1644{ 1645 return 0x100U; 1646} 1647static inline u32 fb_mmu_fault_status_replayable_reset_f(void) 1648{ 1649 return 0x0U; 1650} 1651static inline u32 fb_mmu_fault_status_non_replayable_m(void) 1652{ 1653 return 0x1U << 9U; 1654} 1655static inline u32 fb_mmu_fault_status_non_replayable_set_v(void) 1656{ 1657 return 0x00000001U; 1658} 1659static inline u32 fb_mmu_fault_status_non_replayable_set_f(void) 1660{ 1661 return 0x200U; 1662} 1663static inline u32 fb_mmu_fault_status_non_replayable_reset_f(void) 1664{ 1665 return 0x0U; 1666} 1667static inline u32 fb_mmu_fault_status_replayable_error_m(void) 1668{ 1669 return 0x1U << 10U; 1670} 1671static inline u32 fb_mmu_fault_status_replayable_error_set_v(void) 1672{ 1673 return 0x00000001U; 1674} 1675static inline u32 fb_mmu_fault_status_replayable_error_set_f(void) 1676{ 1677 return 0x400U; 1678} 1679static inline u32 fb_mmu_fault_status_replayable_error_reset_f(void) 1680{ 1681 return 0x0U; 1682} 1683static inline u32 fb_mmu_fault_status_non_replayable_error_m(void) 1684{ 1685 return 0x1U << 11U; 1686} 1687static inline u32 fb_mmu_fault_status_non_replayable_error_set_v(void) 1688{ 1689 return 0x00000001U; 1690} 1691static inline u32 fb_mmu_fault_status_non_replayable_error_set_f(void) 1692{ 1693 return 0x800U; 1694} 1695static inline u32 fb_mmu_fault_status_non_replayable_error_reset_f(void) 1696{ 1697 return 0x0U; 1698} 1699static inline u32 fb_mmu_fault_status_replayable_overflow_m(void) 1700{ 1701 return 0x1U << 12U; 1702} 1703static inline u32 fb_mmu_fault_status_replayable_overflow_set_v(void) 1704{ 1705 return 0x00000001U; 1706} 1707static inline u32 fb_mmu_fault_status_replayable_overflow_set_f(void) 1708{ 1709 return 0x1000U; 1710} 1711static inline u32 fb_mmu_fault_status_replayable_overflow_reset_f(void) 1712{ 1713 return 0x0U; 1714} 1715static inline u32 fb_mmu_fault_status_non_replayable_overflow_m(void) 1716{ 1717 return 0x1U << 13U; 
1718} 1719static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_v(void) 1720{ 1721 return 0x00000001U; 1722} 1723static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_f(void) 1724{ 1725 return 0x2000U; 1726} 1727static inline u32 fb_mmu_fault_status_non_replayable_overflow_reset_f(void) 1728{ 1729 return 0x0U; 1730} 1731static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_m(void) 1732{ 1733 return 0x1U << 14U; 1734} 1735static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_v(void) 1736{ 1737 return 0x00000001U; 1738} 1739static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_f(void) 1740{ 1741 return 0x4000U; 1742} 1743static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_m(void) 1744{ 1745 return 0x1U << 15U; 1746} 1747static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_v(void) 1748{ 1749 return 0x00000001U; 1750} 1751static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_f(void) 1752{ 1753 return 0x8000U; 1754} 1755static inline u32 fb_mmu_fault_status_busy_m(void) 1756{ 1757 return 0x1U << 30U; 1758} 1759static inline u32 fb_mmu_fault_status_busy_true_v(void) 1760{ 1761 return 0x00000001U; 1762} 1763static inline u32 fb_mmu_fault_status_busy_true_f(void) 1764{ 1765 return 0x40000000U; 1766} 1767static inline u32 fb_mmu_fault_status_valid_m(void) 1768{ 1769 return 0x1U << 31U; 1770} 1771static inline u32 fb_mmu_fault_status_valid_set_v(void) 1772{ 1773 return 0x00000001U; 1774} 1775static inline u32 fb_mmu_fault_status_valid_set_f(void) 1776{ 1777 return 0x80000000U; 1778} 1779static inline u32 fb_mmu_fault_status_valid_clear_v(void) 1780{ 1781 return 0x00000001U; 1782} 1783static inline u32 fb_mmu_fault_status_valid_clear_f(void) 1784{ 1785 return 0x80000000U; 1786} 1787static inline u32 fb_mmu_num_active_ltcs_r(void) 1788{ 1789 return 0x00100ec0U; 1790} 1791static inline u32 fb_mmu_num_active_ltcs_count_f(u32 v) 1792{ 1793 return (v & 
0x1fU) << 0U; 1794} 1795static inline u32 fb_mmu_num_active_ltcs_count_v(u32 r) 1796{ 1797 return (r >> 0U) & 0x1fU; 1798} 1799static inline u32 fb_mmu_cbc_base_r(void) 1800{ 1801 return 0x00100ec4U; 1802} 1803static inline u32 fb_mmu_cbc_base_address_f(u32 v) 1804{ 1805 return (v & 0x3ffffffU) << 0U; 1806} 1807static inline u32 fb_mmu_cbc_base_address_v(u32 r) 1808{ 1809 return (r >> 0U) & 0x3ffffffU; 1810} 1811static inline u32 fb_mmu_cbc_base_address_alignment_shift_v(void) 1812{ 1813 return 0x0000000bU; 1814} 1815static inline u32 fb_mmu_cbc_top_r(void) 1816{ 1817 return 0x00100ec8U; 1818} 1819static inline u32 fb_mmu_cbc_top_size_f(u32 v) 1820{ 1821 return (v & 0x7fffU) << 0U; 1822} 1823static inline u32 fb_mmu_cbc_top_size_v(u32 r) 1824{ 1825 return (r >> 0U) & 0x7fffU; 1826} 1827static inline u32 fb_mmu_cbc_top_size_alignment_shift_v(void) 1828{ 1829 return 0x0000000bU; 1830} 1831static inline u32 fb_mmu_cbc_max_r(void) 1832{ 1833 return 0x00100eccU; 1834} 1835static inline u32 fb_mmu_cbc_max_comptagline_f(u32 v) 1836{ 1837 return (v & 0xffffffU) << 0U; 1838} 1839static inline u32 fb_mmu_cbc_max_comptagline_v(u32 r) 1840{ 1841 return (r >> 0U) & 0xffffffU; 1842} 1843static inline u32 fb_mmu_cbc_max_safe_f(u32 v) 1844{ 1845 return (v & 0x1U) << 30U; 1846} 1847static inline u32 fb_mmu_cbc_max_safe_true_v(void) 1848{ 1849 return 0x00000001U; 1850} 1851static inline u32 fb_mmu_cbc_max_safe_false_v(void) 1852{ 1853 return 0x00000000U; 1854} 1855static inline u32 fb_mmu_cbc_max_unsafe_fault_f(u32 v) 1856{ 1857 return (v & 0x1U) << 31U; 1858} 1859static inline u32 fb_mmu_cbc_max_unsafe_fault_enabled_v(void) 1860{ 1861 return 0x00000000U; 1862} 1863static inline u32 fb_mmu_cbc_max_unsafe_fault_disabled_v(void) 1864{ 1865 return 0x00000001U; 1866} 1867#endif
diff --git a/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h b/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
deleted file mode 100644
index 9ec30bf..0000000
--- a/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
+++ /dev/null
@@ -1,667 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fifo_gv11b_h_ 57#define _hw_fifo_gv11b_h_ 58 59static inline u32 fifo_bar1_base_r(void) 60{ 61 return 0x00002254U; 62} 63static inline u32 fifo_bar1_base_ptr_f(u32 v) 64{ 65 return (v & 0xfffffffU) << 0U; 66} 67static inline u32 fifo_bar1_base_ptr_align_shift_v(void) 68{ 69 return 0x0000000cU; 70} 71static inline u32 fifo_bar1_base_valid_false_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 fifo_bar1_base_valid_true_f(void) 76{ 77 return 0x10000000U; 78} 79static inline u32 fifo_userd_writeback_r(void) 80{ 81 return 0x0000225cU; 82} 83static inline u32 fifo_userd_writeback_timer_f(u32 v) 84{ 85 return (v & 0xffU) << 0U; 86} 87static inline u32 fifo_userd_writeback_timer_disabled_v(void) 88{ 89 return 0x00000000U; 90} 91static inline u32 fifo_userd_writeback_timer_shorter_v(void) 92{ 93 return 0x00000003U; 94} 95static inline u32 fifo_userd_writeback_timer_100us_v(void) 96{ 97 return 0x00000064U; 98} 99static inline u32 fifo_userd_writeback_timescale_f(u32 v) 100{ 101 return (v & 0xfU) << 12U; 102} 103static inline u32 fifo_userd_writeback_timescale_0_v(void) 104{ 105 return 0x00000000U; 106} 107static inline u32 fifo_runlist_base_r(void) 108{ 109 return 0x00002270U; 110} 111static inline u32 fifo_runlist_base_ptr_f(u32 v) 112{ 
113 return (v & 0xfffffffU) << 0U; 114} 115static inline u32 fifo_runlist_base_target_vid_mem_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) 120{ 121 return 0x20000000U; 122} 123static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) 124{ 125 return 0x30000000U; 126} 127static inline u32 fifo_runlist_r(void) 128{ 129 return 0x00002274U; 130} 131static inline u32 fifo_runlist_engine_f(u32 v) 132{ 133 return (v & 0xfU) << 20U; 134} 135static inline u32 fifo_eng_runlist_base_r(u32 i) 136{ 137 return 0x00002280U + i*8U; 138} 139static inline u32 fifo_eng_runlist_base__size_1_v(void) 140{ 141 return 0x00000002U; 142} 143static inline u32 fifo_eng_runlist_r(u32 i) 144{ 145 return 0x00002284U + i*8U; 146} 147static inline u32 fifo_eng_runlist__size_1_v(void) 148{ 149 return 0x00000002U; 150} 151static inline u32 fifo_eng_runlist_length_f(u32 v) 152{ 153 return (v & 0xffffU) << 0U; 154} 155static inline u32 fifo_eng_runlist_length_max_v(void) 156{ 157 return 0x0000ffffU; 158} 159static inline u32 fifo_eng_runlist_pending_true_f(void) 160{ 161 return 0x100000U; 162} 163static inline u32 fifo_pb_timeslice_r(u32 i) 164{ 165 return 0x00002350U + i*4U; 166} 167static inline u32 fifo_pb_timeslice_timeout_16_f(void) 168{ 169 return 0x10U; 170} 171static inline u32 fifo_pb_timeslice_timescale_0_f(void) 172{ 173 return 0x0U; 174} 175static inline u32 fifo_pb_timeslice_enable_true_f(void) 176{ 177 return 0x10000000U; 178} 179static inline u32 fifo_pbdma_map_r(u32 i) 180{ 181 return 0x00002390U + i*4U; 182} 183static inline u32 fifo_intr_0_r(void) 184{ 185 return 0x00002100U; 186} 187static inline u32 fifo_intr_0_bind_error_pending_f(void) 188{ 189 return 0x1U; 190} 191static inline u32 fifo_intr_0_bind_error_reset_f(void) 192{ 193 return 0x1U; 194} 195static inline u32 fifo_intr_0_sched_error_pending_f(void) 196{ 197 return 0x100U; 198} 199static inline u32 fifo_intr_0_sched_error_reset_f(void) 200{ 201 return 0x100U; 
202} 203static inline u32 fifo_intr_0_chsw_error_pending_f(void) 204{ 205 return 0x10000U; 206} 207static inline u32 fifo_intr_0_chsw_error_reset_f(void) 208{ 209 return 0x10000U; 210} 211static inline u32 fifo_intr_0_memop_timeout_pending_f(void) 212{ 213 return 0x800000U; 214} 215static inline u32 fifo_intr_0_memop_timeout_reset_f(void) 216{ 217 return 0x800000U; 218} 219static inline u32 fifo_intr_0_lb_error_pending_f(void) 220{ 221 return 0x1000000U; 222} 223static inline u32 fifo_intr_0_lb_error_reset_f(void) 224{ 225 return 0x1000000U; 226} 227static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) 228{ 229 return 0x20000000U; 230} 231static inline u32 fifo_intr_0_runlist_event_pending_f(void) 232{ 233 return 0x40000000U; 234} 235static inline u32 fifo_intr_0_channel_intr_pending_f(void) 236{ 237 return 0x80000000U; 238} 239static inline u32 fifo_intr_0_ctxsw_timeout_pending_f(void) 240{ 241 return 0x2U; 242} 243static inline u32 fifo_intr_en_0_r(void) 244{ 245 return 0x00002140U; 246} 247static inline u32 fifo_intr_en_0_sched_error_f(u32 v) 248{ 249 return (v & 0x1U) << 8U; 250} 251static inline u32 fifo_intr_en_0_sched_error_m(void) 252{ 253 return 0x1U << 8U; 254} 255static inline u32 fifo_intr_en_0_ctxsw_timeout_pending_f(void) 256{ 257 return 0x2U; 258} 259static inline u32 fifo_intr_en_1_r(void) 260{ 261 return 0x00002528U; 262} 263static inline u32 fifo_intr_bind_error_r(void) 264{ 265 return 0x0000252cU; 266} 267static inline u32 fifo_intr_sched_error_r(void) 268{ 269 return 0x0000254cU; 270} 271static inline u32 fifo_intr_sched_error_code_f(u32 v) 272{ 273 return (v & 0xffU) << 0U; 274} 275static inline u32 fifo_intr_chsw_error_r(void) 276{ 277 return 0x0000256cU; 278} 279static inline u32 fifo_intr_ctxsw_timeout_r(void) 280{ 281 return 0x00002a30U; 282} 283static inline u32 fifo_intr_ctxsw_timeout_engine_f(u32 v, u32 i) 284{ 285 return (v & 0x1U) << (0U + i*1U); 286} 287static inline u32 fifo_intr_ctxsw_timeout_engine_v(u32 r, u32 i) 288{ 289 
return (r >> (0U + i*1U)) & 0x1U; 290} 291static inline u32 fifo_intr_ctxsw_timeout_engine__size_1_v(void) 292{ 293 return 0x00000020U; 294} 295static inline u32 fifo_intr_ctxsw_timeout_engine_pending_v(void) 296{ 297 return 0x00000001U; 298} 299static inline u32 fifo_intr_ctxsw_timeout_engine_pending_f(u32 i) 300{ 301 return 0x1U << (0U + i*1U); 302} 303static inline u32 fifo_intr_ctxsw_timeout_info_r(u32 i) 304{ 305 return 0x00003200U + i*4U; 306} 307static inline u32 fifo_intr_ctxsw_timeout_info__size_1_v(void) 308{ 309 return 0x00000004U; 310} 311static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_v(u32 r) 312{ 313 return (r >> 14U) & 0x3U; 314} 315static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v(void) 316{ 317 return 0x00000001U; 318} 319static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v(void) 320{ 321 return 0x00000002U; 322} 323static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v(void) 324{ 325 return 0x00000003U; 326} 327static inline u32 fifo_intr_ctxsw_timeout_info_prev_tsgid_v(u32 r) 328{ 329 return (r >> 0U) & 0x3fffU; 330} 331static inline u32 fifo_intr_ctxsw_timeout_info_next_tsgid_v(u32 r) 332{ 333 return (r >> 16U) & 0x3fffU; 334} 335static inline u32 fifo_intr_ctxsw_timeout_info_status_v(u32 r) 336{ 337 return (r >> 30U) & 0x3U; 338} 339static inline u32 fifo_intr_ctxsw_timeout_info_status_awaiting_ack_v(void) 340{ 341 return 0x00000000U; 342} 343static inline u32 fifo_intr_ctxsw_timeout_info_status_eng_was_reset_v(void) 344{ 345 return 0x00000001U; 346} 347static inline u32 fifo_intr_ctxsw_timeout_info_status_ack_received_v(void) 348{ 349 return 0x00000002U; 350} 351static inline u32 fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v(void) 352{ 353 return 0x00000003U; 354} 355static inline u32 fifo_intr_pbdma_id_r(void) 356{ 357 return 0x000025a0U; 358} 359static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) 360{ 361 return (v & 0x1U) << (0U + i*1U); 362} 363static inline u32 
fifo_intr_pbdma_id_status_v(u32 r, u32 i) 364{ 365 return (r >> (0U + i*1U)) & 0x1U; 366} 367static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) 368{ 369 return 0x00000003U; 370} 371static inline u32 fifo_intr_runlist_r(void) 372{ 373 return 0x00002a00U; 374} 375static inline u32 fifo_fb_timeout_r(void) 376{ 377 return 0x00002a04U; 378} 379static inline u32 fifo_fb_timeout_period_m(void) 380{ 381 return 0x3fffffffU << 0U; 382} 383static inline u32 fifo_fb_timeout_period_max_f(void) 384{ 385 return 0x3fffffffU; 386} 387static inline u32 fifo_fb_timeout_period_init_f(void) 388{ 389 return 0x3c00U; 390} 391static inline u32 fifo_fb_timeout_detection_m(void) 392{ 393 return 0x1U << 31U; 394} 395static inline u32 fifo_fb_timeout_detection_enabled_f(void) 396{ 397 return 0x80000000U; 398} 399static inline u32 fifo_fb_timeout_detection_disabled_f(void) 400{ 401 return 0x0U; 402} 403static inline u32 fifo_sched_disable_r(void) 404{ 405 return 0x00002630U; 406} 407static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) 408{ 409 return (v & 0x1U) << (0U + i*1U); 410} 411static inline u32 fifo_sched_disable_runlist_m(u32 i) 412{ 413 return 0x1U << (0U + i*1U); 414} 415static inline u32 fifo_sched_disable_true_v(void) 416{ 417 return 0x00000001U; 418} 419static inline u32 fifo_runlist_preempt_r(void) 420{ 421 return 0x00002638U; 422} 423static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i) 424{ 425 return (v & 0x1U) << (0U + i*1U); 426} 427static inline u32 fifo_runlist_preempt_runlist_m(u32 i) 428{ 429 return 0x1U << (0U + i*1U); 430} 431static inline u32 fifo_runlist_preempt_runlist_pending_v(void) 432{ 433 return 0x00000001U; 434} 435static inline u32 fifo_preempt_r(void) 436{ 437 return 0x00002634U; 438} 439static inline u32 fifo_preempt_pending_true_f(void) 440{ 441 return 0x100000U; 442} 443static inline u32 fifo_preempt_type_channel_f(void) 444{ 445 return 0x0U; 446} 447static inline u32 fifo_preempt_type_tsg_f(void) 448{ 449 return 0x1000000U; 
450} 451static inline u32 fifo_preempt_chid_f(u32 v) 452{ 453 return (v & 0xfffU) << 0U; 454} 455static inline u32 fifo_preempt_id_f(u32 v) 456{ 457 return (v & 0xfffU) << 0U; 458} 459static inline u32 fifo_engine_status_r(u32 i) 460{ 461 return 0x00002640U + i*8U; 462} 463static inline u32 fifo_engine_status__size_1_v(void) 464{ 465 return 0x00000004U; 466} 467static inline u32 fifo_engine_status_id_v(u32 r) 468{ 469 return (r >> 0U) & 0xfffU; 470} 471static inline u32 fifo_engine_status_id_type_v(u32 r) 472{ 473 return (r >> 12U) & 0x1U; 474} 475static inline u32 fifo_engine_status_id_type_chid_v(void) 476{ 477 return 0x00000000U; 478} 479static inline u32 fifo_engine_status_id_type_tsgid_v(void) 480{ 481 return 0x00000001U; 482} 483static inline u32 fifo_engine_status_ctx_status_v(u32 r) 484{ 485 return (r >> 13U) & 0x7U; 486} 487static inline u32 fifo_engine_status_ctx_status_valid_v(void) 488{ 489 return 0x00000001U; 490} 491static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) 492{ 493 return 0x00000005U; 494} 495static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) 496{ 497 return 0x00000006U; 498} 499static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) 500{ 501 return 0x00000007U; 502} 503static inline u32 fifo_engine_status_next_id_v(u32 r) 504{ 505 return (r >> 16U) & 0xfffU; 506} 507static inline u32 fifo_engine_status_next_id_type_v(u32 r) 508{ 509 return (r >> 28U) & 0x1U; 510} 511static inline u32 fifo_engine_status_next_id_type_chid_v(void) 512{ 513 return 0x00000000U; 514} 515static inline u32 fifo_engine_status_eng_reload_v(u32 r) 516{ 517 return (r >> 29U) & 0x1U; 518} 519static inline u32 fifo_engine_status_faulted_v(u32 r) 520{ 521 return (r >> 30U) & 0x1U; 522} 523static inline u32 fifo_engine_status_faulted_true_v(void) 524{ 525 return 0x00000001U; 526} 527static inline u32 fifo_engine_status_engine_v(u32 r) 528{ 529 return (r >> 31U) & 0x1U; 530} 531static inline u32 
fifo_engine_status_engine_idle_v(void) 532{ 533 return 0x00000000U; 534} 535static inline u32 fifo_engine_status_engine_busy_v(void) 536{ 537 return 0x00000001U; 538} 539static inline u32 fifo_engine_status_ctxsw_v(u32 r) 540{ 541 return (r >> 15U) & 0x1U; 542} 543static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) 544{ 545 return 0x00000001U; 546} 547static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) 548{ 549 return 0x8000U; 550} 551static inline u32 fifo_eng_ctxsw_timeout_r(void) 552{ 553 return 0x00002a0cU; 554} 555static inline u32 fifo_eng_ctxsw_timeout_period_f(u32 v) 556{ 557 return (v & 0x7fffffffU) << 0U; 558} 559static inline u32 fifo_eng_ctxsw_timeout_period_m(void) 560{ 561 return 0x7fffffffU << 0U; 562} 563static inline u32 fifo_eng_ctxsw_timeout_period_v(u32 r) 564{ 565 return (r >> 0U) & 0x7fffffffU; 566} 567static inline u32 fifo_eng_ctxsw_timeout_period_init_f(void) 568{ 569 return 0x3fffffU; 570} 571static inline u32 fifo_eng_ctxsw_timeout_period_max_f(void) 572{ 573 return 0x7fffffffU; 574} 575static inline u32 fifo_eng_ctxsw_timeout_detection_f(u32 v) 576{ 577 return (v & 0x1U) << 31U; 578} 579static inline u32 fifo_eng_ctxsw_timeout_detection_m(void) 580{ 581 return 0x1U << 31U; 582} 583static inline u32 fifo_eng_ctxsw_timeout_detection_enabled_f(void) 584{ 585 return 0x80000000U; 586} 587static inline u32 fifo_eng_ctxsw_timeout_detection_disabled_f(void) 588{ 589 return 0x0U; 590} 591static inline u32 fifo_pbdma_status_r(u32 i) 592{ 593 return 0x00003080U + i*4U; 594} 595static inline u32 fifo_pbdma_status__size_1_v(void) 596{ 597 return 0x00000003U; 598} 599static inline u32 fifo_pbdma_status_id_v(u32 r) 600{ 601 return (r >> 0U) & 0xfffU; 602} 603static inline u32 fifo_pbdma_status_id_type_v(u32 r) 604{ 605 return (r >> 12U) & 0x1U; 606} 607static inline u32 fifo_pbdma_status_id_type_chid_v(void) 608{ 609 return 0x00000000U; 610} 611static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) 612{ 613 return 0x00000001U; 
614} 615static inline u32 fifo_pbdma_status_chan_status_v(u32 r) 616{ 617 return (r >> 13U) & 0x7U; 618} 619static inline u32 fifo_pbdma_status_chan_status_valid_v(void) 620{ 621 return 0x00000001U; 622} 623static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) 624{ 625 return 0x00000005U; 626} 627static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) 628{ 629 return 0x00000006U; 630} 631static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) 632{ 633 return 0x00000007U; 634} 635static inline u32 fifo_pbdma_status_next_id_v(u32 r) 636{ 637 return (r >> 16U) & 0xfffU; 638} 639static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) 640{ 641 return (r >> 28U) & 0x1U; 642} 643static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) 644{ 645 return 0x00000000U; 646} 647static inline u32 fifo_pbdma_status_chsw_v(u32 r) 648{ 649 return (r >> 15U) & 0x1U; 650} 651static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) 652{ 653 return 0x00000001U; 654} 655static inline u32 fifo_cfg0_r(void) 656{ 657 return 0x00002004U; 658} 659static inline u32 fifo_cfg0_num_pbdma_v(u32 r) 660{ 661 return (r >> 0U) & 0xffU; 662} 663static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r) 664{ 665 return (r >> 16U) & 0xffU; 666} 667#endif
diff --git a/include/nvgpu/hw/gv11b/hw_flush_gv11b.h b/include/nvgpu/hw/gv11b/hw_flush_gv11b.h
deleted file mode 100644
index 45c01de..0000000
--- a/include/nvgpu/hw/gv11b/hw_flush_gv11b.h
+++ /dev/null
@@ -1,187 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_flush_gv11b_h_ 57#define _hw_flush_gv11b_h_ 58 59static inline u32 flush_l2_system_invalidate_r(void) 60{ 61 return 0x00070004U; 62} 63static inline u32 flush_l2_system_invalidate_pending_v(u32 r) 64{ 65 return (r >> 0U) & 0x1U; 66} 67static inline u32 flush_l2_system_invalidate_pending_busy_v(void) 68{ 69 return 0x00000001U; 70} 71static inline u32 flush_l2_system_invalidate_pending_busy_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) 76{ 77 return (r >> 1U) & 0x1U; 78} 79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) 80{ 81 return 0x00000001U; 82} 83static inline u32 flush_l2_flush_dirty_r(void) 84{ 85 return 0x00070010U; 86} 87static inline u32 flush_l2_flush_dirty_pending_v(u32 r) 88{ 89 return (r >> 0U) & 0x1U; 90} 91static inline u32 flush_l2_flush_dirty_pending_empty_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 flush_l2_flush_dirty_pending_empty_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 flush_l2_flush_dirty_pending_busy_v(void) 100{ 101 return 0x00000001U; 102} 103static inline u32 flush_l2_flush_dirty_pending_busy_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) 108{ 109 return 
(r >> 1U) & 0x1U; 110} 111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) 112{ 113 return 0x00000000U; 114} 115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) 116{ 117 return 0x0U; 118} 119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) 120{ 121 return 0x00000001U; 122} 123static inline u32 flush_l2_clean_comptags_r(void) 124{ 125 return 0x0007000cU; 126} 127static inline u32 flush_l2_clean_comptags_pending_v(u32 r) 128{ 129 return (r >> 0U) & 0x1U; 130} 131static inline u32 flush_l2_clean_comptags_pending_empty_v(void) 132{ 133 return 0x00000000U; 134} 135static inline u32 flush_l2_clean_comptags_pending_empty_f(void) 136{ 137 return 0x0U; 138} 139static inline u32 flush_l2_clean_comptags_pending_busy_v(void) 140{ 141 return 0x00000001U; 142} 143static inline u32 flush_l2_clean_comptags_pending_busy_f(void) 144{ 145 return 0x1U; 146} 147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) 148{ 149 return (r >> 1U) & 0x1U; 150} 151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) 152{ 153 return 0x00000000U; 154} 155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 flush_fb_flush_r(void) 164{ 165 return 0x00070000U; 166} 167static inline u32 flush_fb_flush_pending_v(u32 r) 168{ 169 return (r >> 0U) & 0x1U; 170} 171static inline u32 flush_fb_flush_pending_busy_v(void) 172{ 173 return 0x00000001U; 174} 175static inline u32 flush_fb_flush_pending_busy_f(void) 176{ 177 return 0x1U; 178} 179static inline u32 flush_fb_flush_outstanding_v(u32 r) 180{ 181 return (r >> 1U) & 0x1U; 182} 183static inline u32 flush_fb_flush_outstanding_true_v(void) 184{ 185 return 0x00000001U; 186} 187#endif
diff --git a/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h b/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h
deleted file mode 100644
index 9395da3..0000000
--- a/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h
+++ /dev/null
@@ -1,155 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_fuse_gv11b_h_ 57#define _hw_fuse_gv11b_h_ 58 59static inline u32 fuse_status_opt_gpc_r(void) 60{ 61 return 0x00021c1cU; 62} 63static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) 64{ 65 return 0x00021c38U + i*4U; 66} 67static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) 68{ 69 return 0x00021838U + i*4U; 70} 71static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) 72{ 73 return 0x00021944U; 74} 75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) 76{ 77 return (v & 0xffU) << 0U; 78} 79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) 80{ 81 return 0xffU << 0U; 82} 83static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) 84{ 85 return (r >> 0U) & 0xffU; 86} 87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) 88{ 89 return 0x00021948U; 90} 91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) 92{ 93 return (v & 0x1U) << 0U; 94} 95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) 96{ 97 return 0x1U << 0U; 98} 99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) 104{ 105 return 0x1U; 106} 107static inline u32 
fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) 108{ 109 return 0x0U; 110} 111static inline u32 fuse_status_opt_fbio_r(void) 112{ 113 return 0x00021c14U; 114} 115static inline u32 fuse_status_opt_fbio_data_f(u32 v) 116{ 117 return (v & 0xffffU) << 0U; 118} 119static inline u32 fuse_status_opt_fbio_data_m(void) 120{ 121 return 0xffffU << 0U; 122} 123static inline u32 fuse_status_opt_fbio_data_v(u32 r) 124{ 125 return (r >> 0U) & 0xffffU; 126} 127static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i) 128{ 129 return 0x00021d70U + i*4U; 130} 131static inline u32 fuse_status_opt_fbp_r(void) 132{ 133 return 0x00021d38U; 134} 135static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) 136{ 137 return (r >> (0U + i*1U)) & 0x1U; 138} 139static inline u32 fuse_opt_ecc_en_r(void) 140{ 141 return 0x00021228U; 142} 143static inline u32 fuse_opt_feature_fuses_override_disable_r(void) 144{ 145 return 0x000213f0U; 146} 147static inline u32 fuse_opt_sec_debug_en_r(void) 148{ 149 return 0x00021218U; 150} 151static inline u32 fuse_opt_priv_sec_en_r(void) 152{ 153 return 0x00021434U; 154} 155#endif
diff --git a/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h b/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h
deleted file mode 100644
index 922dd68..0000000
--- a/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h
+++ /dev/null
@@ -1,571 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gmmu_gv11b_h_ 57#define _hw_gmmu_gv11b_h_ 58 59static inline u32 gmmu_new_pde_is_pte_w(void) 60{ 61 return 0U; 62} 63static inline u32 gmmu_new_pde_is_pte_false_f(void) 64{ 65 return 0x0U; 66} 67static inline u32 gmmu_new_pde_aperture_w(void) 68{ 69 return 0U; 70} 71static inline u32 gmmu_new_pde_aperture_invalid_f(void) 72{ 73 return 0x0U; 74} 75static inline u32 gmmu_new_pde_aperture_video_memory_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void) 80{ 81 return 0x4U; 82} 83static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void) 84{ 85 return 0x6U; 86} 87static inline u32 gmmu_new_pde_address_sys_f(u32 v) 88{ 89 return (v & 0xffffffU) << 8U; 90} 91static inline u32 gmmu_new_pde_address_sys_w(void) 92{ 93 return 0U; 94} 95static inline u32 gmmu_new_pde_vol_w(void) 96{ 97 return 0U; 98} 99static inline u32 gmmu_new_pde_vol_true_f(void) 100{ 101 return 0x8U; 102} 103static inline u32 gmmu_new_pde_vol_false_f(void) 104{ 105 return 0x0U; 106} 107static inline u32 gmmu_new_pde_address_shift_v(void) 108{ 109 return 0x0000000cU; 110} 111static inline u32 gmmu_new_pde__size_v(void) 112{ 113 return 0x00000008U; 114} 115static inline u32 gmmu_new_dual_pde_is_pte_w(void) 116{ 117 return 0U; 118} 
119static inline u32 gmmu_new_dual_pde_is_pte_false_f(void) 120{ 121 return 0x0U; 122} 123static inline u32 gmmu_new_dual_pde_aperture_big_w(void) 124{ 125 return 0U; 126} 127static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void) 128{ 129 return 0x0U; 130} 131static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void) 132{ 133 return 0x2U; 134} 135static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void) 136{ 137 return 0x4U; 138} 139static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void) 140{ 141 return 0x6U; 142} 143static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v) 144{ 145 return (v & 0xfffffffU) << 4U; 146} 147static inline u32 gmmu_new_dual_pde_address_big_sys_w(void) 148{ 149 return 0U; 150} 151static inline u32 gmmu_new_dual_pde_aperture_small_w(void) 152{ 153 return 2U; 154} 155static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void) 160{ 161 return 0x2U; 162} 163static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void) 164{ 165 return 0x4U; 166} 167static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void) 168{ 169 return 0x6U; 170} 171static inline u32 gmmu_new_dual_pde_vol_small_w(void) 172{ 173 return 2U; 174} 175static inline u32 gmmu_new_dual_pde_vol_small_true_f(void) 176{ 177 return 0x8U; 178} 179static inline u32 gmmu_new_dual_pde_vol_small_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 gmmu_new_dual_pde_vol_big_w(void) 184{ 185 return 0U; 186} 187static inline u32 gmmu_new_dual_pde_vol_big_true_f(void) 188{ 189 return 0x8U; 190} 191static inline u32 gmmu_new_dual_pde_vol_big_false_f(void) 192{ 193 return 0x0U; 194} 195static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v) 196{ 197 return (v & 0xffffffU) << 8U; 198} 199static inline u32 gmmu_new_dual_pde_address_small_sys_w(void) 200{ 201 return 2U; 202} 203static inline u32 
gmmu_new_dual_pde_address_shift_v(void) 204{ 205 return 0x0000000cU; 206} 207static inline u32 gmmu_new_dual_pde_address_big_shift_v(void) 208{ 209 return 0x00000008U; 210} 211static inline u32 gmmu_new_dual_pde__size_v(void) 212{ 213 return 0x00000010U; 214} 215static inline u32 gmmu_new_pte__size_v(void) 216{ 217 return 0x00000008U; 218} 219static inline u32 gmmu_new_pte_valid_w(void) 220{ 221 return 0U; 222} 223static inline u32 gmmu_new_pte_valid_true_f(void) 224{ 225 return 0x1U; 226} 227static inline u32 gmmu_new_pte_valid_false_f(void) 228{ 229 return 0x0U; 230} 231static inline u32 gmmu_new_pte_privilege_w(void) 232{ 233 return 0U; 234} 235static inline u32 gmmu_new_pte_privilege_true_f(void) 236{ 237 return 0x20U; 238} 239static inline u32 gmmu_new_pte_privilege_false_f(void) 240{ 241 return 0x0U; 242} 243static inline u32 gmmu_new_pte_address_sys_f(u32 v) 244{ 245 return (v & 0xffffffU) << 8U; 246} 247static inline u32 gmmu_new_pte_address_sys_w(void) 248{ 249 return 0U; 250} 251static inline u32 gmmu_new_pte_address_vid_f(u32 v) 252{ 253 return (v & 0xffffffU) << 8U; 254} 255static inline u32 gmmu_new_pte_address_vid_w(void) 256{ 257 return 0U; 258} 259static inline u32 gmmu_new_pte_vol_w(void) 260{ 261 return 0U; 262} 263static inline u32 gmmu_new_pte_vol_true_f(void) 264{ 265 return 0x8U; 266} 267static inline u32 gmmu_new_pte_vol_false_f(void) 268{ 269 return 0x0U; 270} 271static inline u32 gmmu_new_pte_aperture_w(void) 272{ 273 return 0U; 274} 275static inline u32 gmmu_new_pte_aperture_video_memory_f(void) 276{ 277 return 0x0U; 278} 279static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void) 280{ 281 return 0x4U; 282} 283static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void) 284{ 285 return 0x6U; 286} 287static inline u32 gmmu_new_pte_read_only_w(void) 288{ 289 return 0U; 290} 291static inline u32 gmmu_new_pte_read_only_true_f(void) 292{ 293 return 0x40U; 294} 295static inline u32 gmmu_new_pte_comptagline_f(u32 v) 296{ 297 return (v & 
0x3ffffU) << 4U; 298} 299static inline u32 gmmu_new_pte_comptagline_w(void) 300{ 301 return 1U; 302} 303static inline u32 gmmu_new_pte_kind_f(u32 v) 304{ 305 return (v & 0xffU) << 24U; 306} 307static inline u32 gmmu_new_pte_kind_w(void) 308{ 309 return 1U; 310} 311static inline u32 gmmu_new_pte_address_shift_v(void) 312{ 313 return 0x0000000cU; 314} 315static inline u32 gmmu_pte_kind_f(u32 v) 316{ 317 return (v & 0xffU) << 4U; 318} 319static inline u32 gmmu_pte_kind_w(void) 320{ 321 return 1U; 322} 323static inline u32 gmmu_pte_kind_invalid_v(void) 324{ 325 return 0x000000ffU; 326} 327static inline u32 gmmu_pte_kind_pitch_v(void) 328{ 329 return 0x00000000U; 330} 331static inline u32 gmmu_fault_client_type_gpc_v(void) 332{ 333 return 0x00000000U; 334} 335static inline u32 gmmu_fault_client_type_hub_v(void) 336{ 337 return 0x00000001U; 338} 339static inline u32 gmmu_fault_type_unbound_inst_block_v(void) 340{ 341 return 0x00000004U; 342} 343static inline u32 gmmu_fault_type_pte_v(void) 344{ 345 return 0x00000002U; 346} 347static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void) 348{ 349 return 0x00000005U; 350} 351static inline u32 gmmu_fault_mmu_eng_id_physical_v(void) 352{ 353 return 0x0000001fU; 354} 355static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void) 356{ 357 return 0x0000000fU; 358} 359static inline u32 gmmu_fault_buf_size_v(void) 360{ 361 return 0x00000020U; 362} 363static inline u32 gmmu_fault_buf_entry_inst_aperture_v(u32 r) 364{ 365 return (r >> 8U) & 0x3U; 366} 367static inline u32 gmmu_fault_buf_entry_inst_aperture_w(void) 368{ 369 return 0U; 370} 371static inline u32 gmmu_fault_buf_entry_inst_aperture_vid_mem_v(void) 372{ 373 return 0x00000000U; 374} 375static inline u32 gmmu_fault_buf_entry_inst_aperture_sys_coh_v(void) 376{ 377 return 0x00000002U; 378} 379static inline u32 gmmu_fault_buf_entry_inst_aperture_sys_nocoh_v(void) 380{ 381 return 0x00000003U; 382} 383static inline u32 gmmu_fault_buf_entry_inst_lo_f(u32 v) 384{ 385 return (v & 0xfffffU) << 12U; 
386} 387static inline u32 gmmu_fault_buf_entry_inst_lo_v(u32 r) 388{ 389 return (r >> 12U) & 0xfffffU; 390} 391static inline u32 gmmu_fault_buf_entry_inst_lo_b(void) 392{ 393 return 12U; 394} 395static inline u32 gmmu_fault_buf_entry_inst_lo_w(void) 396{ 397 return 0U; 398} 399static inline u32 gmmu_fault_buf_entry_inst_hi_v(u32 r) 400{ 401 return (r >> 0U) & 0xffffffffU; 402} 403static inline u32 gmmu_fault_buf_entry_inst_hi_w(void) 404{ 405 return 1U; 406} 407static inline u32 gmmu_fault_buf_entry_addr_phys_aperture_v(u32 r) 408{ 409 return (r >> 0U) & 0x3U; 410} 411static inline u32 gmmu_fault_buf_entry_addr_phys_aperture_w(void) 412{ 413 return 2U; 414} 415static inline u32 gmmu_fault_buf_entry_addr_lo_f(u32 v) 416{ 417 return (v & 0xfffffU) << 12U; 418} 419static inline u32 gmmu_fault_buf_entry_addr_lo_v(u32 r) 420{ 421 return (r >> 12U) & 0xfffffU; 422} 423static inline u32 gmmu_fault_buf_entry_addr_lo_b(void) 424{ 425 return 12U; 426} 427static inline u32 gmmu_fault_buf_entry_addr_lo_w(void) 428{ 429 return 2U; 430} 431static inline u32 gmmu_fault_buf_entry_addr_hi_v(u32 r) 432{ 433 return (r >> 0U) & 0xffffffffU; 434} 435static inline u32 gmmu_fault_buf_entry_addr_hi_w(void) 436{ 437 return 3U; 438} 439static inline u32 gmmu_fault_buf_entry_timestamp_lo_v(u32 r) 440{ 441 return (r >> 0U) & 0xffffffffU; 442} 443static inline u32 gmmu_fault_buf_entry_timestamp_lo_w(void) 444{ 445 return 4U; 446} 447static inline u32 gmmu_fault_buf_entry_timestamp_hi_v(u32 r) 448{ 449 return (r >> 0U) & 0xffffffffU; 450} 451static inline u32 gmmu_fault_buf_entry_timestamp_hi_w(void) 452{ 453 return 5U; 454} 455static inline u32 gmmu_fault_buf_entry_engine_id_v(u32 r) 456{ 457 return (r >> 0U) & 0x1ffU; 458} 459static inline u32 gmmu_fault_buf_entry_engine_id_w(void) 460{ 461 return 6U; 462} 463static inline u32 gmmu_fault_buf_entry_fault_type_v(u32 r) 464{ 465 return (r >> 0U) & 0x1fU; 466} 467static inline u32 gmmu_fault_buf_entry_fault_type_w(void) 468{ 469 return 7U; 470} 
471static inline u32 gmmu_fault_buf_entry_replayable_fault_v(u32 r) 472{ 473 return (r >> 7U) & 0x1U; 474} 475static inline u32 gmmu_fault_buf_entry_replayable_fault_w(void) 476{ 477 return 7U; 478} 479static inline u32 gmmu_fault_buf_entry_replayable_fault_true_v(void) 480{ 481 return 0x00000001U; 482} 483static inline u32 gmmu_fault_buf_entry_replayable_fault_true_f(void) 484{ 485 return 0x80U; 486} 487static inline u32 gmmu_fault_buf_entry_client_v(u32 r) 488{ 489 return (r >> 8U) & 0x7fU; 490} 491static inline u32 gmmu_fault_buf_entry_client_w(void) 492{ 493 return 7U; 494} 495static inline u32 gmmu_fault_buf_entry_access_type_v(u32 r) 496{ 497 return (r >> 16U) & 0xfU; 498} 499static inline u32 gmmu_fault_buf_entry_access_type_w(void) 500{ 501 return 7U; 502} 503static inline u32 gmmu_fault_buf_entry_mmu_client_type_v(u32 r) 504{ 505 return (r >> 20U) & 0x1U; 506} 507static inline u32 gmmu_fault_buf_entry_mmu_client_type_w(void) 508{ 509 return 7U; 510} 511static inline u32 gmmu_fault_buf_entry_gpc_id_v(u32 r) 512{ 513 return (r >> 24U) & 0x1fU; 514} 515static inline u32 gmmu_fault_buf_entry_gpc_id_w(void) 516{ 517 return 7U; 518} 519static inline u32 gmmu_fault_buf_entry_protected_mode_v(u32 r) 520{ 521 return (r >> 29U) & 0x1U; 522} 523static inline u32 gmmu_fault_buf_entry_protected_mode_w(void) 524{ 525 return 7U; 526} 527static inline u32 gmmu_fault_buf_entry_protected_mode_true_v(void) 528{ 529 return 0x00000001U; 530} 531static inline u32 gmmu_fault_buf_entry_protected_mode_true_f(void) 532{ 533 return 0x20000000U; 534} 535static inline u32 gmmu_fault_buf_entry_replayable_fault_en_v(u32 r) 536{ 537 return (r >> 30U) & 0x1U; 538} 539static inline u32 gmmu_fault_buf_entry_replayable_fault_en_w(void) 540{ 541 return 7U; 542} 543static inline u32 gmmu_fault_buf_entry_replayable_fault_en_true_v(void) 544{ 545 return 0x00000001U; 546} 547static inline u32 gmmu_fault_buf_entry_replayable_fault_en_true_f(void) 548{ 549 return 0x40000000U; 550} 551static inline 
u32 gmmu_fault_buf_entry_valid_m(void) 552{ 553 return 0x1U << 31U; 554} 555static inline u32 gmmu_fault_buf_entry_valid_v(u32 r) 556{ 557 return (r >> 31U) & 0x1U; 558} 559static inline u32 gmmu_fault_buf_entry_valid_w(void) 560{ 561 return 7U; 562} 563static inline u32 gmmu_fault_buf_entry_valid_true_v(void) 564{ 565 return 0x00000001U; 566} 567static inline u32 gmmu_fault_buf_entry_valid_true_f(void) 568{ 569 return 0x80000000U; 570} 571#endif
diff --git a/include/nvgpu/hw/gv11b/hw_gr_gv11b.h b/include/nvgpu/hw/gv11b/hw_gr_gv11b.h
deleted file mode 100644
index 4a3da79..0000000
--- a/include/nvgpu/hw/gv11b/hw_gr_gv11b.h
+++ /dev/null
@@ -1,5703 +0,0 @@ 1/* 2 * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_gr_gv11b_h_ 57#define _hw_gr_gv11b_h_ 58 59static inline u32 gr_intr_r(void) 60{ 61 return 0x00400100U; 62} 63static inline u32 gr_intr_notify_pending_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 gr_intr_notify_reset_f(void) 68{ 69 return 0x1U; 70} 71static inline u32 gr_intr_semaphore_pending_f(void) 72{ 73 return 0x2U; 74} 75static inline u32 gr_intr_semaphore_reset_f(void) 76{ 77 return 0x2U; 78} 79static inline u32 gr_intr_illegal_method_pending_f(void) 80{ 81 return 0x10U; 82} 83static inline u32 gr_intr_illegal_method_reset_f(void) 84{ 85 return 0x10U; 86} 87static inline u32 gr_intr_illegal_notify_pending_f(void) 88{ 89 return 0x40U; 90} 91static inline u32 gr_intr_illegal_notify_reset_f(void) 92{ 93 return 0x40U; 94} 95static inline u32 gr_intr_firmware_method_f(u32 v) 96{ 97 return (v & 0x1U) << 8U; 98} 99static inline u32 gr_intr_firmware_method_pending_f(void) 100{ 101 return 0x100U; 102} 103static inline u32 gr_intr_firmware_method_reset_f(void) 104{ 105 return 0x100U; 106} 107static inline u32 gr_intr_illegal_class_pending_f(void) 108{ 109 return 0x20U; 110} 111static inline u32 gr_intr_illegal_class_reset_f(void) 112{ 113 return 0x20U; 114} 115static inline u32 gr_intr_fecs_error_pending_f(void) 116{ 117 
return 0x80000U; 118} 119static inline u32 gr_intr_fecs_error_reset_f(void) 120{ 121 return 0x80000U; 122} 123static inline u32 gr_intr_class_error_pending_f(void) 124{ 125 return 0x100000U; 126} 127static inline u32 gr_intr_class_error_reset_f(void) 128{ 129 return 0x100000U; 130} 131static inline u32 gr_intr_exception_pending_f(void) 132{ 133 return 0x200000U; 134} 135static inline u32 gr_intr_exception_reset_f(void) 136{ 137 return 0x200000U; 138} 139static inline u32 gr_fecs_intr_r(void) 140{ 141 return 0x00400144U; 142} 143static inline u32 gr_class_error_r(void) 144{ 145 return 0x00400110U; 146} 147static inline u32 gr_class_error_code_v(u32 r) 148{ 149 return (r >> 0U) & 0xffffU; 150} 151static inline u32 gr_intr_nonstall_r(void) 152{ 153 return 0x00400120U; 154} 155static inline u32 gr_intr_nonstall_trap_pending_f(void) 156{ 157 return 0x2U; 158} 159static inline u32 gr_intr_en_r(void) 160{ 161 return 0x0040013cU; 162} 163static inline u32 gr_exception_r(void) 164{ 165 return 0x00400108U; 166} 167static inline u32 gr_exception_fe_m(void) 168{ 169 return 0x1U << 0U; 170} 171static inline u32 gr_exception_gpc_m(void) 172{ 173 return 0x1U << 24U; 174} 175static inline u32 gr_exception_memfmt_m(void) 176{ 177 return 0x1U << 1U; 178} 179static inline u32 gr_exception_ds_m(void) 180{ 181 return 0x1U << 4U; 182} 183static inline u32 gr_exception_sked_m(void) 184{ 185 return 0x1U << 8U; 186} 187static inline u32 gr_exception_pd_m(void) 188{ 189 return 0x1U << 2U; 190} 191static inline u32 gr_exception_scc_m(void) 192{ 193 return 0x1U << 3U; 194} 195static inline u32 gr_exception_ssync_m(void) 196{ 197 return 0x1U << 5U; 198} 199static inline u32 gr_exception_mme_m(void) 200{ 201 return 0x1U << 7U; 202} 203static inline u32 gr_exception1_r(void) 204{ 205 return 0x00400118U; 206} 207static inline u32 gr_exception1_gpc_0_pending_f(void) 208{ 209 return 0x1U; 210} 211static inline u32 gr_exception2_r(void) 212{ 213 return 0x0040011cU; 214} 215static inline u32 
gr_exception_en_r(void) 216{ 217 return 0x00400138U; 218} 219static inline u32 gr_exception_en_fe_m(void) 220{ 221 return 0x1U << 0U; 222} 223static inline u32 gr_exception_en_fe_enabled_f(void) 224{ 225 return 0x1U; 226} 227static inline u32 gr_exception_en_gpc_m(void) 228{ 229 return 0x1U << 24U; 230} 231static inline u32 gr_exception_en_gpc_enabled_f(void) 232{ 233 return 0x1000000U; 234} 235static inline u32 gr_exception_en_memfmt_m(void) 236{ 237 return 0x1U << 1U; 238} 239static inline u32 gr_exception_en_memfmt_enabled_f(void) 240{ 241 return 0x2U; 242} 243static inline u32 gr_exception_en_ds_m(void) 244{ 245 return 0x1U << 4U; 246} 247static inline u32 gr_exception_en_ds_enabled_f(void) 248{ 249 return 0x10U; 250} 251static inline u32 gr_exception_en_pd_m(void) 252{ 253 return 0x1U << 2U; 254} 255static inline u32 gr_exception_en_pd_enabled_f(void) 256{ 257 return 0x4U; 258} 259static inline u32 gr_exception_en_scc_m(void) 260{ 261 return 0x1U << 3U; 262} 263static inline u32 gr_exception_en_scc_enabled_f(void) 264{ 265 return 0x8U; 266} 267static inline u32 gr_exception_en_ssync_m(void) 268{ 269 return 0x1U << 5U; 270} 271static inline u32 gr_exception_en_ssync_enabled_f(void) 272{ 273 return 0x20U; 274} 275static inline u32 gr_exception_en_mme_m(void) 276{ 277 return 0x1U << 7U; 278} 279static inline u32 gr_exception_en_mme_enabled_f(void) 280{ 281 return 0x80U; 282} 283static inline u32 gr_exception_en_sked_m(void) 284{ 285 return 0x1U << 8U; 286} 287static inline u32 gr_exception_en_sked_enabled_f(void) 288{ 289 return 0x100U; 290} 291static inline u32 gr_exception1_en_r(void) 292{ 293 return 0x00400130U; 294} 295static inline u32 gr_exception2_en_r(void) 296{ 297 return 0x00400134U; 298} 299static inline u32 gr_gpfifo_ctl_r(void) 300{ 301 return 0x00400500U; 302} 303static inline u32 gr_gpfifo_ctl_access_f(u32 v) 304{ 305 return (v & 0x1U) << 0U; 306} 307static inline u32 gr_gpfifo_ctl_access_disabled_f(void) 308{ 309 return 0x0U; 310} 311static inline 
u32 gr_gpfifo_ctl_access_enabled_f(void) 312{ 313 return 0x1U; 314} 315static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) 316{ 317 return (v & 0x1U) << 16U; 318} 319static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) 320{ 321 return 0x00000001U; 322} 323static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) 324{ 325 return 0x10000U; 326} 327static inline u32 gr_gpfifo_status_r(void) 328{ 329 return 0x00400504U; 330} 331static inline u32 gr_trapped_addr_r(void) 332{ 333 return 0x00400704U; 334} 335static inline u32 gr_trapped_addr_mthd_v(u32 r) 336{ 337 return (r >> 2U) & 0xfffU; 338} 339static inline u32 gr_trapped_addr_subch_v(u32 r) 340{ 341 return (r >> 16U) & 0x7U; 342} 343static inline u32 gr_trapped_addr_mme_generated_v(u32 r) 344{ 345 return (r >> 20U) & 0x1U; 346} 347static inline u32 gr_trapped_addr_datahigh_v(u32 r) 348{ 349 return (r >> 24U) & 0x1U; 350} 351static inline u32 gr_trapped_addr_priv_v(u32 r) 352{ 353 return (r >> 28U) & 0x1U; 354} 355static inline u32 gr_trapped_addr_status_v(u32 r) 356{ 357 return (r >> 31U) & 0x1U; 358} 359static inline u32 gr_trapped_data_lo_r(void) 360{ 361 return 0x00400708U; 362} 363static inline u32 gr_trapped_data_hi_r(void) 364{ 365 return 0x0040070cU; 366} 367static inline u32 gr_trapped_data_mme_r(void) 368{ 369 return 0x00400710U; 370} 371static inline u32 gr_trapped_data_mme_pc_v(u32 r) 372{ 373 return (r >> 0U) & 0xfffU; 374} 375static inline u32 gr_status_r(void) 376{ 377 return 0x00400700U; 378} 379static inline u32 gr_status_fe_method_upper_v(u32 r) 380{ 381 return (r >> 1U) & 0x1U; 382} 383static inline u32 gr_status_fe_method_lower_v(u32 r) 384{ 385 return (r >> 2U) & 0x1U; 386} 387static inline u32 gr_status_fe_method_lower_idle_v(void) 388{ 389 return 0x00000000U; 390} 391static inline u32 gr_status_fe_gi_v(u32 r) 392{ 393 return (r >> 21U) & 0x1U; 394} 395static inline u32 gr_status_mask_r(void) 396{ 397 return 0x00400610U; 398} 399static inline u32 gr_status_1_r(void) 400{ 
401 return 0x00400604U; 402} 403static inline u32 gr_status_2_r(void) 404{ 405 return 0x00400608U; 406} 407static inline u32 gr_engine_status_r(void) 408{ 409 return 0x0040060cU; 410} 411static inline u32 gr_engine_status_value_busy_f(void) 412{ 413 return 0x1U; 414} 415static inline u32 gr_pri_be0_becs_be_exception_r(void) 416{ 417 return 0x00410204U; 418} 419static inline u32 gr_pri_be0_becs_be_exception_en_r(void) 420{ 421 return 0x00410208U; 422} 423static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) 424{ 425 return 0x00502c90U; 426} 427static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) 428{ 429 return 0x00502c94U; 430} 431static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) 432{ 433 return 0x00504508U; 434} 435static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 436{ 437 return 0x0050450cU; 438} 439static inline u32 gr_activity_0_r(void) 440{ 441 return 0x00400380U; 442} 443static inline u32 gr_activity_1_r(void) 444{ 445 return 0x00400384U; 446} 447static inline u32 gr_activity_2_r(void) 448{ 449 return 0x00400388U; 450} 451static inline u32 gr_activity_4_r(void) 452{ 453 return 0x00400390U; 454} 455static inline u32 gr_activity_4_gpc0_s(void) 456{ 457 return 3U; 458} 459static inline u32 gr_activity_4_gpc0_f(u32 v) 460{ 461 return (v & 0x7U) << 0U; 462} 463static inline u32 gr_activity_4_gpc0_m(void) 464{ 465 return 0x7U << 0U; 466} 467static inline u32 gr_activity_4_gpc0_v(u32 r) 468{ 469 return (r >> 0U) & 0x7U; 470} 471static inline u32 gr_activity_4_gpc0_empty_v(void) 472{ 473 return 0x00000000U; 474} 475static inline u32 gr_activity_4_gpc0_preempted_v(void) 476{ 477 return 0x00000004U; 478} 479static inline u32 gr_pri_gpc0_gcc_dbg_r(void) 480{ 481 return 0x00501000U; 482} 483static inline u32 gr_pri_gpcs_gcc_dbg_r(void) 484{ 485 return 0x00419000U; 486} 487static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) 488{ 489 return 0x1U << 1U; 490} 491static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) 492{ 493 
return 0x0050433cU; 494} 495static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) 496{ 497 return 0x00419b3cU; 498} 499static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) 500{ 501 return 0x1U << 0U; 502} 503static inline u32 gr_pri_sked_activity_r(void) 504{ 505 return 0x00407054U; 506} 507static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) 508{ 509 return 0x00502c80U; 510} 511static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) 512{ 513 return 0x00502c84U; 514} 515static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) 516{ 517 return 0x00502c88U; 518} 519static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) 520{ 521 return 0x00502c8cU; 522} 523static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) 524{ 525 return 0x00504500U; 526} 527static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) 528{ 529 return 0x00504d00U; 530} 531static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) 532{ 533 return 0x00501d00U; 534} 535static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) 536{ 537 return 0x0041ac80U; 538} 539static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) 540{ 541 return 0x0041ac84U; 542} 543static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) 544{ 545 return 0x0041ac88U; 546} 547static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) 548{ 549 return 0x0041ac8cU; 550} 551static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) 552{ 553 return 0x0041c500U; 554} 555static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) 556{ 557 return 0x0041cd00U; 558} 559static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) 560{ 561 return 0x00419d00U; 562} 563static inline u32 gr_pri_be0_becs_be_activity0_r(void) 564{ 565 return 0x00410200U; 566} 567static inline u32 gr_pri_be1_becs_be_activity0_r(void) 568{ 569 return 0x00410600U; 570} 571static inline u32 gr_pri_bes_becs_be_activity0_r(void) 572{ 573 return 0x00408a00U; 574} 575static inline u32 
gr_pri_ds_mpipe_status_r(void) 576{ 577 return 0x00405858U; 578} 579static inline u32 gr_pri_fe_go_idle_info_r(void) 580{ 581 return 0x00404194U; 582} 583static inline u32 gr_pri_fe_chip_def_info_r(void) 584{ 585 return 0x00404030U; 586} 587static inline u32 gr_pri_fe_chip_def_info_max_veid_count_v(u32 r) 588{ 589 return (r >> 0U) & 0xfffU; 590} 591static inline u32 gr_pri_fe_chip_def_info_max_veid_count_init_v(void) 592{ 593 return 0x00000040U; 594} 595static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) 596{ 597 return 0x00504238U; 598} 599static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void) 600{ 601 return 0x00504358U; 602} 603static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m(void) 604{ 605 return 0x1U << 0U; 606} 607static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m(void) 608{ 609 return 0x1U << 1U; 610} 611static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m(void) 612{ 613 return 0x1U << 2U; 614} 615static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m(void) 616{ 617 return 0x1U << 3U; 618} 619static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m(void) 620{ 621 return 0x1U << 4U; 622} 623static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m(void) 624{ 625 return 0x1U << 5U; 626} 627static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m(void) 628{ 629 return 0x1U << 6U; 630} 631static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m(void) 632{ 633 return 0x1U << 7U; 634} 635static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m(void) 636{ 637 return 0x1U << 8U; 638} 639static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m(void) 640{ 641 return 0x1U << 9U; 642} 643static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m(void) 644{ 645 return 0x1U << 10U; 646} 647static inline u32 
gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m(void) 648{ 649 return 0x1U << 11U; 650} 651static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m(void) 652{ 653 return 0x1U << 12U; 654} 655static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m(void) 656{ 657 return 0x1U << 13U; 658} 659static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m(void) 660{ 661 return 0x1U << 14U; 662} 663static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m(void) 664{ 665 return 0x1U << 15U; 666} 667static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 668{ 669 return (r >> 24U) & 0x1U; 670} 671static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 672{ 673 return (r >> 26U) & 0x1U; 674} 675static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f(void) 676{ 677 return 0x40000000U; 678} 679static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r(void) 680{ 681 return 0x0050435cU; 682} 683static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s(void) 684{ 685 return 16U; 686} 687static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(u32 r) 688{ 689 return (r >> 0U) & 0xffffU; 690} 691static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r(void) 692{ 693 return 0x00504360U; 694} 695static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s(void) 696{ 697 return 16U; 698} 699static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(u32 r) 700{ 701 return (r >> 0U) & 0xffffU; 702} 703static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r(void) 704{ 705 return 0x0050436cU; 706} 707static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m(void) 708{ 709 return 0x1U << 0U; 710} 711static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m(void) 712{ 713 
return 0x1U << 1U; 714} 715static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m(void) 716{ 717 return 0x1U << 2U; 718} 719static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m(void) 720{ 721 return 0x1U << 3U; 722} 723static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 724{ 725 return (r >> 8U) & 0x1U; 726} 727static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 728{ 729 return (r >> 10U) & 0x1U; 730} 731static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f(void) 732{ 733 return 0x40000000U; 734} 735static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r(void) 736{ 737 return 0x00504370U; 738} 739static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s(void) 740{ 741 return 16U; 742} 743static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(u32 r) 744{ 745 return (r >> 0U) & 0xffffU; 746} 747static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r(void) 748{ 749 return 0x00504374U; 750} 751static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s(void) 752{ 753 return 16U; 754} 755static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(u32 r) 756{ 757 return (r >> 0U) & 0xffffU; 758} 759static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_r(void) 760{ 761 return 0x0050464cU; 762} 763static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_data_m(void) 764{ 765 return 0x1U << 0U; 766} 767static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_predecode_m(void) 768{ 769 return 0x1U << 1U; 770} 771static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_data_m(void) 772{ 773 return 0x1U << 2U; 774} 775static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_predecode_m(void) 776{ 777 return 0x1U << 3U; 778} 
779static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_data_m(void) 780{ 781 return 0x1U << 4U; 782} 783static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_predecode_m(void) 784{ 785 return 0x1U << 5U; 786} 787static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_data_m(void) 788{ 789 return 0x1U << 6U; 790} 791static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_predecode_m(void) 792{ 793 return 0x1U << 7U; 794} 795static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 796{ 797 return (r >> 16U) & 0x1U; 798} 799static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 800{ 801 return (r >> 18U) & 0x1U; 802} 803static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_reset_task_f(void) 804{ 805 return 0x40000000U; 806} 807static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r(void) 808{ 809 return 0x00504650U; 810} 811static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s(void) 812{ 813 return 16U; 814} 815static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_v(u32 r) 816{ 817 return (r >> 0U) & 0xffffU; 818} 819static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r(void) 820{ 821 return 0x00504654U; 822} 823static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s(void) 824{ 825 return 16U; 826} 827static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_v(u32 r) 828{ 829 return (r >> 0U) & 0xffffU; 830} 831static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r(void) 832{ 833 return 0x00504624U; 834} 835static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m(void) 836{ 837 return 0x1U << 0U; 838} 839static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_1_m(void) 840{ 841 return 0x1U << 1U; 842} 843static inline u32 
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m(void) 844{ 845 return 0x1U << 2U; 846} 847static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_1_m(void) 848{ 849 return 0x1U << 3U; 850} 851static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_pixrpf_m(void) 852{ 853 return 0x1U << 4U; 854} 855static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_miss_fifo_m(void) 856{ 857 return 0x1U << 5U; 858} 859static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_pixrpf_m(void) 860{ 861 return 0x1U << 6U; 862} 863static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_miss_fifo_m(void) 864{ 865 return 0x1U << 7U; 866} 867static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 868{ 869 return (r >> 8U) & 0x1U; 870} 871static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 872{ 873 return (r >> 10U) & 0x1U; 874} 875static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_reset_task_f(void) 876{ 877 return 0x40000000U; 878} 879static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r(void) 880{ 881 return 0x00504628U; 882} 883static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s(void) 884{ 885 return 16U; 886} 887static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_v(u32 r) 888{ 889 return (r >> 0U) & 0xffffU; 890} 891static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r(void) 892{ 893 return 0x0050462cU; 894} 895static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s(void) 896{ 897 return 16U; 898} 899static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_v(u32 r) 900{ 901 return (r >> 0U) & 0xffffU; 902} 903static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r(void) 904{ 905 return 0x00504638U; 906} 907static inline u32 
gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m(void) 908{ 909 return 0x1U << 0U; 910} 911static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m(void) 912{ 913 return 0x1U << 1U; 914} 915static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m(void) 916{ 917 return 0x1U << 2U; 918} 919static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m(void) 920{ 921 return 0x1U << 3U; 922} 923static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m(void) 924{ 925 return 0x1U << 4U; 926} 927static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m(void) 928{ 929 return 0x1U << 5U; 930} 931static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m(void) 932{ 933 return 0x1U << 6U; 934} 935static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m(void) 936{ 937 return 0x1U << 7U; 938} 939static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 940{ 941 return (r >> 16U) & 0x1U; 942} 943static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 944{ 945 return (r >> 18U) & 0x1U; 946} 947static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f(void) 948{ 949 return 0x40000000U; 950} 951static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r(void) 952{ 953 return 0x0050463cU; 954} 955static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s(void) 956{ 957 return 16U; 958} 959static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(u32 r) 960{ 961 return (r >> 0U) & 0xffffU; 962} 963static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r(void) 964{ 965 return 0x00504640U; 966} 967static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s(void) 968{ 969 return 16U; 970} 971static inline u32 
gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(u32 r) 972{ 973 return (r >> 0U) & 0xffffU; 974} 975static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_r(void) 976{ 977 return 0x00419b54U; 978} 979static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp0_f(u32 v) 980{ 981 return (v & 0x1U) << 0U; 982} 983static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp0_task_f(void) 984{ 985 return 0x1U; 986} 987static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp1_f(u32 v) 988{ 989 return (v & 0x1U) << 1U; 990} 991static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp1_task_f(void) 992{ 993 return 0x2U; 994} 995static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp2_f(u32 v) 996{ 997 return (v & 0x1U) << 2U; 998} 999static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp2_task_f(void) 1000{ 1001 return 0x4U; 1002} 1003static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp3_f(u32 v) 1004{ 1005 return (v & 0x1U) << 3U; 1006} 1007static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp3_task_f(void) 1008{ 1009 return 0x8U; 1010} 1011static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp4_f(u32 v) 1012{ 1013 return (v & 0x1U) << 4U; 1014} 1015static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp4_task_f(void) 1016{ 1017 return 0x10U; 1018} 1019static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp5_f(u32 v) 1020{ 1021 return (v & 0x1U) << 5U; 1022} 1023static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp5_task_f(void) 1024{ 1025 return 0x20U; 1026} 1027static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp6_f(u32 v) 1028{ 1029 return (v & 0x1U) << 6U; 1030} 1031static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp6_task_f(void) 1032{ 1033 return 0x40U; 1034} 1035static inline u32 gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp7_f(u32 v) 1036{ 1037 return (v & 0x1U) << 7U; 1038} 1039static inline u32 
gr_pri_gpcs_tpcs_sm_lrf_ecc_control_scrub_qrfdp7_task_f(void) 1040{ 1041 return 0x80U; 1042} 1043static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_r(void) 1044{ 1045 return 0x00504354U; 1046} 1047static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp0_f(u32 v) 1048{ 1049 return (v & 0x1U) << 0U; 1050} 1051static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp0_init_f(void) 1052{ 1053 return 0x0U; 1054} 1055static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp1_f(u32 v) 1056{ 1057 return (v & 0x1U) << 1U; 1058} 1059static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp1_init_f(void) 1060{ 1061 return 0x0U; 1062} 1063static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp2_f(u32 v) 1064{ 1065 return (v & 0x1U) << 2U; 1066} 1067static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp2_init_f(void) 1068{ 1069 return 0x0U; 1070} 1071static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp3_f(u32 v) 1072{ 1073 return (v & 0x1U) << 3U; 1074} 1075static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp3_init_f(void) 1076{ 1077 return 0x0U; 1078} 1079static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp4_f(u32 v) 1080{ 1081 return (v & 0x1U) << 4U; 1082} 1083static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp4_init_f(void) 1084{ 1085 return 0x0U; 1086} 1087static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp5_f(u32 v) 1088{ 1089 return (v & 0x1U) << 5U; 1090} 1091static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp5_init_f(void) 1092{ 1093 return 0x0U; 1094} 1095static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp6_f(u32 v) 1096{ 1097 return (v & 0x1U) << 6U; 1098} 1099static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp6_init_f(void) 1100{ 1101 return 0x0U; 1102} 1103static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp7_f(u32 v) 1104{ 1105 return (v & 0x1U) << 7U; 1106} 1107static inline u32 
gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp7_init_f(void) 1108{ 1109 return 0x0U; 1110} 1111static inline u32 gr_pri_gpcs_tpcs_sm_l1_data_ecc_control_r(void) 1112{ 1113 return 0x00419b68U; 1114} 1115static inline u32 gr_pri_gpcs_tpcs_sm_l1_data_ecc_control_scrub_el1_0_f(u32 v) 1116{ 1117 return (v & 0x1U) << 0U; 1118} 1119static inline u32 gr_pri_gpcs_tpcs_sm_l1_data_ecc_control_scrub_el1_0_task_f(void) 1120{ 1121 return 0x1U; 1122} 1123static inline u32 gr_pri_gpcs_tpcs_sm_l1_data_ecc_control_scrub_el1_1_f(u32 v) 1124{ 1125 return (v & 0x1U) << 1U; 1126} 1127static inline u32 gr_pri_gpcs_tpcs_sm_l1_data_ecc_control_scrub_el1_1_task_f(void) 1128{ 1129 return 0x2U; 1130} 1131static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_r(void) 1132{ 1133 return 0x00504368U; 1134} 1135static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_scrub_el1_0_f(u32 v) 1136{ 1137 return (v & 0x1U) << 0U; 1138} 1139static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_scrub_el1_0_init_f(void) 1140{ 1141 return 0x0U; 1142} 1143static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_scrub_el1_1_f(u32 v) 1144{ 1145 return (v & 0x1U) << 1U; 1146} 1147static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_scrub_el1_1_init_f(void) 1148{ 1149 return 0x0U; 1150} 1151static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_r(void) 1152{ 1153 return 0x00419e20U; 1154} 1155static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_el1_0_f(u32 v) 1156{ 1157 return (v & 0x1U) << 0U; 1158} 1159static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_el1_0_task_f(void) 1160{ 1161 return 0x1U; 1162} 1163static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_el1_1_f(u32 v) 1164{ 1165 return (v & 0x1U) << 1U; 1166} 1167static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_el1_1_task_f(void) 1168{ 1169 return 0x2U; 1170} 1171static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_pixprf_f(u32 v) 1172{ 1173 return (v & 0x1U) << 4U; 1174} 1175static 
inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_pixprf_task_f(void) 1176{ 1177 return 0x10U; 1178} 1179static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_miss_fifo_f(u32 v) 1180{ 1181 return (v & 0x1U) << 5U; 1182} 1183static inline u32 gr_pri_gpcs_tpcs_sm_l1_tag_ecc_control_scrub_miss_fifo_task_f(void) 1184{ 1185 return 0x20U; 1186} 1187static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r(void) 1188{ 1189 return 0x00504620U; 1190} 1191static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_el1_0_f(u32 v) 1192{ 1193 return (v & 0x1U) << 0U; 1194} 1195static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_el1_0_init_f(void) 1196{ 1197 return 0x0U; 1198} 1199static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_el1_1_f(u32 v) 1200{ 1201 return (v & 0x1U) << 1U; 1202} 1203static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_el1_1_init_f(void) 1204{ 1205 return 0x0U; 1206} 1207static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_pixprf_f(u32 v) 1208{ 1209 return (v & 0x1U) << 4U; 1210} 1211static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_pixprf_init_f(void) 1212{ 1213 return 0x0U; 1214} 1215static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_miss_fifo_f(u32 v) 1216{ 1217 return (v & 0x1U) << 5U; 1218} 1219static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_miss_fifo_init_f(void) 1220{ 1221 return 0x0U; 1222} 1223static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_r(void) 1224{ 1225 return 0x00419e34U; 1226} 1227static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_warp_sm0_f(u32 v) 1228{ 1229 return (v & 0x1U) << 0U; 1230} 1231static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_warp_sm0_task_f(void) 1232{ 1233 return 0x1U; 1234} 1235static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_warp_sm1_f(u32 v) 1236{ 1237 return (v & 0x1U) << 1U; 1238} 1239static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_warp_sm1_task_f(void) 1240{ 1241 
return 0x2U; 1242} 1243static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_barrier_sm0_f(u32 v) 1244{ 1245 return (v & 0x1U) << 2U; 1246} 1247static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_barrier_sm0_task_f(void) 1248{ 1249 return 0x4U; 1250} 1251static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_barrier_sm1_f(u32 v) 1252{ 1253 return (v & 0x1U) << 3U; 1254} 1255static inline u32 gr_pri_gpcs_tpcs_sm_cbu_ecc_control_scrub_barrier_sm1_task_f(void) 1256{ 1257 return 0x8U; 1258} 1259static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_r(void) 1260{ 1261 return 0x00504634U; 1262} 1263static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_warp_sm0_f(u32 v) 1264{ 1265 return (v & 0x1U) << 0U; 1266} 1267static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_warp_sm0_init_f(void) 1268{ 1269 return 0x0U; 1270} 1271static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_warp_sm1_f(u32 v) 1272{ 1273 return (v & 0x1U) << 1U; 1274} 1275static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_warp_sm1_init_f(void) 1276{ 1277 return 0x0U; 1278} 1279static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_barrier_sm0_f(u32 v) 1280{ 1281 return (v & 0x1U) << 2U; 1282} 1283static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_barrier_sm0_init_f(void) 1284{ 1285 return 0x0U; 1286} 1287static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_barrier_sm1_f(u32 v) 1288{ 1289 return (v & 0x1U) << 3U; 1290} 1291static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_barrier_sm1_init_f(void) 1292{ 1293 return 0x0U; 1294} 1295static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_r(void) 1296{ 1297 return 0x00419e48U; 1298} 1299static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l0_data_f(u32 v) 1300{ 1301 return (v & 0x1U) << 0U; 1302} 1303static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l0_data_task_f(void) 1304{ 1305 return 0x1U; 1306} 1307static inline u32 
gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l0_predecode_f(u32 v) 1308{ 1309 return (v & 0x1U) << 1U; 1310} 1311static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l0_predecode_task_f(void) 1312{ 1313 return 0x2U; 1314} 1315static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l1_data_f(u32 v) 1316{ 1317 return (v & 0x1U) << 2U; 1318} 1319static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l1_data_task_f(void) 1320{ 1321 return 0x4U; 1322} 1323static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l1_predecode_f(u32 v) 1324{ 1325 return (v & 0x1U) << 3U; 1326} 1327static inline u32 gr_pri_gpcs_tpcs_sm_icache_ecc_control_scrub_l1_predecode_task_f(void) 1328{ 1329 return 0x8U; 1330} 1331static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_r(void) 1332{ 1333 return 0x00504648U; 1334} 1335static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l0_data_f(u32 v) 1336{ 1337 return (v & 0x1U) << 0U; 1338} 1339static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l0_data_init_f(void) 1340{ 1341 return 0x0U; 1342} 1343static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l0_predecode_f(u32 v) 1344{ 1345 return (v & 0x1U) << 1U; 1346} 1347static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l0_predecode_init_f(void) 1348{ 1349 return 0x0U; 1350} 1351static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l1_data_f(u32 v) 1352{ 1353 return (v & 0x1U) << 2U; 1354} 1355static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l1_data_init_f(void) 1356{ 1357 return 0x0U; 1358} 1359static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l1_predecode_f(u32 v) 1360{ 1361 return (v & 0x1U) << 3U; 1362} 1363static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l1_predecode_init_f(void) 1364{ 1365 return 0x0U; 1366} 1367static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void) 1368{ 1369 return 0x005042c4U; 1370} 1371static inline u32 
gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void) 1372{ 1373 return 0x0U; 1374} 1375static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void) 1376{ 1377 return 0x1U; 1378} 1379static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void) 1380{ 1381 return 0x2U; 1382} 1383static inline u32 gr_gpc0_tpc0_mpc_hww_esr_r(void) 1384{ 1385 return 0x00504430U; 1386} 1387static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void) 1388{ 1389 return 0x40000000U; 1390} 1391static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void) 1392{ 1393 return 0x00504434U; 1394} 1395static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r) 1396{ 1397 return (r >> 0U) & 0x3fU; 1398} 1399static inline u32 gr_pri_be0_crop_status1_r(void) 1400{ 1401 return 0x00410134U; 1402} 1403static inline u32 gr_pri_bes_crop_status1_r(void) 1404{ 1405 return 0x00408934U; 1406} 1407static inline u32 gr_pri_be0_zrop_status_r(void) 1408{ 1409 return 0x00410048U; 1410} 1411static inline u32 gr_pri_be0_zrop_status2_r(void) 1412{ 1413 return 0x0041004cU; 1414} 1415static inline u32 gr_pri_bes_zrop_status_r(void) 1416{ 1417 return 0x00408848U; 1418} 1419static inline u32 gr_pri_bes_zrop_status2_r(void) 1420{ 1421 return 0x0040884cU; 1422} 1423static inline u32 gr_pipe_bundle_address_r(void) 1424{ 1425 return 0x00400200U; 1426} 1427static inline u32 gr_pipe_bundle_address_value_v(u32 r) 1428{ 1429 return (r >> 0U) & 0xffffU; 1430} 1431static inline u32 gr_pipe_bundle_address_veid_f(u32 v) 1432{ 1433 return (v & 0x3fU) << 20U; 1434} 1435static inline u32 gr_pipe_bundle_address_veid_w(void) 1436{ 1437 return 0U; 1438} 1439static inline u32 gr_pipe_bundle_data_r(void) 1440{ 1441 return 0x00400204U; 1442} 1443static inline u32 gr_pipe_bundle_config_r(void) 1444{ 1445 return 0x00400208U; 1446} 1447static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) 1448{ 1449 return 0x0U; 1450} 1451static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) 1452{ 1453 return 
0x80000000U; 1454} 1455static inline u32 gr_fe_hww_esr_r(void) 1456{ 1457 return 0x00404000U; 1458} 1459static inline u32 gr_fe_hww_esr_reset_active_f(void) 1460{ 1461 return 0x40000000U; 1462} 1463static inline u32 gr_fe_hww_esr_en_enable_f(void) 1464{ 1465 return 0x80000000U; 1466} 1467static inline u32 gr_fe_hww_esr_info_r(void) 1468{ 1469 return 0x004041b0U; 1470} 1471static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(void) 1472{ 1473 return 0x00419eacU; 1474} 1475static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r(void) 1476{ 1477 return 0x0050472cU; 1478} 1479static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) 1480{ 1481 return 0x4U; 1482} 1483static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_int_report_f(void) 1484{ 1485 return 0x10U; 1486} 1487static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_pause_report_f(void) 1488{ 1489 return 0x20U; 1490} 1491static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_single_step_complete_report_f(void) 1492{ 1493 return 0x40U; 1494} 1495static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_error_in_trap_report_f(void) 1496{ 1497 return 0x100U; 1498} 1499static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_r(void) 1500{ 1501 return 0x00419eb4U; 1502} 1503static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_r(void) 1504{ 1505 return 0x00504734U; 1506} 1507static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_m(void) 1508{ 1509 return 0x1U << 4U; 1510} 1511static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f(void) 1512{ 1513 return 0x10U; 1514} 1515static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_m(void) 1516{ 1517 return 0x1U << 5U; 1518} 1519static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(void) 1520{ 1521 return 0x20U; 1522} 1523static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_m(void) 1524{ 1525 return 0x1U << 6U; 1526} 1527static inline u32 
gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f(void) 1528{ 1529 return 0x40U; 1530} 1531static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_m(void) 1532{ 1533 return 0x1U << 2U; 1534} 1535static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f(void) 1536{ 1537 return 0x4U; 1538} 1539static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_m(void) 1540{ 1541 return 0x1U << 8U; 1542} 1543static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_pending_f(void) 1544{ 1545 return 0x100U; 1546} 1547static inline u32 gr_fe_go_idle_timeout_r(void) 1548{ 1549 return 0x00404154U; 1550} 1551static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) 1552{ 1553 return (v & 0xffffffffU) << 0U; 1554} 1555static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) 1556{ 1557 return 0x0U; 1558} 1559static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) 1560{ 1561 return 0x1800U; 1562} 1563static inline u32 gr_fe_object_table_r(u32 i) 1564{ 1565 return 0x00404200U + i*4U; 1566} 1567static inline u32 gr_fe_object_table_nvclass_v(u32 r) 1568{ 1569 return (r >> 0U) & 0xffffU; 1570} 1571static inline u32 gr_fe_tpc_fs_r(u32 i) 1572{ 1573 return 0x0040a200U + i*4U; 1574} 1575static inline u32 gr_fe_tpc_pesmask_r(void) 1576{ 1577 return 0x0040a260U; 1578} 1579static inline u32 gr_fe_tpc_pesmask_pesid_f(u32 v) 1580{ 1581 return (v & 0x3fU) << 24U; 1582} 1583static inline u32 gr_fe_tpc_pesmask_gpcid_f(u32 v) 1584{ 1585 return (v & 0xffU) << 16U; 1586} 1587static inline u32 gr_fe_tpc_pesmask_action_m(void) 1588{ 1589 return 0x1U << 30U; 1590} 1591static inline u32 gr_fe_tpc_pesmask_action_write_f(void) 1592{ 1593 return 0x40000000U; 1594} 1595static inline u32 gr_fe_tpc_pesmask_action_read_f(void) 1596{ 1597 return 0x0U; 1598} 1599static inline u32 gr_fe_tpc_pesmask_req_m(void) 1600{ 1601 return 0x1U << 31U; 1602} 1603static inline u32 gr_fe_tpc_pesmask_req_send_f(void) 1604{ 1605 return 0x80000000U; 1606} 1607static 
inline u32 gr_fe_tpc_pesmask_mask_m(void) 1608{ 1609 return 0xffffU << 0U; 1610} 1611static inline u32 gr_pri_mme_shadow_raw_index_r(void) 1612{ 1613 return 0x00404488U; 1614} 1615static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) 1616{ 1617 return 0x80000000U; 1618} 1619static inline u32 gr_pri_mme_shadow_raw_data_r(void) 1620{ 1621 return 0x0040448cU; 1622} 1623static inline u32 gr_mme_hww_esr_r(void) 1624{ 1625 return 0x00404490U; 1626} 1627static inline u32 gr_mme_hww_esr_reset_active_f(void) 1628{ 1629 return 0x40000000U; 1630} 1631static inline u32 gr_mme_hww_esr_en_enable_f(void) 1632{ 1633 return 0x80000000U; 1634} 1635static inline u32 gr_mme_hww_esr_info_r(void) 1636{ 1637 return 0x00404494U; 1638} 1639static inline u32 gr_memfmt_hww_esr_r(void) 1640{ 1641 return 0x00404600U; 1642} 1643static inline u32 gr_memfmt_hww_esr_reset_active_f(void) 1644{ 1645 return 0x40000000U; 1646} 1647static inline u32 gr_memfmt_hww_esr_en_enable_f(void) 1648{ 1649 return 0x80000000U; 1650} 1651static inline u32 gr_fecs_cpuctl_r(void) 1652{ 1653 return 0x00409100U; 1654} 1655static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) 1656{ 1657 return (v & 0x1U) << 1U; 1658} 1659static inline u32 gr_fecs_cpuctl_alias_r(void) 1660{ 1661 return 0x00409130U; 1662} 1663static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) 1664{ 1665 return (v & 0x1U) << 1U; 1666} 1667static inline u32 gr_fecs_dmactl_r(void) 1668{ 1669 return 0x0040910cU; 1670} 1671static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) 1672{ 1673 return (v & 0x1U) << 0U; 1674} 1675static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) 1676{ 1677 return 0x1U << 1U; 1678} 1679static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) 1680{ 1681 return 0x1U << 2U; 1682} 1683static inline u32 gr_fecs_os_r(void) 1684{ 1685 return 0x00409080U; 1686} 1687static inline u32 gr_fecs_idlestate_r(void) 1688{ 1689 return 0x0040904cU; 1690} 1691static inline u32 gr_fecs_mailbox0_r(void) 1692{ 1693 return 0x00409040U; 
1694} 1695static inline u32 gr_fecs_mailbox1_r(void) 1696{ 1697 return 0x00409044U; 1698} 1699static inline u32 gr_fecs_irqstat_r(void) 1700{ 1701 return 0x00409008U; 1702} 1703static inline u32 gr_fecs_irqmode_r(void) 1704{ 1705 return 0x0040900cU; 1706} 1707static inline u32 gr_fecs_irqmask_r(void) 1708{ 1709 return 0x00409018U; 1710} 1711static inline u32 gr_fecs_irqdest_r(void) 1712{ 1713 return 0x0040901cU; 1714} 1715static inline u32 gr_fecs_curctx_r(void) 1716{ 1717 return 0x00409050U; 1718} 1719static inline u32 gr_fecs_nxtctx_r(void) 1720{ 1721 return 0x00409054U; 1722} 1723static inline u32 gr_fecs_engctl_r(void) 1724{ 1725 return 0x004090a4U; 1726} 1727static inline u32 gr_fecs_debug1_r(void) 1728{ 1729 return 0x00409090U; 1730} 1731static inline u32 gr_fecs_debuginfo_r(void) 1732{ 1733 return 0x00409094U; 1734} 1735static inline u32 gr_fecs_icd_cmd_r(void) 1736{ 1737 return 0x00409200U; 1738} 1739static inline u32 gr_fecs_icd_cmd_opc_s(void) 1740{ 1741 return 4U; 1742} 1743static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) 1744{ 1745 return (v & 0xfU) << 0U; 1746} 1747static inline u32 gr_fecs_icd_cmd_opc_m(void) 1748{ 1749 return 0xfU << 0U; 1750} 1751static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) 1752{ 1753 return (r >> 0U) & 0xfU; 1754} 1755static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) 1756{ 1757 return 0x8U; 1758} 1759static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) 1760{ 1761 return 0xeU; 1762} 1763static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) 1764{ 1765 return (v & 0x1fU) << 8U; 1766} 1767static inline u32 gr_fecs_icd_rdata_r(void) 1768{ 1769 return 0x0040920cU; 1770} 1771static inline u32 gr_fecs_imemc_r(u32 i) 1772{ 1773 return 0x00409180U + i*16U; 1774} 1775static inline u32 gr_fecs_imemc_offs_f(u32 v) 1776{ 1777 return (v & 0x3fU) << 2U; 1778} 1779static inline u32 gr_fecs_imemc_blk_f(u32 v) 1780{ 1781 return (v & 0xffU) << 8U; 1782} 1783static inline u32 gr_fecs_imemc_aincw_f(u32 v) 1784{ 1785 return (v & 0x1U) << 24U; 1786} 1787static 
inline u32 gr_fecs_imemd_r(u32 i) 1788{ 1789 return 0x00409184U + i*16U; 1790} 1791static inline u32 gr_fecs_imemt_r(u32 i) 1792{ 1793 return 0x00409188U + i*16U; 1794} 1795static inline u32 gr_fecs_imemt_tag_f(u32 v) 1796{ 1797 return (v & 0xffffU) << 0U; 1798} 1799static inline u32 gr_fecs_dmemc_r(u32 i) 1800{ 1801 return 0x004091c0U + i*8U; 1802} 1803static inline u32 gr_fecs_dmemc_offs_s(void) 1804{ 1805 return 6U; 1806} 1807static inline u32 gr_fecs_dmemc_offs_f(u32 v) 1808{ 1809 return (v & 0x3fU) << 2U; 1810} 1811static inline u32 gr_fecs_dmemc_offs_m(void) 1812{ 1813 return 0x3fU << 2U; 1814} 1815static inline u32 gr_fecs_dmemc_offs_v(u32 r) 1816{ 1817 return (r >> 2U) & 0x3fU; 1818} 1819static inline u32 gr_fecs_dmemc_blk_f(u32 v) 1820{ 1821 return (v & 0xffU) << 8U; 1822} 1823static inline u32 gr_fecs_dmemc_aincw_f(u32 v) 1824{ 1825 return (v & 0x1U) << 24U; 1826} 1827static inline u32 gr_fecs_dmemd_r(u32 i) 1828{ 1829 return 0x004091c4U + i*8U; 1830} 1831static inline u32 gr_fecs_dmatrfbase_r(void) 1832{ 1833 return 0x00409110U; 1834} 1835static inline u32 gr_fecs_dmatrfmoffs_r(void) 1836{ 1837 return 0x00409114U; 1838} 1839static inline u32 gr_fecs_dmatrffboffs_r(void) 1840{ 1841 return 0x0040911cU; 1842} 1843static inline u32 gr_fecs_dmatrfcmd_r(void) 1844{ 1845 return 0x00409118U; 1846} 1847static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) 1848{ 1849 return (v & 0x1U) << 4U; 1850} 1851static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) 1852{ 1853 return (v & 0x1U) << 5U; 1854} 1855static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) 1856{ 1857 return (v & 0x7U) << 8U; 1858} 1859static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) 1860{ 1861 return (v & 0x7U) << 12U; 1862} 1863static inline u32 gr_fecs_bootvec_r(void) 1864{ 1865 return 0x00409104U; 1866} 1867static inline u32 gr_fecs_bootvec_vec_f(u32 v) 1868{ 1869 return (v & 0xffffffffU) << 0U; 1870} 1871static inline u32 gr_fecs_falcon_hwcfg_r(void) 1872{ 1873 return 0x00409108U; 1874} 1875static inline 
u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) 1876{ 1877 return 0x0041a108U; 1878} 1879static inline u32 gr_fecs_falcon_rm_r(void) 1880{ 1881 return 0x00409084U; 1882} 1883static inline u32 gr_fecs_current_ctx_r(void) 1884{ 1885 return 0x00409b00U; 1886} 1887static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) 1888{ 1889 return (v & 0xfffffffU) << 0U; 1890} 1891static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) 1892{ 1893 return (r >> 0U) & 0xfffffffU; 1894} 1895static inline u32 gr_fecs_current_ctx_target_s(void) 1896{ 1897 return 2U; 1898} 1899static inline u32 gr_fecs_current_ctx_target_f(u32 v) 1900{ 1901 return (v & 0x3U) << 28U; 1902} 1903static inline u32 gr_fecs_current_ctx_target_m(void) 1904{ 1905 return 0x3U << 28U; 1906} 1907static inline u32 gr_fecs_current_ctx_target_v(u32 r) 1908{ 1909 return (r >> 28U) & 0x3U; 1910} 1911static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) 1912{ 1913 return 0x0U; 1914} 1915static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) 1916{ 1917 return 0x20000000U; 1918} 1919static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) 1920{ 1921 return 0x30000000U; 1922} 1923static inline u32 gr_fecs_current_ctx_valid_s(void) 1924{ 1925 return 1U; 1926} 1927static inline u32 gr_fecs_current_ctx_valid_f(u32 v) 1928{ 1929 return (v & 0x1U) << 31U; 1930} 1931static inline u32 gr_fecs_current_ctx_valid_m(void) 1932{ 1933 return 0x1U << 31U; 1934} 1935static inline u32 gr_fecs_current_ctx_valid_v(u32 r) 1936{ 1937 return (r >> 31U) & 0x1U; 1938} 1939static inline u32 gr_fecs_current_ctx_valid_false_f(void) 1940{ 1941 return 0x0U; 1942} 1943static inline u32 gr_fecs_method_data_r(void) 1944{ 1945 return 0x00409500U; 1946} 1947static inline u32 gr_fecs_method_push_r(void) 1948{ 1949 return 0x00409504U; 1950} 1951static inline u32 gr_fecs_method_push_adr_f(u32 v) 1952{ 1953 return (v & 0xfffU) << 0U; 1954} 1955static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) 1956{ 1957 return 0x00000003U; 1958} 1959static 
inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) 1960{ 1961 return 0x3U; 1962} 1963static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) 1964{ 1965 return 0x00000010U; 1966} 1967static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) 1968{ 1969 return 0x00000009U; 1970} 1971static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) 1972{ 1973 return 0x00000015U; 1974} 1975static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) 1976{ 1977 return 0x00000016U; 1978} 1979static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) 1980{ 1981 return 0x00000025U; 1982} 1983static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) 1984{ 1985 return 0x00000030U; 1986} 1987static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) 1988{ 1989 return 0x00000031U; 1990} 1991static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) 1992{ 1993 return 0x00000032U; 1994} 1995static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) 1996{ 1997 return 0x00000038U; 1998} 1999static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) 2000{ 2001 return 0x00000039U; 2002} 2003static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) 2004{ 2005 return 0x21U; 2006} 2007static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) 2008{ 2009 return 0x0000001aU; 2010} 2011static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) 2012{ 2013 return 0x00000004U; 2014} 2015static inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void) 2016{ 2017 return 0x0000003aU; 2018} 2019static inline u32 gr_fecs_host_int_status_r(void) 2020{ 2021 return 0x00409c18U; 2022} 2023static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) 2024{ 2025 return (v & 0x1U) << 16U; 2026} 2027static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) 2028{ 2029 return (v & 0x1U) << 17U; 2030} 
2031static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) 2032{ 2033 return (v & 0x1U) << 18U; 2034} 2035static inline u32 gr_fecs_host_int_status_watchdog_active_f(void) 2036{ 2037 return 0x80000U; 2038} 2039static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) 2040{ 2041 return (v & 0xffffU) << 0U; 2042} 2043static inline u32 gr_fecs_host_int_status_ecc_corrected_f(u32 v) 2044{ 2045 return (v & 0x1U) << 21U; 2046} 2047static inline u32 gr_fecs_host_int_status_ecc_corrected_m(void) 2048{ 2049 return 0x1U << 21U; 2050} 2051static inline u32 gr_fecs_host_int_status_ecc_uncorrected_f(u32 v) 2052{ 2053 return (v & 0x1U) << 22U; 2054} 2055static inline u32 gr_fecs_host_int_status_ecc_uncorrected_m(void) 2056{ 2057 return 0x1U << 22U; 2058} 2059static inline u32 gr_fecs_host_int_clear_r(void) 2060{ 2061 return 0x00409c20U; 2062} 2063static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) 2064{ 2065 return (v & 0x1U) << 1U; 2066} 2067static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) 2068{ 2069 return 0x2U; 2070} 2071static inline u32 gr_fecs_host_int_enable_r(void) 2072{ 2073 return 0x00409c24U; 2074} 2075static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) 2076{ 2077 return 0x2U; 2078} 2079static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) 2080{ 2081 return 0x10000U; 2082} 2083static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) 2084{ 2085 return 0x20000U; 2086} 2087static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) 2088{ 2089 return 0x40000U; 2090} 2091static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) 2092{ 2093 return 0x80000U; 2094} 2095static inline u32 gr_fecs_host_int_enable_flush_when_busy_enable_f(void) 2096{ 2097 return 0x100000U; 2098} 2099static inline u32 gr_fecs_host_int_enable_ecc_corrected_enable_f(void) 2100{ 2101 return 0x200000U; 2102} 2103static inline u32 
gr_fecs_host_int_enable_ecc_uncorrected_enable_f(void) 2104{ 2105 return 0x400000U; 2106} 2107static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) 2108{ 2109 return 0x00409614U; 2110} 2111static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) 2112{ 2113 return 0x0U; 2114} 2115static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) 2116{ 2117 return 0x0U; 2118} 2119static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) 2120{ 2121 return 0x0U; 2122} 2123static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) 2124{ 2125 return 0x10U; 2126} 2127static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) 2128{ 2129 return 0x20U; 2130} 2131static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) 2132{ 2133 return 0x40U; 2134} 2135static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) 2136{ 2137 return 0x0U; 2138} 2139static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) 2140{ 2141 return 0x100U; 2142} 2143static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) 2144{ 2145 return 0x0U; 2146} 2147static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) 2148{ 2149 return 0x200U; 2150} 2151static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) 2152{ 2153 return 1U; 2154} 2155static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) 2156{ 2157 return (v & 0x1U) << 10U; 2158} 2159static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) 2160{ 2161 return 0x1U << 10U; 2162} 2163static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) 2164{ 2165 return (r >> 10U) & 0x1U; 2166} 2167static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) 2168{ 2169 return 0x0U; 2170} 2171static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) 2172{ 2173 return 0x400U; 2174} 2175static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) 2176{ 2177 
return 0x0040960cU; 2178} 2179static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) 2180{ 2181 return 0x00409800U + i*4U; 2182} 2183static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) 2184{ 2185 return 0x00000010U; 2186} 2187static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) 2188{ 2189 return (v & 0xffffffffU) << 0U; 2190} 2191static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) 2192{ 2193 return 0x00000001U; 2194} 2195static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) 2196{ 2197 return 0x00000002U; 2198} 2199static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) 2200{ 2201 return 0x004098c0U + i*4U; 2202} 2203static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) 2204{ 2205 return (v & 0xffffffffU) << 0U; 2206} 2207static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) 2208{ 2209 return 0x00409840U + i*4U; 2210} 2211static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) 2212{ 2213 return (v & 0xffffffffU) << 0U; 2214} 2215static inline u32 gr_fecs_fs_r(void) 2216{ 2217 return 0x00409604U; 2218} 2219static inline u32 gr_fecs_fs_num_available_gpcs_s(void) 2220{ 2221 return 5U; 2222} 2223static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) 2224{ 2225 return (v & 0x1fU) << 0U; 2226} 2227static inline u32 gr_fecs_fs_num_available_gpcs_m(void) 2228{ 2229 return 0x1fU << 0U; 2230} 2231static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) 2232{ 2233 return (r >> 0U) & 0x1fU; 2234} 2235static inline u32 gr_fecs_fs_num_available_fbps_s(void) 2236{ 2237 return 5U; 2238} 2239static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) 2240{ 2241 return (v & 0x1fU) << 16U; 2242} 2243static inline u32 gr_fecs_fs_num_available_fbps_m(void) 2244{ 2245 return 0x1fU << 16U; 2246} 2247static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) 2248{ 2249 return (r >> 16U) & 0x1fU; 2250} 2251static inline u32 gr_fecs_cfg_r(void) 2252{ 2253 return 0x00409620U; 2254} 2255static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) 2256{ 2257 return (r >> 0U) & 0xffU; 2258} 
2259static inline u32 gr_fecs_rc_lanes_r(void) 2260{ 2261 return 0x00409880U; 2262} 2263static inline u32 gr_fecs_rc_lanes_num_chains_s(void) 2264{ 2265 return 6U; 2266} 2267static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) 2268{ 2269 return (v & 0x3fU) << 0U; 2270} 2271static inline u32 gr_fecs_rc_lanes_num_chains_m(void) 2272{ 2273 return 0x3fU << 0U; 2274} 2275static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) 2276{ 2277 return (r >> 0U) & 0x3fU; 2278} 2279static inline u32 gr_fecs_ctxsw_status_1_r(void) 2280{ 2281 return 0x00409400U; 2282} 2283static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) 2284{ 2285 return 1U; 2286} 2287static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) 2288{ 2289 return (v & 0x1U) << 12U; 2290} 2291static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) 2292{ 2293 return 0x1U << 12U; 2294} 2295static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) 2296{ 2297 return (r >> 12U) & 0x1U; 2298} 2299static inline u32 gr_fecs_arb_ctx_adr_r(void) 2300{ 2301 return 0x00409a24U; 2302} 2303static inline u32 gr_fecs_new_ctx_r(void) 2304{ 2305 return 0x00409b04U; 2306} 2307static inline u32 gr_fecs_new_ctx_ptr_s(void) 2308{ 2309 return 28U; 2310} 2311static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) 2312{ 2313 return (v & 0xfffffffU) << 0U; 2314} 2315static inline u32 gr_fecs_new_ctx_ptr_m(void) 2316{ 2317 return 0xfffffffU << 0U; 2318} 2319static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) 2320{ 2321 return (r >> 0U) & 0xfffffffU; 2322} 2323static inline u32 gr_fecs_new_ctx_target_s(void) 2324{ 2325 return 2U; 2326} 2327static inline u32 gr_fecs_new_ctx_target_f(u32 v) 2328{ 2329 return (v & 0x3U) << 28U; 2330} 2331static inline u32 gr_fecs_new_ctx_target_m(void) 2332{ 2333 return 0x3U << 28U; 2334} 2335static inline u32 gr_fecs_new_ctx_target_v(u32 r) 2336{ 2337 return (r >> 28U) & 0x3U; 2338} 2339static inline u32 gr_fecs_new_ctx_valid_s(void) 2340{ 2341 return 1U; 2342} 2343static inline u32 gr_fecs_new_ctx_valid_f(u32 v) 
2344{ 2345 return (v & 0x1U) << 31U; 2346} 2347static inline u32 gr_fecs_new_ctx_valid_m(void) 2348{ 2349 return 0x1U << 31U; 2350} 2351static inline u32 gr_fecs_new_ctx_valid_v(u32 r) 2352{ 2353 return (r >> 31U) & 0x1U; 2354} 2355static inline u32 gr_fecs_arb_ctx_ptr_r(void) 2356{ 2357 return 0x00409a0cU; 2358} 2359static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) 2360{ 2361 return 28U; 2362} 2363static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) 2364{ 2365 return (v & 0xfffffffU) << 0U; 2366} 2367static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) 2368{ 2369 return 0xfffffffU << 0U; 2370} 2371static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) 2372{ 2373 return (r >> 0U) & 0xfffffffU; 2374} 2375static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) 2376{ 2377 return 2U; 2378} 2379static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) 2380{ 2381 return (v & 0x3U) << 28U; 2382} 2383static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) 2384{ 2385 return 0x3U << 28U; 2386} 2387static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) 2388{ 2389 return (r >> 28U) & 0x3U; 2390} 2391static inline u32 gr_fecs_arb_ctx_cmd_r(void) 2392{ 2393 return 0x00409a10U; 2394} 2395static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) 2396{ 2397 return 5U; 2398} 2399static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) 2400{ 2401 return (v & 0x1fU) << 0U; 2402} 2403static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) 2404{ 2405 return 0x1fU << 0U; 2406} 2407static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) 2408{ 2409 return (r >> 0U) & 0x1fU; 2410} 2411static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) 2412{ 2413 return 0x00409c00U; 2414} 2415static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) 2416{ 2417 return 0x00502c04U; 2418} 2419static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) 2420{ 2421 return 0x00502400U; 2422} 2423static inline u32 gr_gpc0_gpccs_ctxsw_mailbox__size_1_v(void) 2424{ 2425 return 0x00000010U; 2426} 2427static inline u32 gr_fecs_ctxsw_idlestate_r(void) 2428{ 2429 
return 0x00409420U; 2430} 2431static inline u32 gr_fecs_feature_override_ecc_r(void) 2432{ 2433 return 0x00409658U; 2434} 2435static inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r) 2436{ 2437 return (r >> 0U) & 0x1U; 2438} 2439static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r) 2440{ 2441 return (r >> 3U) & 0x1U; 2442} 2443static inline u32 gr_fecs_feature_override_ecc_sm_l1_data_v(u32 r) 2444{ 2445 return (r >> 4U) & 0x1U; 2446} 2447static inline u32 gr_fecs_feature_override_ecc_sm_l1_data_override_v(u32 r) 2448{ 2449 return (r >> 7U) & 0x1U; 2450} 2451static inline u32 gr_fecs_feature_override_ecc_sm_l1_tag_v(u32 r) 2452{ 2453 return (r >> 8U) & 0x1U; 2454} 2455static inline u32 gr_fecs_feature_override_ecc_sm_l1_tag_override_v(u32 r) 2456{ 2457 return (r >> 11U) & 0x1U; 2458} 2459static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r) 2460{ 2461 return (r >> 12U) & 0x1U; 2462} 2463static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r) 2464{ 2465 return (r >> 15U) & 0x1U; 2466} 2467static inline u32 gr_fecs_feature_override_ecc_sm_cbu_v(u32 r) 2468{ 2469 return (r >> 20U) & 0x1U; 2470} 2471static inline u32 gr_fecs_feature_override_ecc_sm_cbu_override_v(u32 r) 2472{ 2473 return (r >> 23U) & 0x1U; 2474} 2475static inline u32 gr_fecs_feature_override_ecc_1_r(void) 2476{ 2477 return 0x0040965cU; 2478} 2479static inline u32 gr_fecs_feature_override_ecc_1_sm_l0_icache_v(u32 r) 2480{ 2481 return (r >> 0U) & 0x1U; 2482} 2483static inline u32 gr_fecs_feature_override_ecc_1_sm_l0_icache_override_v(u32 r) 2484{ 2485 return (r >> 1U) & 0x1U; 2486} 2487static inline u32 gr_fecs_feature_override_ecc_1_sm_l1_icache_v(u32 r) 2488{ 2489 return (r >> 2U) & 0x1U; 2490} 2491static inline u32 gr_fecs_feature_override_ecc_1_sm_l1_icache_override_v(u32 r) 2492{ 2493 return (r >> 3U) & 0x1U; 2494} 2495static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) 2496{ 2497 return 0x00502420U; 2498} 2499static inline u32 gr_rstr2d_gpc_map_r(u32 i) 
2500{ 2501 return 0x0040780cU + i*4U; 2502} 2503static inline u32 gr_rstr2d_map_table_cfg_r(void) 2504{ 2505 return 0x004078bcU; 2506} 2507static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) 2508{ 2509 return (v & 0xffU) << 0U; 2510} 2511static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) 2512{ 2513 return (v & 0xffU) << 8U; 2514} 2515static inline u32 gr_pd_hww_esr_r(void) 2516{ 2517 return 0x00406018U; 2518} 2519static inline u32 gr_pd_hww_esr_reset_active_f(void) 2520{ 2521 return 0x40000000U; 2522} 2523static inline u32 gr_pd_hww_esr_en_enable_f(void) 2524{ 2525 return 0x80000000U; 2526} 2527static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) 2528{ 2529 return 0x00406028U + i*4U; 2530} 2531static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) 2532{ 2533 return 0x00000004U; 2534} 2535static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) 2536{ 2537 return (v & 0xfU) << 0U; 2538} 2539static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) 2540{ 2541 return (v & 0xfU) << 4U; 2542} 2543static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) 2544{ 2545 return (v & 0xfU) << 8U; 2546} 2547static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) 2548{ 2549 return (v & 0xfU) << 12U; 2550} 2551static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) 2552{ 2553 return (v & 0xfU) << 16U; 2554} 2555static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) 2556{ 2557 return (v & 0xfU) << 20U; 2558} 2559static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) 2560{ 2561 return (v & 0xfU) << 24U; 2562} 2563static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) 2564{ 2565 return (v & 0xfU) << 28U; 2566} 2567static inline u32 gr_pd_ab_dist_cfg0_r(void) 2568{ 2569 return 0x004064c0U; 2570} 2571static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) 2572{ 2573 return 0x80000000U; 2574} 2575static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) 2576{ 2577 return 0x0U; 2578} 2579static inline u32 gr_pd_ab_dist_cfg1_r(void) 2580{ 2581 return 
0x004064c4U; 2582} 2583static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) 2584{ 2585 return 0xffffU; 2586} 2587static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) 2588{ 2589 return (v & 0xffffU) << 16U; 2590} 2591static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) 2592{ 2593 return 0x00000080U; 2594} 2595static inline u32 gr_pd_ab_dist_cfg2_r(void) 2596{ 2597 return 0x004064c8U; 2598} 2599static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) 2600{ 2601 return (v & 0x1fffU) << 0U; 2602} 2603static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) 2604{ 2605 return 0x00000380U; 2606} 2607static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) 2608{ 2609 return (v & 0x1fffU) << 16U; 2610} 2611static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) 2612{ 2613 return 0x00000020U; 2614} 2615static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) 2616{ 2617 return 0x00000302U; 2618} 2619static inline u32 gr_pd_dist_skip_table_r(u32 i) 2620{ 2621 return 0x004064d0U + i*4U; 2622} 2623static inline u32 gr_pd_dist_skip_table__size_1_v(void) 2624{ 2625 return 0x00000008U; 2626} 2627static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) 2628{ 2629 return (v & 0xffU) << 0U; 2630} 2631static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) 2632{ 2633 return (v & 0xffU) << 8U; 2634} 2635static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) 2636{ 2637 return (v & 0xffU) << 16U; 2638} 2639static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) 2640{ 2641 return (v & 0xffU) << 24U; 2642} 2643static inline u32 gr_ds_debug_r(void) 2644{ 2645 return 0x00405800U; 2646} 2647static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) 2648{ 2649 return 0x0U; 2650} 2651static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) 2652{ 2653 return 0x8000000U; 2654} 2655static inline u32 gr_ds_zbc_color_r_r(void) 2656{ 2657 return 0x00405804U; 2658} 2659static inline u32 
/*
 * GR DS ZBC table and HWW ESR register accessors (auto-generated style).
 * Naming convention, as used throughout this header:
 *   *_r()  - register byte offset
 *   *_f(v) - value v masked and shifted into the field's bit position
 *   *_m()  - mask of the field in place
 *   *_v(r) - field extracted from a read register value r
 *   *_s()  - field width in bits
 * NOTE(review): "ZBC" is presumably zero-bandwidth clear — confirm
 * against the nvgpu hardware documentation.
 */
static inline u32 gr_ds_zbc_color_r_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_g_r(void)
{
	return 0x00405808U;
}
static inline u32 gr_ds_zbc_color_g_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_b_r(void)
{
	return 0x0040580cU;
}
static inline u32 gr_ds_zbc_color_b_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_a_r(void)
{
	return 0x00405810U;
}
static inline u32 gr_ds_zbc_color_a_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_fmt_r(void)
{
	return 0x00405814U;
}
static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v)
{
	return (v & 0x7fU) << 0U;
}
static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void)
{
	return 0x00000002U;
}
static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void)
{
	return 0x00000004U;
}
static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void)
{
	return 0x00000028U;
}
static inline u32 gr_ds_zbc_z_r(void)
{
	return 0x00405818U;
}
static inline u32 gr_ds_zbc_z_val_s(void)
{
	return 32U;
}
static inline u32 gr_ds_zbc_z_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_z_val_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 gr_ds_zbc_z_val_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
static inline u32 gr_ds_zbc_z_val__init_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_ds_zbc_z_val__init_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_z_fmt_r(void)
{
	return 0x0040581cU;
}
static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_ds_zbc_tbl_index_r(void)
{
	return 0x00405820U;
}
static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 gr_ds_zbc_tbl_ld_r(void)
{
	return 0x00405824U;
}
static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void)
{
	return 0x1U;
}
static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void)
{
	return 0x4U;
}
static inline u32 gr_ds_tga_constraintlogic_beta_r(void)
{
	return 0x00405830U;
}
static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v)
{
	return (v & 0x3fffffU) << 0U;
}
static inline u32 gr_ds_tga_constraintlogic_alpha_r(void)
{
	return 0x0040585cU;
}
static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_ds_hww_esr_r(void)
{
	return 0x00405840U;
}
static inline u32 gr_ds_hww_esr_reset_s(void)
{
	return 1U;
}
static inline u32 gr_ds_hww_esr_reset_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 gr_ds_hww_esr_reset_m(void)
{
	return 0x1U << 30U;
}
static inline u32 gr_ds_hww_esr_reset_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 gr_ds_hww_esr_reset_task_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_ds_hww_esr_reset_task_f(void)
{
	return 0x40000000U;
}
gr_ds_hww_esr_en_enabled_f(void) 2832{ 2833 return 0x80000000U; 2834} 2835static inline u32 gr_ds_hww_esr_2_r(void) 2836{ 2837 return 0x00405848U; 2838} 2839static inline u32 gr_ds_hww_esr_2_reset_s(void) 2840{ 2841 return 1U; 2842} 2843static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) 2844{ 2845 return (v & 0x1U) << 30U; 2846} 2847static inline u32 gr_ds_hww_esr_2_reset_m(void) 2848{ 2849 return 0x1U << 30U; 2850} 2851static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) 2852{ 2853 return (r >> 30U) & 0x1U; 2854} 2855static inline u32 gr_ds_hww_esr_2_reset_task_v(void) 2856{ 2857 return 0x00000001U; 2858} 2859static inline u32 gr_ds_hww_esr_2_reset_task_f(void) 2860{ 2861 return 0x40000000U; 2862} 2863static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) 2864{ 2865 return 0x80000000U; 2866} 2867static inline u32 gr_ds_hww_report_mask_r(void) 2868{ 2869 return 0x00405844U; 2870} 2871static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) 2872{ 2873 return 0x1U; 2874} 2875static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) 2876{ 2877 return 0x2U; 2878} 2879static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) 2880{ 2881 return 0x4U; 2882} 2883static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) 2884{ 2885 return 0x8U; 2886} 2887static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) 2888{ 2889 return 0x10U; 2890} 2891static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) 2892{ 2893 return 0x20U; 2894} 2895static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) 2896{ 2897 return 0x40U; 2898} 2899static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) 2900{ 2901 return 0x80U; 2902} 2903static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) 2904{ 2905 return 0x100U; 2906} 2907static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) 2908{ 2909 return 0x200U; 2910} 2911static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) 2912{ 2913 return 0x400U; 2914} 2915static inline u32 
gr_ds_hww_report_mask_sph11_err_report_f(void) 2916{ 2917 return 0x800U; 2918} 2919static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) 2920{ 2921 return 0x1000U; 2922} 2923static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) 2924{ 2925 return 0x2000U; 2926} 2927static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) 2928{ 2929 return 0x4000U; 2930} 2931static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) 2932{ 2933 return 0x8000U; 2934} 2935static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) 2936{ 2937 return 0x10000U; 2938} 2939static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) 2940{ 2941 return 0x20000U; 2942} 2943static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) 2944{ 2945 return 0x40000U; 2946} 2947static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) 2948{ 2949 return 0x80000U; 2950} 2951static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) 2952{ 2953 return 0x100000U; 2954} 2955static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) 2956{ 2957 return 0x200000U; 2958} 2959static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) 2960{ 2961 return 0x400000U; 2962} 2963static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) 2964{ 2965 return 0x800000U; 2966} 2967static inline u32 gr_ds_hww_report_mask_2_r(void) 2968{ 2969 return 0x0040584cU; 2970} 2971static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void) 2972{ 2973 return 0x1U; 2974} 2975static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) 2976{ 2977 return 0x00405870U + i*4U; 2978} 2979static inline u32 gr_scc_debug_r(void) 2980{ 2981 return 0x00408000U; 2982} 2983static inline u32 gr_scc_debug_pagepool_invalidates_m(void) 2984{ 2985 return 0x1U << 9U; 2986} 2987static inline u32 gr_scc_debug_pagepool_invalidates_disable_f(void) 2988{ 2989 return 0x200U; 2990} 2991static inline u32 gr_scc_debug_pagepool_invalidates_enable_f(void) 2992{ 2993 return 0x0U; 2994} 2995static 
/*
 * GR SCC (bundle circular buffer / pagepool), SSYNC, SKED and CWD
 * register accessors (auto-generated style). Same naming convention as
 * the rest of this header: _r() offset, _f(v) field insert, _m() mask,
 * _v(r) field extract, _s() field width in bits.
 */
static inline u32 gr_scc_bundle_cb_base_r(void)
{
	return 0x00408004U;
}
static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void)
{
	return 0x00000008U;
}
static inline u32 gr_scc_bundle_cb_size_r(void)
{
	return 0x00408008U;
}
static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v)
{
	return (v & 0x7ffU) << 0U;
}
static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void)
{
	return 0x00000030U;
}
static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void)
{
	return 0x00000100U;
}
static inline u32 gr_scc_bundle_cb_size_valid_false_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_scc_bundle_cb_size_valid_false_f(void)
{
	return 0x0U;
}
static inline u32 gr_scc_bundle_cb_size_valid_true_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_scc_pagepool_base_r(void)
{
	return 0x0040800cU;
}
static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void)
{
	return 0x00000008U;
}
static inline u32 gr_scc_pagepool_r(void)
{
	return 0x00408010U;
}
static inline u32 gr_scc_pagepool_total_pages_f(u32 v)
{
	return (v & 0x3ffU) << 0U;
}
static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void)
{
	return 0x00000200U;
}
static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void)
{
	return 0x00000100U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_s(void)
{
	return 10U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v)
{
	return (v & 0x3ffU) << 10U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_m(void)
{
	return 0x3ffU << 10U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r)
{
	return (r >> 10U) & 0x3ffU;
}
static inline u32 gr_scc_pagepool_valid_true_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_scc_init_r(void)
{
	return 0x0040802cU;
}
static inline u32 gr_scc_init_ram_trigger_f(void)
{
	return 0x1U;
}
static inline u32 gr_scc_hww_esr_r(void)
{
	return 0x00408030U;
}
static inline u32 gr_scc_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_scc_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_ssync_hww_esr_r(void)
{
	return 0x00405a14U;
}
static inline u32 gr_ssync_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_ssync_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_sked_hww_esr_r(void)
{
	return 0x00407020U;
}
static inline u32 gr_sked_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_sked_hww_esr_en_r(void)
{
	return 0x00407024U;
}
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(void)
{
	return 0x1U << 25U;
}
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f(void)
{
	return 0x2000000U;
}
static inline u32 gr_cwd_fs_r(void)
{
	return 0x00405b00U;
}
static inline u32 gr_cwd_fs_num_gpcs_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_cwd_fs_num_tpcs_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
0x00405b60U + i*4U; 3158} 3159static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) 3160{ 3161 return 4U; 3162} 3163static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) 3164{ 3165 return (v & 0xfU) << 0U; 3166} 3167static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) 3168{ 3169 return 4U; 3170} 3171static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) 3172{ 3173 return (v & 0xfU) << 4U; 3174} 3175static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) 3176{ 3177 return (v & 0xfU) << 8U; 3178} 3179static inline u32 gr_cwd_sm_id_r(u32 i) 3180{ 3181 return 0x00405ba0U + i*4U; 3182} 3183static inline u32 gr_cwd_sm_id__size_1_v(void) 3184{ 3185 return 0x00000010U; 3186} 3187static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) 3188{ 3189 return (v & 0xffU) << 0U; 3190} 3191static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) 3192{ 3193 return (v & 0xffU) << 8U; 3194} 3195static inline u32 gr_gpc0_fs_gpc_r(void) 3196{ 3197 return 0x00502608U; 3198} 3199static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) 3200{ 3201 return (r >> 0U) & 0x1fU; 3202} 3203static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) 3204{ 3205 return (r >> 16U) & 0x1fU; 3206} 3207static inline u32 gr_gpc0_cfg_r(void) 3208{ 3209 return 0x00502620U; 3210} 3211static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) 3212{ 3213 return (r >> 0U) & 0xffU; 3214} 3215static inline u32 gr_gpccs_rc_lanes_r(void) 3216{ 3217 return 0x00502880U; 3218} 3219static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) 3220{ 3221 return 6U; 3222} 3223static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) 3224{ 3225 return (v & 0x3fU) << 0U; 3226} 3227static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) 3228{ 3229 return 0x3fU << 0U; 3230} 3231static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) 3232{ 3233 return (r >> 0U) & 0x3fU; 3234} 3235static inline u32 gr_gpccs_rc_lane_size_r(void) 3236{ 3237 return 0x00502910U; 3238} 3239static inline u32 gr_gpccs_rc_lane_size_v_s(void) 3240{ 3241 return 24U; 3242} 3243static inline u32 
gr_gpccs_rc_lane_size_v_f(u32 v) 3244{ 3245 return (v & 0xffffffU) << 0U; 3246} 3247static inline u32 gr_gpccs_rc_lane_size_v_m(void) 3248{ 3249 return 0xffffffU << 0U; 3250} 3251static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) 3252{ 3253 return (r >> 0U) & 0xffffffU; 3254} 3255static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) 3256{ 3257 return 0x00000000U; 3258} 3259static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) 3260{ 3261 return 0x0U; 3262} 3263static inline u32 gr_gpc0_zcull_fs_r(void) 3264{ 3265 return 0x00500910U; 3266} 3267static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) 3268{ 3269 return (v & 0x1ffU) << 0U; 3270} 3271static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) 3272{ 3273 return (v & 0xfU) << 16U; 3274} 3275static inline u32 gr_gpc0_zcull_ram_addr_r(void) 3276{ 3277 return 0x00500914U; 3278} 3279static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) 3280{ 3281 return (v & 0xfU) << 0U; 3282} 3283static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) 3284{ 3285 return (v & 0xfU) << 8U; 3286} 3287static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) 3288{ 3289 return 0x00500918U; 3290} 3291static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) 3292{ 3293 return (v & 0xffffffU) << 0U; 3294} 3295static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) 3296{ 3297 return 0x00800000U; 3298} 3299static inline u32 gr_gpc0_zcull_total_ram_size_r(void) 3300{ 3301 return 0x00500920U; 3302} 3303static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) 3304{ 3305 return (v & 0xffffU) << 0U; 3306} 3307static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) 3308{ 3309 return 0x00500a04U + i*32U; 3310} 3311static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) 3312{ 3313 return 0x00000040U; 3314} 3315static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) 3316{ 3317 return 0x00000010U; 3318} 3319static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) 3320{ 3321 
return 0x00500c10U + i*4U; 3322} 3323static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) 3324{ 3325 return (v & 0xffU) << 0U; 3326} 3327static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) 3328{ 3329 return 0x00500c30U + i*4U; 3330} 3331static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) 3332{ 3333 return (r >> 0U) & 0xffU; 3334} 3335static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) 3336{ 3337 return 0x00504088U; 3338} 3339static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) 3340{ 3341 return (v & 0xffffU) << 0U; 3342} 3343static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) 3344{ 3345 return 0x00504608U; 3346} 3347static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_f(u32 v) 3348{ 3349 return (v & 0xffffU) << 0U; 3350} 3351static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_v(u32 r) 3352{ 3353 return (r >> 0U) & 0xffffU; 3354} 3355static inline u32 gr_gpc0_tpc0_sm_arch_r(void) 3356{ 3357 return 0x00504330U; 3358} 3359static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) 3360{ 3361 return (r >> 0U) & 0xffU; 3362} 3363static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) 3364{ 3365 return (r >> 8U) & 0xfffU; 3366} 3367static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) 3368{ 3369 return (r >> 20U) & 0xfffU; 3370} 3371static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) 3372{ 3373 return 0x00503018U; 3374} 3375static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) 3376{ 3377 return 0x1U << 0U; 3378} 3379static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) 3380{ 3381 return 0x1U; 3382} 3383static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) 3384{ 3385 return 0x005030c0U; 3386} 3387static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) 3388{ 3389 return (v & 0x3fffffU) << 0U; 3390} 3391static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) 3392{ 3393 return 0x3fffffU << 0U; 3394} 3395static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) 3396{ 3397 return 0x00000800U; 3398} 3399static inline u32 
gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) 3400{ 3401 return 0x00001100U; 3402} 3403static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) 3404{ 3405 return 0x00000020U; 3406} 3407static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) 3408{ 3409 return 0x005030f4U; 3410} 3411static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) 3412{ 3413 return 0x005030e4U; 3414} 3415static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) 3416{ 3417 return (v & 0xffffU) << 0U; 3418} 3419static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) 3420{ 3421 return 0xffffU << 0U; 3422} 3423static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) 3424{ 3425 return 0x00000800U; 3426} 3427static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) 3428{ 3429 return 0x00000020U; 3430} 3431static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) 3432{ 3433 return 0x005030f8U; 3434} 3435static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) 3436{ 3437 return 0x005030f0U; 3438} 3439static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) 3440{ 3441 return (v & 0x3fffffU) << 0U; 3442} 3443static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) 3444{ 3445 return 0x00000800U; 3446} 3447static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) 3448{ 3449 return 0x00419e00U; 3450} 3451static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) 3452{ 3453 return (v & 0xffffffffU) << 0U; 3454} 3455static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) 3456{ 3457 return 0x00419e04U; 3458} 3459static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) 3460{ 3461 return 21U; 3462} 3463static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) 3464{ 3465 return (v & 0x1fffffU) << 0U; 3466} 3467static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) 3468{ 3469 return 0x1fffffU << 0U; 3470} 3471static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) 3472{ 3473 return (r >> 
0U) & 0x1fffffU; 3474} 3475static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) 3476{ 3477 return 0x80U; 3478} 3479static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) 3480{ 3481 return 1U; 3482} 3483static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) 3484{ 3485 return (v & 0x1U) << 31U; 3486} 3487static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) 3488{ 3489 return 0x1U << 31U; 3490} 3491static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) 3492{ 3493 return (r >> 31U) & 0x1U; 3494} 3495static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) 3496{ 3497 return 0x80000000U; 3498} 3499static inline u32 gr_gpccs_falcon_addr_r(void) 3500{ 3501 return 0x0041a0acU; 3502} 3503static inline u32 gr_gpccs_falcon_addr_lsb_s(void) 3504{ 3505 return 6U; 3506} 3507static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) 3508{ 3509 return (v & 0x3fU) << 0U; 3510} 3511static inline u32 gr_gpccs_falcon_addr_lsb_m(void) 3512{ 3513 return 0x3fU << 0U; 3514} 3515static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) 3516{ 3517 return (r >> 0U) & 0x3fU; 3518} 3519static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) 3520{ 3521 return 0x00000000U; 3522} 3523static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) 3524{ 3525 return 0x0U; 3526} 3527static inline u32 gr_gpccs_falcon_addr_msb_s(void) 3528{ 3529 return 6U; 3530} 3531static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) 3532{ 3533 return (v & 0x3fU) << 6U; 3534} 3535static inline u32 gr_gpccs_falcon_addr_msb_m(void) 3536{ 3537 return 0x3fU << 6U; 3538} 3539static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) 3540{ 3541 return (r >> 6U) & 0x3fU; 3542} 3543static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) 3544{ 3545 return 0x00000000U; 3546} 3547static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) 3548{ 3549 return 0x0U; 3550} 3551static inline u32 gr_gpccs_falcon_addr_ext_s(void) 3552{ 3553 return 12U; 3554} 3555static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) 3556{ 3557 
return (v & 0xfffU) << 0U; 3558} 3559static inline u32 gr_gpccs_falcon_addr_ext_m(void) 3560{ 3561 return 0xfffU << 0U; 3562} 3563static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) 3564{ 3565 return (r >> 0U) & 0xfffU; 3566} 3567static inline u32 gr_gpccs_cpuctl_r(void) 3568{ 3569 return 0x0041a100U; 3570} 3571static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) 3572{ 3573 return (v & 0x1U) << 1U; 3574} 3575static inline u32 gr_gpccs_dmactl_r(void) 3576{ 3577 return 0x0041a10cU; 3578} 3579static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) 3580{ 3581 return (v & 0x1U) << 0U; 3582} 3583static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) 3584{ 3585 return 0x1U << 1U; 3586} 3587static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) 3588{ 3589 return 0x1U << 2U; 3590} 3591static inline u32 gr_gpccs_imemc_r(u32 i) 3592{ 3593 return 0x0041a180U + i*16U; 3594} 3595static inline u32 gr_gpccs_imemc_offs_f(u32 v) 3596{ 3597 return (v & 0x3fU) << 2U; 3598} 3599static inline u32 gr_gpccs_imemc_blk_f(u32 v) 3600{ 3601 return (v & 0xffU) << 8U; 3602} 3603static inline u32 gr_gpccs_imemc_aincw_f(u32 v) 3604{ 3605 return (v & 0x1U) << 24U; 3606} 3607static inline u32 gr_gpccs_imemd_r(u32 i) 3608{ 3609 return 0x0041a184U + i*16U; 3610} 3611static inline u32 gr_gpccs_imemt_r(u32 i) 3612{ 3613 return 0x0041a188U + i*16U; 3614} 3615static inline u32 gr_gpccs_imemt__size_1_v(void) 3616{ 3617 return 0x00000004U; 3618} 3619static inline u32 gr_gpccs_imemt_tag_f(u32 v) 3620{ 3621 return (v & 0xffffU) << 0U; 3622} 3623static inline u32 gr_gpccs_dmemc_r(u32 i) 3624{ 3625 return 0x0041a1c0U + i*8U; 3626} 3627static inline u32 gr_gpccs_dmemc_offs_f(u32 v) 3628{ 3629 return (v & 0x3fU) << 2U; 3630} 3631static inline u32 gr_gpccs_dmemc_blk_f(u32 v) 3632{ 3633 return (v & 0xffU) << 8U; 3634} 3635static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) 3636{ 3637 return (v & 0x1U) << 24U; 3638} 3639static inline u32 gr_gpccs_dmemd_r(u32 i) 3640{ 3641 return 0x0041a1c4U + i*8U; 3642} 3643static 
inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) 3644{ 3645 return 0x0041a800U + i*4U; 3646} 3647static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) 3648{ 3649 return (v & 0xffffffffU) << 0U; 3650} 3651static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) 3652{ 3653 return 0x00418e24U; 3654} 3655static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) 3656{ 3657 return 32U; 3658} 3659static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) 3660{ 3661 return (v & 0xffffffffU) << 0U; 3662} 3663static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) 3664{ 3665 return 0xffffffffU << 0U; 3666} 3667static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) 3668{ 3669 return (r >> 0U) & 0xffffffffU; 3670} 3671static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) 3672{ 3673 return 0x00000000U; 3674} 3675static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) 3676{ 3677 return 0x0U; 3678} 3679static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) 3680{ 3681 return 0x00418e28U; 3682} 3683static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) 3684{ 3685 return 11U; 3686} 3687static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) 3688{ 3689 return (v & 0x7ffU) << 0U; 3690} 3691static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) 3692{ 3693 return 0x7ffU << 0U; 3694} 3695static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) 3696{ 3697 return (r >> 0U) & 0x7ffU; 3698} 3699static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) 3700{ 3701 return 0x00000030U; 3702} 3703static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) 3704{ 3705 return 0x30U; 3706} 3707static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) 3708{ 3709 return 1U; 3710} 3711static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) 3712{ 3713 return (v & 0x1U) << 31U; 3714} 3715static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) 3716{ 3717 return 0x1U << 31U; 3718} 3719static inline 
u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) 3720{ 3721 return (r >> 31U) & 0x1U; 3722} 3723static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) 3724{ 3725 return 0x00000000U; 3726} 3727static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) 3728{ 3729 return 0x0U; 3730} 3731static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) 3732{ 3733 return 0x00000001U; 3734} 3735static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) 3736{ 3737 return 0x80000000U; 3738} 3739static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) 3740{ 3741 return 0x005001dcU; 3742} 3743static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) 3744{ 3745 return (v & 0xffffU) << 0U; 3746} 3747static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) 3748{ 3749 return 0x00000170U; 3750} 3751static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) 3752{ 3753 return 0x00000100U; 3754} 3755static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) 3756{ 3757 return 0x005001d8U; 3758} 3759static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) 3760{ 3761 return (v & 0xffffffffU) << 0U; 3762} 3763static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) 3764{ 3765 return 0x00000008U; 3766} 3767static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) 3768{ 3769 return 0x004181e4U; 3770} 3771static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) 3772{ 3773 return (v & 0xfffU) << 0U; 3774} 3775static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) 3776{ 3777 return 0x00000100U; 3778} 3779static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) 3780{ 3781 return 0x0041befcU; 3782} 3783static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) 3784{ 3785 return (v & 0xfffU) << 0U; 3786} 3787static inline u32 gr_gpcs_ppcs_cbm_debug_r(void) 3788{ 3789 return 0x0041bec4U; 3790} 3791static inline u32 gr_gpcs_ppcs_cbm_debug_invalidate_alpha_m(void) 3792{ 
3793 return 0x1U << 0U; 3794} 3795static inline u32 gr_gpcs_ppcs_cbm_debug_invalidate_alpha_disable_f(void) 3796{ 3797 return 0x0U; 3798} 3799static inline u32 gr_gpcs_ppcs_cbm_debug_invalidate_alpha_enable_f(void) 3800{ 3801 return 0x1U; 3802} 3803static inline u32 gr_gpcs_ppcs_cbm_debug_invalidate_beta_m(void) 3804{ 3805 return 0x1U << 1U; 3806} 3807static inline u32 gr_gpcs_ppcs_cbm_debug_invalidate_beta_disable_f(void) 3808{ 3809 return 0x0U; 3810} 3811static inline u32 gr_gpcs_ppcs_cbm_debug_invalidate_beta_enable_f(void) 3812{ 3813 return 0x2U; 3814} 3815static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) 3816{ 3817 return 0x00418ea0U + i*4U; 3818} 3819static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) 3820{ 3821 return (v & 0x3fffffU) << 0U; 3822} 3823static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) 3824{ 3825 return 0x3fffffU << 0U; 3826} 3827static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) 3828{ 3829 return 0x00418010U + i*4U; 3830} 3831static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) 3832{ 3833 return (v & 0xffffffffU) << 0U; 3834} 3835static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) 3836{ 3837 return 0x0041804cU + i*4U; 3838} 3839static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) 3840{ 3841 return (v & 0xffffffffU) << 0U; 3842} 3843static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) 3844{ 3845 return 0x00418088U + i*4U; 3846} 3847static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) 3848{ 3849 return (v & 0xffffffffU) << 0U; 3850} 3851static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) 3852{ 3853 return 0x004180c4U + i*4U; 3854} 3855static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) 3856{ 3857 return (v & 0xffffffffU) << 0U; 3858} 3859static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) 3860{ 3861 return 0x00418100U; 3862} 3863static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) 3864{ 3865 return 0x00418110U + i*4U; 3866} 3867static inline u32 
gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) 3868{ 3869 return (v & 0xffffffffU) << 0U; 3870} 3871static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) 3872{ 3873 return 0x0041814cU; 3874} 3875static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i) 3876{ 3877 return 0x0041815cU + i*4U; 3878} 3879static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v) 3880{ 3881 return (v & 0xffU) << 0U; 3882} 3883static inline u32 gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r(void) 3884{ 3885 return 0x00418198U; 3886} 3887static inline u32 gr_gpcs_swdx_spill_unit_r(void) 3888{ 3889 return 0x00418e9cU; 3890} 3891static inline u32 gr_gpcs_swdx_spill_unit_spill_buffer_cache_mgmt_mode_m(void) 3892{ 3893 return 0x1U << 16U; 3894} 3895static inline u32 gr_gpcs_swdx_spill_unit_spill_buffer_cache_mgmt_mode_disabled_f(void) 3896{ 3897 return 0x0U; 3898} 3899static inline u32 gr_gpcs_swdx_spill_unit_spill_buffer_cache_mgmt_mode_enabled_f(void) 3900{ 3901 return 0x10000U; 3902} 3903static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) 3904{ 3905 return 0x00418810U; 3906} 3907static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) 3908{ 3909 return (v & 0xfffffffU) << 0U; 3910} 3911static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) 3912{ 3913 return 0x0000000cU; 3914} 3915static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) 3916{ 3917 return 0x80000000U; 3918} 3919static inline u32 gr_crstr_gpc_map_r(u32 i) 3920{ 3921 return 0x00418b08U + i*4U; 3922} 3923static inline u32 gr_crstr_gpc_map_tile0_f(u32 v) 3924{ 3925 return (v & 0x1fU) << 0U; 3926} 3927static inline u32 gr_crstr_gpc_map_tile1_f(u32 v) 3928{ 3929 return (v & 0x1fU) << 5U; 3930} 3931static inline u32 gr_crstr_gpc_map_tile2_f(u32 v) 3932{ 3933 return (v & 0x1fU) << 10U; 3934} 3935static inline u32 gr_crstr_gpc_map_tile3_f(u32 v) 3936{ 3937 return (v & 0x1fU) << 15U; 3938} 3939static inline u32 gr_crstr_gpc_map_tile4_f(u32 v) 3940{ 3941 return (v & 0x1fU) << 20U; 3942} 3943static inline u32 
gr_crstr_gpc_map_tile5_f(u32 v) 3944{ 3945 return (v & 0x1fU) << 25U; 3946} 3947static inline u32 gr_crstr_map_table_cfg_r(void) 3948{ 3949 return 0x00418bb8U; 3950} 3951static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) 3952{ 3953 return (v & 0xffU) << 0U; 3954} 3955static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) 3956{ 3957 return (v & 0xffU) << 8U; 3958} 3959static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i) 3960{ 3961 return 0x00418980U + i*4U; 3962} 3963static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v) 3964{ 3965 return (v & 0x7U) << 0U; 3966} 3967static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(u32 v) 3968{ 3969 return (v & 0x7U) << 4U; 3970} 3971static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(u32 v) 3972{ 3973 return (v & 0x7U) << 8U; 3974} 3975static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(u32 v) 3976{ 3977 return (v & 0x7U) << 12U; 3978} 3979static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(u32 v) 3980{ 3981 return (v & 0x7U) << 16U; 3982} 3983static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(u32 v) 3984{ 3985 return (v & 0x7U) << 20U; 3986} 3987static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(u32 v) 3988{ 3989 return (v & 0x7U) << 24U; 3990} 3991static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(u32 v) 3992{ 3993 return (v & 0x7U) << 28U; 3994} 3995static inline u32 gr_gpcs_gpm_pd_cfg_r(void) 3996{ 3997 return 0x00418c6cU; 3998} 3999static inline u32 gr_gpcs_gcc_pagepool_base_r(void) 4000{ 4001 return 0x00419004U; 4002} 4003static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) 4004{ 4005 return (v & 0xffffffffU) << 0U; 4006} 4007static inline u32 gr_gpcs_gcc_pagepool_r(void) 4008{ 4009 return 0x00419008U; 4010} 4011static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) 4012{ 4013 return (v & 0x3ffU) << 0U; 4014} 4015static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) 4016{ 4017 return 0x0041980cU; 4018} 
4019static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) 4020{ 4021 return 0x10U; 4022} 4023static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) 4024{ 4025 return 0x00419848U; 4026} 4027static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) 4028{ 4029 return (v & 0xfffffffU) << 0U; 4030} 4031static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) 4032{ 4033 return (v & 0x1U) << 28U; 4034} 4035static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) 4036{ 4037 return 0x10000000U; 4038} 4039static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) 4040{ 4041 return 0x00419c00U; 4042} 4043static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) 4044{ 4045 return 0x0U; 4046} 4047static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) 4048{ 4049 return 0x8U; 4050} 4051static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) 4052{ 4053 return 0x00419c2cU; 4054} 4055static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) 4056{ 4057 return (v & 0xfffffffU) << 0U; 4058} 4059static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) 4060{ 4061 return (v & 0x1U) << 28U; 4062} 4063static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) 4064{ 4065 return 0x10000000U; 4066} 4067static inline u32 gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(void) 4068{ 4069 return 0x00419ea8U; 4070} 4071static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r(void) 4072{ 4073 return 0x00504728U; 4074} 4075static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f(void) 4076{ 4077 return 0x2U; 4078} 4079static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f(void) 4080{ 4081 return 0x4U; 4082} 4083static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f(void) 4084{ 4085 return 0x10U; 4086} 4087static inline u32 
gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f(void) 4088{ 4089 return 0x20U; 4090} 4091static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f(void) 4092{ 4093 return 0x40U; 4094} 4095static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f(void) 4096{ 4097 return 0x100U; 4098} 4099static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) 4100{ 4101 return 0x200U; 4102} 4103static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) 4104{ 4105 return 0x800U; 4106} 4107static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f(void) 4108{ 4109 return 0x2000U; 4110} 4111static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f(void) 4112{ 4113 return 0x4000U; 4114} 4115static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f(void) 4116{ 4117 return 0x8000U; 4118} 4119static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) 4120{ 4121 return 0x10000U; 4122} 4123static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) 4124{ 4125 return 0x40000U; 4126} 4127static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f(void) 4128{ 4129 return 0x800000U; 4130} 4131static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f(void) 4132{ 4133 return 0x400000U; 4134} 4135static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_nack_report_f(void) 4136{ 4137 return 0x4000000U; 4138} 4139static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) 4140{ 4141 return 0x00419d0cU; 4142} 4143static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) 4144{ 4145 return 0x2U; 4146} 4147static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) 4148{ 4149 return 0x1U; 4150} 4151static inline u32 
gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void) 4152{ 4153 return 0x10U; 4154} 4155static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) 4156{ 4157 return 0x0050450cU; 4158} 4159static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) 4160{ 4161 return (r >> 1U) & 0x1U; 4162} 4163static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) 4164{ 4165 return 0x2U; 4166} 4167static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void) 4168{ 4169 return 0x10U; 4170} 4171static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) 4172{ 4173 return 0x0041ac94U; 4174} 4175static inline u32 gr_gpcs_gpccs_gpc_exception_en_gcc_f(u32 v) 4176{ 4177 return (v & 0x1U) << 2U; 4178} 4179static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) 4180{ 4181 return (v & 0xffU) << 16U; 4182} 4183static inline u32 gr_gpcs_gpccs_gpc_exception_en_gpccs_f(u32 v) 4184{ 4185 return (v & 0x1U) << 14U; 4186} 4187static inline u32 gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(u32 v) 4188{ 4189 return (v & 0x1U) << 15U; 4190} 4191static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) 4192{ 4193 return 0x00502c90U; 4194} 4195static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) 4196{ 4197 return (r >> 2U) & 0x1U; 4198} 4199static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) 4200{ 4201 return (r >> 16U) & 0xffU; 4202} 4203static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) 4204{ 4205 return 0x00000001U; 4206} 4207static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_f(u32 v) 4208{ 4209 return (v & 0x1U) << 14U; 4210} 4211static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_m(void) 4212{ 4213 return 0x1U << 14U; 4214} 4215static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_pending_f(void) 4216{ 4217 return 0x4000U; 4218} 4219static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_f(u32 v) 4220{ 4221 return (v & 0x1U) << 15U; 4222} 4223static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_m(void) 4224{ 4225 return 0x1U << 15U; 4226} 
4227static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_pending_f(void) 4228{ 4229 return 0x8000U; 4230} 4231static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_r(void) 4232{ 4233 return 0x00501048U; 4234} 4235static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank0_m(void) 4236{ 4237 return 0x1U << 0U; 4238} 4239static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank1_m(void) 4240{ 4241 return 0x1U << 1U; 4242} 4243static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank0_m(void) 4244{ 4245 return 0x1U << 4U; 4246} 4247static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank1_m(void) 4248{ 4249 return 0x1U << 5U; 4250} 4251static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_total_counter_overflow_v(u32 r) 4252{ 4253 return (r >> 8U) & 0x1U; 4254} 4255static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) 4256{ 4257 return (r >> 10U) & 0x1U; 4258} 4259static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_reset_task_f(void) 4260{ 4261 return 0x40000000U; 4262} 4263static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r(void) 4264{ 4265 return 0x0050104cU; 4266} 4267static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s(void) 4268{ 4269 return 16U; 4270} 4271static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_v(u32 r) 4272{ 4273 return (r >> 0U) & 0xffffU; 4274} 4275static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r(void) 4276{ 4277 return 0x00501054U; 4278} 4279static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s(void) 4280{ 4281 return 16U; 4282} 4283static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_v(u32 r) 4284{ 4285 return (r >> 0U) & 0xffffU; 4286} 4287static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) 4288{ 4289 return 0x00504508U; 4290} 4291static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) 4292{ 4293 return (r >> 0U) & 0x1U; 4294} 4295static inline u32 
gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) 4296{ 4297 return 0x00000001U; 4298} 4299static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) 4300{ 4301 return (r >> 1U) & 0x1U; 4302} 4303static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) 4304{ 4305 return 0x00000001U; 4306} 4307static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void) 4308{ 4309 return 0x1U << 4U; 4310} 4311static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void) 4312{ 4313 return 0x10U; 4314} 4315static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void) 4316{ 4317 return 0x00504704U; 4318} 4319static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m(void) 4320{ 4321 return 0x1U << 0U; 4322} 4323static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(u32 r) 4324{ 4325 return (r >> 0U) & 0x1U; 4326} 4327static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v(void) 4328{ 4329 return 0x00000001U; 4330} 4331static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f(void) 4332{ 4333 return 0x1U; 4334} 4335static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_v(void) 4336{ 4337 return 0x00000000U; 4338} 4339static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f(void) 4340{ 4341 return 0x0U; 4342} 4343static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(void) 4344{ 4345 return 0x1U << 31U; 4346} 4347static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f(void) 4348{ 4349 return 0x80000000U; 4350} 4351static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f(void) 4352{ 4353 return 0x0U; 4354} 4355static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(void) 4356{ 4357 return 0x1U << 3U; 4358} 4359static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f(void) 4360{ 4361 return 0x8U; 4362} 4363static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f(void) 4364{ 4365 return 0x0U; 4366} 4367static inline 
u32 gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f(void) 4368{ 4369 return 0x40000000U; 4370} 4371static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_0_r(void) 4372{ 4373 return 0x00504708U; 4374} 4375static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_1_r(void) 4376{ 4377 return 0x0050470cU; 4378} 4379static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r(void) 4380{ 4381 return 0x00504710U; 4382} 4383static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r(void) 4384{ 4385 return 0x00504714U; 4386} 4387static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r(void) 4388{ 4389 return 0x00504718U; 4390} 4391static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r(void) 4392{ 4393 return 0x0050471cU; 4394} 4395static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(void) 4396{ 4397 return 0x00419e90U; 4398} 4399static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r(void) 4400{ 4401 return 0x00419e94U; 4402} 4403static inline u32 gr_gpcs_tpcs_sms_dbgr_status0_r(void) 4404{ 4405 return 0x00419e80U; 4406} 4407static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_r(void) 4408{ 4409 return 0x00504700U; 4410} 4411static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_sm_in_trap_mode_v(u32 r) 4412{ 4413 return (r >> 0U) & 0x1U; 4414} 4415static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(u32 r) 4416{ 4417 return (r >> 4U) & 0x1U; 4418} 4419static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v(void) 4420{ 4421 return 0x00000001U; 4422} 4423static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_r(void) 4424{ 4425 return 0x00504730U; 4426} 4427static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(u32 r) 4428{ 4429 return (r >> 0U) & 0xffffU; 4430} 4431static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v(void) 4432{ 4433 return 0x00000000U; 4434} 4435static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_f(void) 4436{ 4437 return 0x0U; 4438} 4439static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_stack_error_f(void) 4440{ 4441 return 0x1U; 
4442} 4443static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_api_stack_error_f(void) 4444{ 4445 return 0x2U; 4446} 4447static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_pc_wrap_f(void) 4448{ 4449 return 0x4U; 4450} 4451static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_misaligned_pc_f(void) 4452{ 4453 return 0x5U; 4454} 4455static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_pc_overflow_f(void) 4456{ 4457 return 0x6U; 4458} 4459static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_misaligned_reg_f(void) 4460{ 4461 return 0x8U; 4462} 4463static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_illegal_instr_encoding_f(void) 4464{ 4465 return 0x9U; 4466} 4467static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_illegal_instr_param_f(void) 4468{ 4469 return 0xbU; 4470} 4471static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_oor_reg_f(void) 4472{ 4473 return 0xdU; 4474} 4475static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_oor_addr_f(void) 4476{ 4477 return 0xeU; 4478} 4479static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_misaligned_addr_f(void) 4480{ 4481 return 0xfU; 4482} 4483static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_invalid_addr_space_f(void) 4484{ 4485 return 0x10U; 4486} 4487static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_invalid_const_addr_ldc_f(void) 4488{ 4489 return 0x12U; 4490} 4491static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_stack_overflow_f(void) 4492{ 4493 return 0x16U; 4494} 4495static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_mmu_fault_f(void) 4496{ 4497 return 0x17U; 4498} 4499static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_tex_format_f(void) 4500{ 4501 return 0x18U; 4502} 4503static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_tex_layout_f(void) 4504{ 4505 return 0x19U; 4506} 4507static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_mmu_nack_f(void) 4508{ 4509 return 0x20U; 4510} 4511static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m(void) 4512{ 4513 return 0xffU << 16U; 4514} 
4515static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(void) 4516{ 4517 return 0xfU << 24U; 4518} 4519static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f(void) 4520{ 4521 return 0x0U; 4522} 4523static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r(void) 4524{ 4525 return 0x0050460cU; 4526} 4527static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(u32 r) 4528{ 4529 return (r >> 0U) & 0x1U; 4530} 4531static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(u32 r) 4532{ 4533 return (r >> 1U) & 0x1U; 4534} 4535static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r(void) 4536{ 4537 return 0x00504738U; 4538} 4539static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_hi_r(void) 4540{ 4541 return 0x0050473cU; 4542} 4543static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) 4544{ 4545 return 0x005043a0U; 4546} 4547static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) 4548{ 4549 return 0x00419ba0U; 4550} 4551static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) 4552{ 4553 return 0x1U << 4U; 4554} 4555static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) 4556{ 4557 return (v & 0x1U) << 4U; 4558} 4559static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) 4560{ 4561 return 0x005043b0U; 4562} 4563static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) 4564{ 4565 return 0x00419bb0U; 4566} 4567static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) 4568{ 4569 return 0x1U << 0U; 4570} 4571static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) 4572{ 4573 return (v & 0x1U) << 0U; 4574} 4575static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) 4576{ 4577 return 0x0041be08U; 4578} 4579static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) 4580{ 4581 return 0x4U; 4582} 4583static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i) 4584{ 4585 return 0x0041bf00U + i*4U; 4586} 4587static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) 4588{ 4589 return 
0x0041bfd0U; 4590} 4591static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) 4592{ 4593 return (v & 0xffU) << 0U; 4594} 4595static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) 4596{ 4597 return (v & 0xffU) << 8U; 4598} 4599static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) 4600{ 4601 return (v & 0x1fU) << 16U; 4602} 4603static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) 4604{ 4605 return (v & 0x7U) << 21U; 4606} 4607static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) 4608{ 4609 return 0x0041bfd4U; 4610} 4611static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) 4612{ 4613 return (v & 0xffffffU) << 0U; 4614} 4615static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i) 4616{ 4617 return 0x0041bfb0U + i*4U; 4618} 4619static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void) 4620{ 4621 return 0x00000005U; 4622} 4623static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(u32 v) 4624{ 4625 return (v & 0xffU) << 0U; 4626} 4627static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(u32 v) 4628{ 4629 return (v & 0xffU) << 8U; 4630} 4631static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(u32 v) 4632{ 4633 return (v & 0xffU) << 16U; 4634} 4635static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(u32 v) 4636{ 4637 return (v & 0xffU) << 24U; 4638} 4639static inline u32 gr_bes_zrop_settings_r(void) 4640{ 4641 return 0x00408850U; 4642} 4643static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) 4644{ 4645 return (v & 0xfU) << 0U; 4646} 4647static inline u32 gr_be0_crop_debug3_r(void) 4648{ 4649 return 0x00410108U; 4650} 4651static inline u32 gr_bes_crop_debug3_r(void) 4652{ 4653 return 0x00408908U; 4654} 4655static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) 4656{ 4657 return 0x1U << 31U; 4658} 4659static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void) 4660{ 4661 return 0x1U << 1U; 4662} 4663static 
inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void) 4664{ 4665 return 0x0U; 4666} 4667static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void) 4668{ 4669 return 0x2U; 4670} 4671static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void) 4672{ 4673 return 0x1U << 2U; 4674} 4675static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void) 4676{ 4677 return 0x0U; 4678} 4679static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void) 4680{ 4681 return 0x4U; 4682} 4683static inline u32 gr_bes_crop_debug4_r(void) 4684{ 4685 return 0x0040894cU; 4686} 4687static inline u32 gr_bes_crop_debug4_clamp_fp_blend_m(void) 4688{ 4689 return 0x1U << 18U; 4690} 4691static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_inf_f(void) 4692{ 4693 return 0x0U; 4694} 4695static inline u32 gr_bes_crop_debug4_clamp_fp_blend_to_maxval_f(void) 4696{ 4697 return 0x40000U; 4698} 4699static inline u32 gr_bes_crop_settings_r(void) 4700{ 4701 return 0x00408958U; 4702} 4703static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) 4704{ 4705 return (v & 0xfU) << 0U; 4706} 4707static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) 4708{ 4709 return 0x00000020U; 4710} 4711static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) 4712{ 4713 return 0x00000020U; 4714} 4715static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) 4716{ 4717 return 0x000000c0U; 4718} 4719static inline u32 gr_zcull_subregion_qty_v(void) 4720{ 4721 return 0x00000010U; 4722} 4723static inline u32 gr_gpcs_tpcs_tex_in_dbg_r(void) 4724{ 4725 return 0x00419a00U; 4726} 4727static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(u32 v) 4728{ 4729 return (v & 0x1U) << 19U; 4730} 4731static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(void) 4732{ 4733 return 0x1U << 19U; 4734} 4735static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_r(void) 4736{ 4737 return 0x00419bf0U; 4738} 4739static inline u32 
gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(u32 v) 4740{ 4741 return (v & 0x1U) << 5U; 4742} 4743static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(void) 4744{ 4745 return 0x1U << 5U; 4746} 4747static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(u32 v) 4748{ 4749 return (v & 0x1U) << 10U; 4750} 4751static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(void) 4752{ 4753 return 0x1U << 10U; 4754} 4755static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_m(void) 4756{ 4757 return 0x1U << 28U; 4758} 4759static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_disable_f(void) 4760{ 4761 return 0x0U; 4762} 4763static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_enable_f(void) 4764{ 4765 return 0x10000000U; 4766} 4767static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel0_r(void) 4768{ 4769 return 0x00584200U; 4770} 4771static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel1_r(void) 4772{ 4773 return 0x00584204U; 4774} 4775static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r(void) 4776{ 4777 return 0x00584208U; 4778} 4779static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control1_r(void) 4780{ 4781 return 0x00584210U; 4782} 4783static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control2_r(void) 4784{ 4785 return 0x00584214U; 4786} 4787static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control3_r(void) 4788{ 4789 return 0x00584218U; 4790} 4791static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control4_r(void) 4792{ 4793 return 0x0058421cU; 4794} 4795static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r(void) 4796{ 4797 return 0x0058420cU; 4798} 4799static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter0_control_r(void) 4800{ 4801 return 0x00584220U; 4802} 4803static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter1_control_r(void) 4804{ 4805 return 0x00584224U; 4806} 4807static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter2_control_r(void) 4808{ 4809 return 
0x00584228U; 4810} 4811static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter3_control_r(void) 4812{ 4813 return 0x0058422cU; 4814} 4815static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter4_control_r(void) 4816{ 4817 return 0x00584230U; 4818} 4819static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter5_control_r(void) 4820{ 4821 return 0x00584234U; 4822} 4823static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter6_control_r(void) 4824{ 4825 return 0x00584238U; 4826} 4827static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter7_control_r(void) 4828{ 4829 return 0x0058423cU; 4830} 4831static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter_status_s0_r(void) 4832{ 4833 return 0x00584600U; 4834} 4835static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter_status_s1_r(void) 4836{ 4837 return 0x00584604U; 4838} 4839static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s0_r(void) 4840{ 4841 return 0x00584624U; 4842} 4843static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s0_r(void) 4844{ 4845 return 0x00584628U; 4846} 4847static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s0_r(void) 4848{ 4849 return 0x0058462cU; 4850} 4851static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s0_r(void) 4852{ 4853 return 0x00584630U; 4854} 4855static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s1_r(void) 4856{ 4857 return 0x00584634U; 4858} 4859static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s1_r(void) 4860{ 4861 return 0x00584638U; 4862} 4863static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s1_r(void) 4864{ 4865 return 0x0058463cU; 4866} 4867static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s1_r(void) 4868{ 4869 return 0x00584640U; 4870} 4871static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s2_r(void) 4872{ 4873 return 0x00584644U; 4874} 4875static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s2_r(void) 4876{ 4877 return 0x00584648U; 4878} 4879static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s2_r(void) 4880{ 4881 return 0x0058464cU; 4882} 4883static inline u32 
gr_egpc0_etpc0_sm0_dsm_perf_counter3_s2_r(void) 4884{ 4885 return 0x00584650U; 4886} 4887static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s3_r(void) 4888{ 4889 return 0x00584654U; 4890} 4891static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s3_r(void) 4892{ 4893 return 0x00584658U; 4894} 4895static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s3_r(void) 4896{ 4897 return 0x0058465cU; 4898} 4899static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s3_r(void) 4900{ 4901 return 0x00584660U; 4902} 4903static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter4_r(void) 4904{ 4905 return 0x00584614U; 4906} 4907static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter5_r(void) 4908{ 4909 return 0x00584618U; 4910} 4911static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter6_r(void) 4912{ 4913 return 0x0058461cU; 4914} 4915static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter7_r(void) 4916{ 4917 return 0x00584620U; 4918} 4919static inline u32 gr_fe_pwr_mode_r(void) 4920{ 4921 return 0x00404170U; 4922} 4923static inline u32 gr_fe_pwr_mode_mode_auto_f(void) 4924{ 4925 return 0x0U; 4926} 4927static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) 4928{ 4929 return 0x2U; 4930} 4931static inline u32 gr_fe_pwr_mode_req_v(u32 r) 4932{ 4933 return (r >> 4U) & 0x1U; 4934} 4935static inline u32 gr_fe_pwr_mode_req_send_f(void) 4936{ 4937 return 0x10U; 4938} 4939static inline u32 gr_fe_pwr_mode_req_done_v(void) 4940{ 4941 return 0x00000000U; 4942} 4943static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) 4944{ 4945 return 0x00418880U; 4946} 4947static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) 4948{ 4949 return 0x1U << 0U; 4950} 4951static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) 4952{ 4953 return 0x1U << 11U; 4954} 4955static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) 4956{ 4957 return 0x1U << 1U; 4958} 4959static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) 4960{ 4961 return 0x1U << 2U; 4962} 4963static inline u32 
gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) 4964{ 4965 return 0x3U << 3U; 4966} 4967static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) 4968{ 4969 return 0x3U << 5U; 4970} 4971static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) 4972{ 4973 return 0x3U << 28U; 4974} 4975static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) 4976{ 4977 return 0x1U << 30U; 4978} 4979static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) 4980{ 4981 return 0x1U << 31U; 4982} 4983static inline u32 gr_gpcs_pri_mmu_ctrl_atomic_capability_mode_m(void) 4984{ 4985 return 0x3U << 24U; 4986} 4987static inline u32 gr_gpcs_pri_mmu_ctrl_atomic_capability_sys_ncoh_mode_m(void) 4988{ 4989 return 0x1U << 27U; 4990} 4991static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) 4992{ 4993 return 0x00418890U; 4994} 4995static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) 4996{ 4997 return 0x00418894U; 4998} 4999static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) 5000{ 5001 return 0x004188b0U; 5002} 5003static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) 5004{ 5005 return (r >> 16U) & 0x1U; 5006} 5007static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) 5008{ 5009 return 0x00000001U; 5010} 5011static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) 5012{ 5013 return 0x004188b4U; 5014} 5015static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) 5016{ 5017 return 0x004188b8U; 5018} 5019static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) 5020{ 5021 return 0x004188acU; 5022} 5023static inline u32 gr_gpcs_tpcs_sms_dbgr_control0_r(void) 5024{ 5025 return 0x00419e84U; 5026} 5027static inline u32 gr_fe_gfxp_wfi_timeout_r(void) 5028{ 5029 return 0x004041c0U; 5030} 5031static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v) 5032{ 5033 return (v & 0xffffffffU) << 0U; 5034} 5035static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void) 5036{ 5037 return 0x0U; 5038} 5039static inline u32 gr_fe_gfxp_wfi_timeout_count_init_f(void) 5040{ 5041 return 0x800U; 5042} 5043static inline u32 
gr_gpcs_tpcs_sm_texio_control_r(void) 5044{ 5045 return 0x00419bd8U; 5046} 5047static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v) 5048{ 5049 return (v & 0x7U) << 8U; 5050} 5051static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void) 5052{ 5053 return 0x7U << 8U; 5054} 5055static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void) 5056{ 5057 return 0x100U; 5058} 5059static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void) 5060{ 5061 return 0x00419ba4U; 5062} 5063static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void) 5064{ 5065 return 0x3U << 11U; 5066} 5067static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void) 5068{ 5069 return 0x1000U; 5070} 5071static inline u32 gr_gpcs_tc_debug0_r(void) 5072{ 5073 return 0x00418708U; 5074} 5075static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v) 5076{ 5077 return (v & 0x1ffU) << 0U; 5078} 5079static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void) 5080{ 5081 return 0x1ffU << 0U; 5082} 5083static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_r(void) 5084{ 5085 return 0x00500324U; 5086} 5087static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_f(u32 v) 5088{ 5089 return (v & 0x1U) << 0U; 5090} 5091static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m(void) 5092{ 5093 return 0x1U << 0U; 5094} 5095static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_f(u32 v) 5096{ 5097 return (v & 0x1U) << 1U; 5098} 5099static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m(void) 5100{ 5101 return 0x1U << 1U; 5102} 5103static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_r(void) 5104{ 5105 return 0x00500314U; 5106} 5107static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_f(u32 v) 5108{ 5109 return (v & 0x1U) << 0U; 5110} 5111static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m(void) 5112{ 5113 return 0x1U << 0U; 5114} 5115static inline u32 
gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_f(u32 v) 5116{ 5117 return (v & 0x1U) << 2U; 5118} 5119static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m(void) 5120{ 5121 return 0x1U << 2U; 5122} 5123static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_f(u32 v) 5124{ 5125 return (v & 0x1U) << 1U; 5126} 5127static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m(void) 5128{ 5129 return 0x1U << 1U; 5130} 5131static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_f(u32 v) 5132{ 5133 return (v & 0x1U) << 3U; 5134} 5135static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m(void) 5136{ 5137 return 0x1U << 3U; 5138} 5139static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) 5140{ 5141 return (v & 0x1U) << 18U; 5142} 5143static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_m(void) 5144{ 5145 return 0x1U << 18U; 5146} 5147static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_f(u32 v) 5148{ 5149 return (v & 0x1U) << 16U; 5150} 5151static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_m(void) 5152{ 5153 return 0x1U << 16U; 5154} 5155static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) 5156{ 5157 return (v & 0x1U) << 19U; 5158} 5159static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_unique_counter_overflow_m(void) 5160{ 5161 return 0x1U << 19U; 5162} 5163static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) 5164{ 5165 return (v & 0x1U) << 17U; 5166} 5167static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_unique_counter_overflow_m(void) 5168{ 5169 return 0x1U << 17U; 5170} 5171static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_reset_f(u32 v) 5172{ 5173 return (v & 0x1U) << 30U; 5174} 5175static inline u32 
gr_gpc0_mmu_l1tlb_ecc_status_reset_task_f(void) 5176{ 5177 return 0x40000000U; 5178} 5179static inline u32 gr_gpc0_mmu_l1tlb_ecc_address_r(void) 5180{ 5181 return 0x00500320U; 5182} 5183static inline u32 gr_gpc0_mmu_l1tlb_ecc_address_index_f(u32 v) 5184{ 5185 return (v & 0xffffffffU) << 0U; 5186} 5187static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r(void) 5188{ 5189 return 0x00500318U; 5190} 5191static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_s(void) 5192{ 5193 return 16U; 5194} 5195static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_f(u32 v) 5196{ 5197 return (v & 0xffffU) << 0U; 5198} 5199static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_m(void) 5200{ 5201 return 0xffffU << 0U; 5202} 5203static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_v(u32 r) 5204{ 5205 return (r >> 0U) & 0xffffU; 5206} 5207static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_s(void) 5208{ 5209 return 16U; 5210} 5211static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_f(u32 v) 5212{ 5213 return (v & 0xffffU) << 16U; 5214} 5215static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_m(void) 5216{ 5217 return 0xffffU << 16U; 5218} 5219static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_v(u32 r) 5220{ 5221 return (r >> 16U) & 0xffffU; 5222} 5223static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r(void) 5224{ 5225 return 0x0050031cU; 5226} 5227static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s(void) 5228{ 5229 return 16U; 5230} 5231static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_f(u32 v) 5232{ 5233 return (v & 0xffffU) << 0U; 5234} 5235static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_m(void) 5236{ 5237 return 0xffffU << 0U; 5238} 5239static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_v(u32 r) 5240{ 5241 return (r >> 0U) & 0xffffU; 5242} 5243static inline u32 
gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_s(void) 5244{ 5245 return 16U; 5246} 5247static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_f(u32 v) 5248{ 5249 return (v & 0xffffU) << 16U; 5250} 5251static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_m(void) 5252{ 5253 return 0xffffU << 16U; 5254} 5255static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_v(u32 r) 5256{ 5257 return (r >> 16U) & 0xffffU; 5258} 5259static inline u32 gr_gpc0_gpccs_hww_esr_r(void) 5260{ 5261 return 0x00502c98U; 5262} 5263static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_f(u32 v) 5264{ 5265 return (v & 0x1U) << 0U; 5266} 5267static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_m(void) 5268{ 5269 return 0x1U << 0U; 5270} 5271static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_pending_f(void) 5272{ 5273 return 0x1U; 5274} 5275static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_f(u32 v) 5276{ 5277 return (v & 0x1U) << 1U; 5278} 5279static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m(void) 5280{ 5281 return 0x1U << 1U; 5282} 5283static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_pending_f(void) 5284{ 5285 return 0x2U; 5286} 5287static inline u32 gr_gpc0_gpccs_falcon_ecc_status_r(void) 5288{ 5289 return 0x00502678U; 5290} 5291static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_f(u32 v) 5292{ 5293 return (v & 0x1U) << 0U; 5294} 5295static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m(void) 5296{ 5297 return 0x1U << 0U; 5298} 5299static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_pending_f(void) 5300{ 5301 return 0x1U; 5302} 5303static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_f(u32 v) 5304{ 5305 return (v & 0x1U) << 1U; 5306} 5307static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m(void) 5308{ 5309 return 0x1U << 1U; 5310} 5311static inline u32 
gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_pending_f(void) 5312{ 5313 return 0x2U; 5314} 5315static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_f(u32 v) 5316{ 5317 return (v & 0x1U) << 4U; 5318} 5319static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m(void) 5320{ 5321 return 0x1U << 4U; 5322} 5323static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_pending_f(void) 5324{ 5325 return 0x10U; 5326} 5327static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_f(u32 v) 5328{ 5329 return (v & 0x1U) << 5U; 5330} 5331static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m(void) 5332{ 5333 return 0x1U << 5U; 5334} 5335static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_pending_f(void) 5336{ 5337 return 0x20U; 5338} 5339static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) 5340{ 5341 return (v & 0x1U) << 10U; 5342} 5343static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void) 5344{ 5345 return 0x1U << 10U; 5346} 5347static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_pending_f(void) 5348{ 5349 return 0x400U; 5350} 5351static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v) 5352{ 5353 return (v & 0x1U) << 8U; 5354} 5355static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_m(void) 5356{ 5357 return 0x1U << 8U; 5358} 5359static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_pending_f(void) 5360{ 5361 return 0x100U; 5362} 5363static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) 5364{ 5365 return (v & 0x1U) << 11U; 5366} 5367static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_m(void) 5368{ 5369 return 0x1U << 11U; 5370} 5371static inline u32 
gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_pending_f(void) 5372{ 5373 return 0x800U; 5374} 5375static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) 5376{ 5377 return (v & 0x1U) << 9U; 5378} 5379static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_m(void) 5380{ 5381 return 0x1U << 9U; 5382} 5383static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_pending_f(void) 5384{ 5385 return 0x200U; 5386} 5387static inline u32 gr_gpc0_gpccs_falcon_ecc_status_reset_f(u32 v) 5388{ 5389 return (v & 0x1U) << 31U; 5390} 5391static inline u32 gr_gpc0_gpccs_falcon_ecc_status_reset_task_f(void) 5392{ 5393 return 0x80000000U; 5394} 5395static inline u32 gr_gpc0_gpccs_falcon_ecc_address_r(void) 5396{ 5397 return 0x00502684U; 5398} 5399static inline u32 gr_gpc0_gpccs_falcon_ecc_address_index_f(u32 v) 5400{ 5401 return (v & 0x7fffffU) << 0U; 5402} 5403static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_s(void) 5404{ 5405 return 20U; 5406} 5407static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_f(u32 v) 5408{ 5409 return (v & 0xfffffU) << 0U; 5410} 5411static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_m(void) 5412{ 5413 return 0xfffffU << 0U; 5414} 5415static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_v(u32 r) 5416{ 5417 return (r >> 0U) & 0xfffffU; 5418} 5419static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r(void) 5420{ 5421 return 0x0050267cU; 5422} 5423static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_s(void) 5424{ 5425 return 16U; 5426} 5427static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_f(u32 v) 5428{ 5429 return (v & 0xffffU) << 0U; 5430} 5431static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_m(void) 5432{ 5433 return 0xffffU << 0U; 5434} 5435static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_v(u32 r) 5436{ 
5437 return (r >> 0U) & 0xffffU; 5438} 5439static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_s(void) 5440{ 5441 return 16U; 5442} 5443static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_f(u32 v) 5444{ 5445 return (v & 0xffffU) << 16U; 5446} 5447static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_m(void) 5448{ 5449 return 0xffffU << 16U; 5450} 5451static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_v(u32 r) 5452{ 5453 return (r >> 16U) & 0xffffU; 5454} 5455static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r(void) 5456{ 5457 return 0x00502680U; 5458} 5459static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_f(u32 v) 5460{ 5461 return (v & 0xffffU) << 0U; 5462} 5463static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_m(void) 5464{ 5465 return 0xffffU << 0U; 5466} 5467static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_v(u32 r) 5468{ 5469 return (r >> 0U) & 0xffffU; 5470} 5471static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_s(void) 5472{ 5473 return 16U; 5474} 5475static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v) 5476{ 5477 return (v & 0xffffU) << 16U; 5478} 5479static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_m(void) 5480{ 5481 return 0xffffU << 16U; 5482} 5483static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r) 5484{ 5485 return (r >> 16U) & 0xffffU; 5486} 5487static inline u32 gr_fecs_falcon_ecc_status_r(void) 5488{ 5489 return 0x00409678U; 5490} 5491static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_f(u32 v) 5492{ 5493 return (v & 0x1U) << 0U; 5494} 5495static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_m(void) 5496{ 5497 return 0x1U << 0U; 5498} 5499static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_pending_f(void) 5500{ 5501 return 0x1U; 
5502} 5503static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_f(u32 v) 5504{ 5505 return (v & 0x1U) << 1U; 5506} 5507static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_m(void) 5508{ 5509 return 0x1U << 1U; 5510} 5511static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_pending_f(void) 5512{ 5513 return 0x2U; 5514} 5515static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_f(u32 v) 5516{ 5517 return (v & 0x1U) << 4U; 5518} 5519static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_m(void) 5520{ 5521 return 0x1U << 4U; 5522} 5523static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_pending_f(void) 5524{ 5525 return 0x10U; 5526} 5527static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_f(u32 v) 5528{ 5529 return (v & 0x1U) << 5U; 5530} 5531static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m(void) 5532{ 5533 return 0x1U << 5U; 5534} 5535static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_pending_f(void) 5536{ 5537 return 0x20U; 5538} 5539static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) 5540{ 5541 return (v & 0x1U) << 10U; 5542} 5543static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void) 5544{ 5545 return 0x1U << 10U; 5546} 5547static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_pending_f(void) 5548{ 5549 return 0x400U; 5550} 5551static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v) 5552{ 5553 return (v & 0x1U) << 8U; 5554} 5555static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_m(void) 5556{ 5557 return 0x1U << 8U; 5558} 5559static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_pending_f(void) 5560{ 5561 return 0x100U; 5562} 5563static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) 5564{ 5565 return (v & 0x1U) << 11U; 5566} 
5567static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_m(void) 5568{ 5569 return 0x1U << 11U; 5570} 5571static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_pending_f(void) 5572{ 5573 return 0x800U; 5574} 5575static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) 5576{ 5577 return (v & 0x1U) << 9U; 5578} 5579static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_m(void) 5580{ 5581 return 0x1U << 9U; 5582} 5583static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_pending_f(void) 5584{ 5585 return 0x200U; 5586} 5587static inline u32 gr_fecs_falcon_ecc_status_reset_f(u32 v) 5588{ 5589 return (v & 0x1U) << 31U; 5590} 5591static inline u32 gr_fecs_falcon_ecc_status_reset_task_f(void) 5592{ 5593 return 0x80000000U; 5594} 5595static inline u32 gr_fecs_falcon_ecc_address_r(void) 5596{ 5597 return 0x00409684U; 5598} 5599static inline u32 gr_fecs_falcon_ecc_address_index_f(u32 v) 5600{ 5601 return (v & 0x7fffffU) << 0U; 5602} 5603static inline u32 gr_fecs_falcon_ecc_address_row_address_s(void) 5604{ 5605 return 20U; 5606} 5607static inline u32 gr_fecs_falcon_ecc_address_row_address_f(u32 v) 5608{ 5609 return (v & 0xfffffU) << 0U; 5610} 5611static inline u32 gr_fecs_falcon_ecc_address_row_address_m(void) 5612{ 5613 return 0xfffffU << 0U; 5614} 5615static inline u32 gr_fecs_falcon_ecc_address_row_address_v(u32 r) 5616{ 5617 return (r >> 0U) & 0xfffffU; 5618} 5619static inline u32 gr_fecs_falcon_ecc_corrected_err_count_r(void) 5620{ 5621 return 0x0040967cU; 5622} 5623static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_s(void) 5624{ 5625 return 16U; 5626} 5627static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_f(u32 v) 5628{ 5629 return (v & 0xffffU) << 0U; 5630} 5631static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_m(void) 5632{ 5633 return 0xffffU << 0U; 5634} 5635static inline u32 
gr_fecs_falcon_ecc_corrected_err_count_total_v(u32 r) 5636{ 5637 return (r >> 0U) & 0xffffU; 5638} 5639static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_s(void) 5640{ 5641 return 16U; 5642} 5643static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_f(u32 v) 5644{ 5645 return (v & 0xffffU) << 16U; 5646} 5647static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_m(void) 5648{ 5649 return 0xffffU << 16U; 5650} 5651static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_v(u32 r) 5652{ 5653 return (r >> 16U) & 0xffffU; 5654} 5655static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_r(void) 5656{ 5657 return 0x00409680U; 5658} 5659static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_f(u32 v) 5660{ 5661 return (v & 0xffffU) << 0U; 5662} 5663static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_m(void) 5664{ 5665 return 0xffffU << 0U; 5666} 5667static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_v(u32 r) 5668{ 5669 return (r >> 0U) & 0xffffU; 5670} 5671static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_s(void) 5672{ 5673 return 16U; 5674} 5675static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v) 5676{ 5677 return (v & 0xffffU) << 16U; 5678} 5679static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_m(void) 5680{ 5681 return 0xffffU << 16U; 5682} 5683static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r) 5684{ 5685 return (r >> 16U) & 0xffffU; 5686} 5687static inline u32 gr_debug_2_r(void) 5688{ 5689 return 0x00400088U; 5690} 5691static inline u32 gr_debug_2_gfxp_wfi_timeout_unit_m(void) 5692{ 5693 return 0x1U << 27U; 5694} 5695static inline u32 gr_debug_2_gfxp_wfi_timeout_unit_usec_f(void) 5696{ 5697 return 0x0U; 5698} 5699static inline u32 gr_debug_2_gfxp_wfi_timeout_unit_sysclk_f(void) 5700{ 5701 return 0x8000000U; 5702} 5703#endif
diff --git a/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h b/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h
deleted file mode 100644
index 342f90d..0000000
--- a/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h
+++ /dev/null
@@ -1,815 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ltc_gv11b_h_ 57#define _hw_ltc_gv11b_h_ 58 59static inline u32 ltc_pltcg_base_v(void) 60{ 61 return 0x00140000U; 62} 63static inline u32 ltc_pltcg_extent_v(void) 64{ 65 return 0x0017ffffU; 66} 67static inline u32 ltc_ltc0_ltss_v(void) 68{ 69 return 0x00140200U; 70} 71static inline u32 ltc_ltc0_lts0_v(void) 72{ 73 return 0x00140400U; 74} 75static inline u32 ltc_ltcs_ltss_v(void) 76{ 77 return 0x0017e200U; 78} 79static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) 80{ 81 return 0x0014046cU; 82} 83static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) 84{ 85 return 0x00140518U; 86} 87static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) 88{ 89 return 0x0017e318U; 90} 91static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) 92{ 93 return 0x1U << 15U; 94} 95static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) 96{ 97 return 0x00140494U; 98} 99static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) 100{ 101 return (r >> 0U) & 0xffffU; 102} 103static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) 104{ 105 return (r >> 16U) & 0x3U; 106} 107static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) 108{ 109 return 0x00000000U; 110} 111static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) 112{ 113 return 
0x00000001U; 114} 115static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) 120{ 121 return 0x0017e26cU; 122} 123static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) 124{ 125 return 0x1U; 126} 127static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) 128{ 129 return 0x2U; 130} 131static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) 132{ 133 return (r >> 2U) & 0x1U; 134} 135static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) 136{ 137 return 0x00000001U; 138} 139static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) 140{ 141 return 0x4U; 142} 143static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) 144{ 145 return 0x0014046cU; 146} 147static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) 148{ 149 return 0x0017e270U; 150} 151static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) 152{ 153 return (v & 0x3ffffU) << 0U; 154} 155static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) 156{ 157 return 0x0017e274U; 158} 159static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) 160{ 161 return (v & 0x3ffffU) << 0U; 162} 163static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) 164{ 165 return 0x0003ffffU; 166} 167static inline u32 ltc_ltcs_ltss_cbc_base_r(void) 168{ 169 return 0x0017e278U; 170} 171static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) 172{ 173 return 0x0000000bU; 174} 175static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) 176{ 177 return (r >> 0U) & 0x3ffffffU; 178} 179static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) 180{ 181 return 0x0017e27cU; 182} 183static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r) 184{ 185 return (r >> 0U) & 0x1fU; 186} 187static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v) 188{ 189 return (v & 0x1U) << 24U; 190} 191static inline u32 
ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r) 192{ 193 return (r >> 24U) & 0x1U; 194} 195static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v) 196{ 197 return (v & 0x1U) << 25U; 198} 199static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r) 200{ 201 return (r >> 25U) & 0x1U; 202} 203static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) 204{ 205 return 0x0017e000U; 206} 207static inline u32 ltc_ltcs_ltss_cbc_param_r(void) 208{ 209 return 0x0017e280U; 210} 211static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) 212{ 213 return (r >> 0U) & 0xffffU; 214} 215static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) 216{ 217 return (r >> 24U) & 0xfU; 218} 219static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) 220{ 221 return (r >> 28U) & 0xfU; 222} 223static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) 224{ 225 return 0x0017e3f4U; 226} 227static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) 228{ 229 return (r >> 0U) & 0xffffU; 230} 231static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) 232{ 233 return 0x0017e2acU; 234} 235static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) 236{ 237 return (v & 0x1fU) << 16U; 238} 239static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) 240{ 241 return 0x0017e338U; 242} 243static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) 244{ 245 return (v & 0xfU) << 0U; 246} 247static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) 248{ 249 return 0x0017e33cU + i*4U; 250} 251static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) 252{ 253 return 0x00000004U; 254} 255static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) 256{ 257 return 0x0017e34cU; 258} 259static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) 260{ 261 return 32U; 262} 263static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) 264{ 265 
return (v & 0xffffffffU) << 0U; 266} 267static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) 268{ 269 return 0xffffffffU << 0U; 270} 271static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) 272{ 273 return (r >> 0U) & 0xffffffffU; 274} 275static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void) 276{ 277 return 0x0017e204U; 278} 279static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void) 280{ 281 return 8U; 282} 283static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v) 284{ 285 return (v & 0xffU) << 0U; 286} 287static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void) 288{ 289 return 0xffU << 0U; 290} 291static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r) 292{ 293 return (r >> 0U) & 0xffU; 294} 295static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) 296{ 297 return 0x0017e2b0U; 298} 299static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) 300{ 301 return 0x10000000U; 302} 303static inline u32 ltc_ltcs_ltss_g_elpg_r(void) 304{ 305 return 0x0017e214U; 306} 307static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) 308{ 309 return (r >> 0U) & 0x1U; 310} 311static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) 312{ 313 return 0x00000001U; 314} 315static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) 316{ 317 return 0x1U; 318} 319static inline u32 ltc_ltc0_ltss_g_elpg_r(void) 320{ 321 return 0x00140214U; 322} 323static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) 324{ 325 return (r >> 0U) & 0x1U; 326} 327static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) 328{ 329 return 0x00000001U; 330} 331static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) 332{ 333 return 0x1U; 334} 335static inline u32 ltc_ltc1_ltss_g_elpg_r(void) 336{ 337 return 0x00142214U; 338} 339static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) 340{ 341 return (r >> 0U) & 0x1U; 342} 343static inline u32 
ltc_ltc1_ltss_g_elpg_flush_pending_v(void) 344{ 345 return 0x00000001U; 346} 347static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) 348{ 349 return 0x1U; 350} 351static inline u32 ltc_ltcs_ltss_intr_r(void) 352{ 353 return 0x0017e20cU; 354} 355static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) 356{ 357 return 0x100U; 358} 359static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) 360{ 361 return 0x200U; 362} 363static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) 364{ 365 return 0x1U << 20U; 366} 367static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_m(void) 368{ 369 return 0x1U << 21U; 370} 371static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f(void) 372{ 373 return 0x200000U; 374} 375static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f(void) 376{ 377 return 0x0U; 378} 379static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) 380{ 381 return 0x1U << 30U; 382} 383static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) 384{ 385 return 0x1000000U; 386} 387static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) 388{ 389 return 0x2000000U; 390} 391static inline u32 ltc_ltc0_lts0_intr_r(void) 392{ 393 return 0x0014040cU; 394} 395static inline u32 ltc_ltcs_ltss_intr3_r(void) 396{ 397 return 0x0017e388U; 398} 399static inline u32 ltc_ltcs_ltss_intr3_ecc_corrected_m(void) 400{ 401 return 0x1U << 7U; 402} 403static inline u32 ltc_ltcs_ltss_intr3_ecc_uncorrected_m(void) 404{ 405 return 0x1U << 8U; 406} 407static inline u32 ltc_ltc0_lts0_intr3_r(void) 408{ 409 return 0x00140588U; 410} 411static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_r(void) 412{ 413 return 0x001404f0U; 414} 415static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_f(u32 v) 416{ 417 return (v & 0x1U) << 1U; 418} 419static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m(void) 420{ 421 return 0x1U << 1U; 422} 423static inline u32 
ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_f(u32 v) 424{ 425 return (v & 0x1U) << 3U; 426} 427static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m(void) 428{ 429 return 0x1U << 3U; 430} 431static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_f(u32 v) 432{ 433 return (v & 0x1U) << 5U; 434} 435static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m(void) 436{ 437 return 0x1U << 5U; 438} 439static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_f(u32 v) 440{ 441 return (v & 0x1U) << 0U; 442} 443static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m(void) 444{ 445 return 0x1U << 0U; 446} 447static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_f(u32 v) 448{ 449 return (v & 0x1U) << 2U; 450} 451static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m(void) 452{ 453 return 0x1U << 2U; 454} 455static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_f(u32 v) 456{ 457 return (v & 0x1U) << 4U; 458} 459static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m(void) 460{ 461 return 0x1U << 4U; 462} 463static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) 464{ 465 return (v & 0x1U) << 18U; 466} 467static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m(void) 468{ 469 return 0x1U << 18U; 470} 471static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_f(u32 v) 472{ 473 return (v & 0x1U) << 16U; 474} 475static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m(void) 476{ 477 return 0x1U << 16U; 478} 479static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) 480{ 481 return (v & 0x1U) << 19U; 482} 483static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_unique_counter_overflow_m(void) 484{ 485 return 0x1U 
<< 19U; 486} 487static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) 488{ 489 return (v & 0x1U) << 17U; 490} 491static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_unique_counter_overflow_m(void) 492{ 493 return 0x1U << 17U; 494} 495static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_reset_f(u32 v) 496{ 497 return (v & 0x1U) << 30U; 498} 499static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f(void) 500{ 501 return 0x40000000U; 502} 503static inline u32 ltc_ltc0_lts0_l2_cache_ecc_address_r(void) 504{ 505 return 0x001404fcU; 506} 507static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(void) 508{ 509 return 0x001404f4U; 510} 511static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s(void) 512{ 513 return 16U; 514} 515static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_f(u32 v) 516{ 517 return (v & 0xffffU) << 0U; 518} 519static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_m(void) 520{ 521 return 0xffffU << 0U; 522} 523static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(u32 r) 524{ 525 return (r >> 0U) & 0xffffU; 526} 527static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_s(void) 528{ 529 return 16U; 530} 531static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_f(u32 v) 532{ 533 return (v & 0xffffU) << 16U; 534} 535static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_m(void) 536{ 537 return 0xffffU << 16U; 538} 539static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_v(u32 r) 540{ 541 return (r >> 16U) & 0xffffU; 542} 543static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(void) 544{ 545 return 0x001404f8U; 546} 547static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s(void) 548{ 549 return 16U; 550} 551static inline u32 
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_f(u32 v) 552{ 553 return (v & 0xffffU) << 0U; 554} 555static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_m(void) 556{ 557 return 0xffffU << 0U; 558} 559static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(u32 r) 560{ 561 return (r >> 0U) & 0xffffU; 562} 563static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_s(void) 564{ 565 return 16U; 566} 567static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_f(u32 v) 568{ 569 return (v & 0xffffU) << 16U; 570} 571static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_m(void) 572{ 573 return 0xffffU << 16U; 574} 575static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_v(u32 r) 576{ 577 return (r >> 16U) & 0xffffU; 578} 579static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) 580{ 581 return 0x0014051cU; 582} 583static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) 584{ 585 return 0xffU << 0U; 586} 587static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r) 588{ 589 return (r >> 0U) & 0xffU; 590} 591static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) 592{ 593 return 0xffU << 16U; 594} 595static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) 596{ 597 return (r >> 16U) & 0xffU; 598} 599static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) 600{ 601 return 0x0017e2a0U; 602} 603static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) 604{ 605 return (r >> 0U) & 0x1U; 606} 607static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) 608{ 609 return 0x00000001U; 610} 611static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) 612{ 613 return 0x1U; 614} 615static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) 616{ 617 return (r >> 8U) & 0xfU; 618} 619static inline u32 
ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) 620{ 621 return 0x00000003U; 622} 623static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) 624{ 625 return 0x300U; 626} 627static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 628{ 629 return (r >> 28U) & 0x1U; 630} 631static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) 632{ 633 return 0x00000001U; 634} 635static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) 636{ 637 return 0x10000000U; 638} 639static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) 640{ 641 return (r >> 29U) & 0x1U; 642} 643static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) 644{ 645 return 0x00000001U; 646} 647static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) 648{ 649 return 0x20000000U; 650} 651static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) 652{ 653 return (r >> 30U) & 0x1U; 654} 655static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) 656{ 657 return 0x00000001U; 658} 659static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) 660{ 661 return 0x40000000U; 662} 663static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) 664{ 665 return 0x0017e2a4U; 666} 667static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) 668{ 669 return (r >> 0U) & 0x1U; 670} 671static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) 672{ 673 return 0x00000001U; 674} 675static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) 676{ 677 return 0x1U; 678} 679static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) 680{ 681 return (r >> 8U) & 0xfU; 682} 683static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) 684{ 685 return 0x00000003U; 686} 687static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) 688{ 689 return 0x300U; 690} 691static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) 692{ 693 return (r >> 16U) & 0x1U; 694} 695static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) 696{ 697 return 0x00000001U; 698} 699static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) 700{ 701 return 0x10000U; 702} 703static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) 704{ 705 return (r >> 28U) & 0x1U; 706} 707static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) 708{ 709 return 0x00000001U; 710} 711static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) 712{ 713 return 0x10000000U; 714} 715static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) 716{ 717 return (r >> 29U) & 0x1U; 718} 719static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) 720{ 721 return 0x00000001U; 722} 723static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) 724{ 725 return 0x20000000U; 726} 727static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) 728{ 729 return (r >> 30U) & 0x1U; 730} 731static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) 732{ 733 return 0x00000001U; 734} 735static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) 736{ 737 return 0x40000000U; 738} 739static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) 740{ 741 return 0x001402a0U; 742} 743static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) 744{ 745 return (r >> 0U) & 0x1U; 746} 747static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) 748{ 749 return 0x00000001U; 750} 751static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) 752{ 753 return 0x1U; 754} 755static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) 756{ 757 return 
0x001402a4U; 758} 759static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) 760{ 761 return (r >> 0U) & 0x1U; 762} 763static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) 764{ 765 return 0x00000001U; 766} 767static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) 768{ 769 return 0x1U; 770} 771static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) 772{ 773 return 0x001422a0U; 774} 775static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) 776{ 777 return (r >> 0U) & 0x1U; 778} 779static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) 780{ 781 return 0x00000001U; 782} 783static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) 784{ 785 return 0x1U; 786} 787static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) 788{ 789 return 0x001422a4U; 790} 791static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) 792{ 793 return (r >> 0U) & 0x1U; 794} 795static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) 796{ 797 return 0x00000001U; 798} 799static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) 800{ 801 return 0x1U; 802} 803static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void) 804{ 805 return 0x0014058cU; 806} 807static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r) 808{ 809 return (r >> 0U) & 0xffffU; 810} 811static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r) 812{ 813 return (r >> 16U) & 0x1fU; 814} 815#endif
diff --git a/include/nvgpu/hw/gv11b/hw_mc_gv11b.h b/include/nvgpu/hw/gv11b/hw_mc_gv11b.h
deleted file mode 100644
index a1bf15b..0000000
--- a/include/nvgpu/hw/gv11b/hw_mc_gv11b.h
+++ /dev/null
@@ -1,231 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_mc_gv11b_h_ 57#define _hw_mc_gv11b_h_ 58 59static inline u32 mc_boot_0_r(void) 60{ 61 return 0x00000000U; 62} 63static inline u32 mc_boot_0_architecture_v(u32 r) 64{ 65 return (r >> 24U) & 0x1fU; 66} 67static inline u32 mc_boot_0_implementation_v(u32 r) 68{ 69 return (r >> 20U) & 0xfU; 70} 71static inline u32 mc_boot_0_major_revision_v(u32 r) 72{ 73 return (r >> 4U) & 0xfU; 74} 75static inline u32 mc_boot_0_minor_revision_v(u32 r) 76{ 77 return (r >> 0U) & 0xfU; 78} 79static inline u32 mc_intr_r(u32 i) 80{ 81 return 0x00000100U + i*4U; 82} 83static inline u32 mc_intr_pfifo_pending_f(void) 84{ 85 return 0x100U; 86} 87static inline u32 mc_intr_hub_pending_f(void) 88{ 89 return 0x200U; 90} 91static inline u32 mc_intr_pgraph_pending_f(void) 92{ 93 return 0x1000U; 94} 95static inline u32 mc_intr_pmu_pending_f(void) 96{ 97 return 0x1000000U; 98} 99static inline u32 mc_intr_ltc_pending_f(void) 100{ 101 return 0x2000000U; 102} 103static inline u32 mc_intr_priv_ring_pending_f(void) 104{ 105 return 0x40000000U; 106} 107static inline u32 mc_intr_pbus_pending_f(void) 108{ 109 return 0x10000000U; 110} 111static inline u32 mc_intr_en_r(u32 i) 112{ 113 return 0x00000140U + i*4U; 114} 115static inline u32 mc_intr_en_set_r(u32 i) 116{ 117 return 
0x00000160U + i*4U; 118} 119static inline u32 mc_intr_en_clear_r(u32 i) 120{ 121 return 0x00000180U + i*4U; 122} 123static inline u32 mc_enable_r(void) 124{ 125 return 0x00000200U; 126} 127static inline u32 mc_enable_xbar_enabled_f(void) 128{ 129 return 0x4U; 130} 131static inline u32 mc_enable_l2_enabled_f(void) 132{ 133 return 0x8U; 134} 135static inline u32 mc_enable_pmedia_s(void) 136{ 137 return 1U; 138} 139static inline u32 mc_enable_pmedia_f(u32 v) 140{ 141 return (v & 0x1U) << 4U; 142} 143static inline u32 mc_enable_pmedia_m(void) 144{ 145 return 0x1U << 4U; 146} 147static inline u32 mc_enable_pmedia_v(u32 r) 148{ 149 return (r >> 4U) & 0x1U; 150} 151static inline u32 mc_enable_ce0_m(void) 152{ 153 return 0x1U << 6U; 154} 155static inline u32 mc_enable_pfifo_enabled_f(void) 156{ 157 return 0x100U; 158} 159static inline u32 mc_enable_pgraph_enabled_f(void) 160{ 161 return 0x1000U; 162} 163static inline u32 mc_enable_pwr_v(u32 r) 164{ 165 return (r >> 13U) & 0x1U; 166} 167static inline u32 mc_enable_pwr_disabled_v(void) 168{ 169 return 0x00000000U; 170} 171static inline u32 mc_enable_pwr_enabled_f(void) 172{ 173 return 0x2000U; 174} 175static inline u32 mc_enable_pfb_enabled_f(void) 176{ 177 return 0x100000U; 178} 179static inline u32 mc_enable_ce2_m(void) 180{ 181 return 0x1U << 21U; 182} 183static inline u32 mc_enable_ce2_enabled_f(void) 184{ 185 return 0x200000U; 186} 187static inline u32 mc_enable_blg_enabled_f(void) 188{ 189 return 0x8000000U; 190} 191static inline u32 mc_enable_perfmon_enabled_f(void) 192{ 193 return 0x10000000U; 194} 195static inline u32 mc_enable_hub_enabled_f(void) 196{ 197 return 0x20000000U; 198} 199static inline u32 mc_intr_ltc_r(void) 200{ 201 return 0x000001c0U; 202} 203static inline u32 mc_enable_pb_r(void) 204{ 205 return 0x00000204U; 206} 207static inline u32 mc_enable_pb_0_s(void) 208{ 209 return 1U; 210} 211static inline u32 mc_enable_pb_0_f(u32 v) 212{ 213 return (v & 0x1U) << 0U; 214} 215static inline u32 
mc_enable_pb_0_m(void) 216{ 217 return 0x1U << 0U; 218} 219static inline u32 mc_enable_pb_0_v(u32 r) 220{ 221 return (r >> 0U) & 0x1U; 222} 223static inline u32 mc_enable_pb_0_enabled_v(void) 224{ 225 return 0x00000001U; 226} 227static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) 228{ 229 return (v & 0x1U) << (0U + i*1U); 230} 231#endif
diff --git a/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h b/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h
deleted file mode 100644
index c04d30a..0000000
--- a/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h
+++ /dev/null
@@ -1,651 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pbdma_gv11b_h_ 57#define _hw_pbdma_gv11b_h_ 58 59static inline u32 pbdma_gp_entry1_r(void) 60{ 61 return 0x10000004U; 62} 63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) 64{ 65 return (r >> 0U) & 0xffU; 66} 67static inline u32 pbdma_gp_entry1_length_f(u32 v) 68{ 69 return (v & 0x1fffffU) << 10U; 70} 71static inline u32 pbdma_gp_entry1_length_v(u32 r) 72{ 73 return (r >> 10U) & 0x1fffffU; 74} 75static inline u32 pbdma_gp_base_r(u32 i) 76{ 77 return 0x00040048U + i*8192U; 78} 79static inline u32 pbdma_gp_base__size_1_v(void) 80{ 81 return 0x00000003U; 82} 83static inline u32 pbdma_gp_base_offset_f(u32 v) 84{ 85 return (v & 0x1fffffffU) << 3U; 86} 87static inline u32 pbdma_gp_base_rsvd_s(void) 88{ 89 return 3U; 90} 91static inline u32 pbdma_gp_base_hi_r(u32 i) 92{ 93 return 0x0004004cU + i*8192U; 94} 95static inline u32 pbdma_gp_base_hi_offset_f(u32 v) 96{ 97 return (v & 0xffU) << 0U; 98} 99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) 100{ 101 return (v & 0x1fU) << 16U; 102} 103static inline u32 pbdma_gp_fetch_r(u32 i) 104{ 105 return 0x00040050U + i*8192U; 106} 107static inline u32 pbdma_gp_get_r(u32 i) 108{ 109 return 0x00040014U + i*8192U; 110} 111static inline u32 pbdma_gp_put_r(u32 i) 112{ 113 return 0x00040000U + 
i*8192U; 114} 115static inline u32 pbdma_pb_fetch_r(u32 i) 116{ 117 return 0x00040054U + i*8192U; 118} 119static inline u32 pbdma_pb_fetch_hi_r(u32 i) 120{ 121 return 0x00040058U + i*8192U; 122} 123static inline u32 pbdma_get_r(u32 i) 124{ 125 return 0x00040018U + i*8192U; 126} 127static inline u32 pbdma_get_hi_r(u32 i) 128{ 129 return 0x0004001cU + i*8192U; 130} 131static inline u32 pbdma_put_r(u32 i) 132{ 133 return 0x0004005cU + i*8192U; 134} 135static inline u32 pbdma_put_hi_r(u32 i) 136{ 137 return 0x00040060U + i*8192U; 138} 139static inline u32 pbdma_pb_header_r(u32 i) 140{ 141 return 0x00040084U + i*8192U; 142} 143static inline u32 pbdma_pb_header_priv_user_f(void) 144{ 145 return 0x0U; 146} 147static inline u32 pbdma_pb_header_method_zero_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 pbdma_pb_header_subchannel_zero_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 pbdma_pb_header_level_main_f(void) 156{ 157 return 0x0U; 158} 159static inline u32 pbdma_pb_header_first_true_f(void) 160{ 161 return 0x400000U; 162} 163static inline u32 pbdma_pb_header_type_inc_f(void) 164{ 165 return 0x20000000U; 166} 167static inline u32 pbdma_pb_header_type_non_inc_f(void) 168{ 169 return 0x60000000U; 170} 171static inline u32 pbdma_hdr_shadow_r(u32 i) 172{ 173 return 0x00040118U + i*8192U; 174} 175static inline u32 pbdma_gp_shadow_0_r(u32 i) 176{ 177 return 0x00040110U + i*8192U; 178} 179static inline u32 pbdma_gp_shadow_1_r(u32 i) 180{ 181 return 0x00040114U + i*8192U; 182} 183static inline u32 pbdma_subdevice_r(u32 i) 184{ 185 return 0x00040094U + i*8192U; 186} 187static inline u32 pbdma_subdevice_id_f(u32 v) 188{ 189 return (v & 0xfffU) << 0U; 190} 191static inline u32 pbdma_subdevice_status_active_f(void) 192{ 193 return 0x10000000U; 194} 195static inline u32 pbdma_subdevice_channel_dma_enable_f(void) 196{ 197 return 0x20000000U; 198} 199static inline u32 pbdma_method0_r(u32 i) 200{ 201 return 0x000400c0U + i*8192U; 202} 203static inline u32 
pbdma_method0_fifo_size_v(void) 204{ 205 return 0x00000004U; 206} 207static inline u32 pbdma_method0_addr_f(u32 v) 208{ 209 return (v & 0xfffU) << 2U; 210} 211static inline u32 pbdma_method0_addr_v(u32 r) 212{ 213 return (r >> 2U) & 0xfffU; 214} 215static inline u32 pbdma_method0_subch_v(u32 r) 216{ 217 return (r >> 16U) & 0x7U; 218} 219static inline u32 pbdma_method0_first_true_f(void) 220{ 221 return 0x400000U; 222} 223static inline u32 pbdma_method0_valid_true_f(void) 224{ 225 return 0x80000000U; 226} 227static inline u32 pbdma_method1_r(u32 i) 228{ 229 return 0x000400c8U + i*8192U; 230} 231static inline u32 pbdma_method2_r(u32 i) 232{ 233 return 0x000400d0U + i*8192U; 234} 235static inline u32 pbdma_method3_r(u32 i) 236{ 237 return 0x000400d8U + i*8192U; 238} 239static inline u32 pbdma_data0_r(u32 i) 240{ 241 return 0x000400c4U + i*8192U; 242} 243static inline u32 pbdma_acquire_r(u32 i) 244{ 245 return 0x00040030U + i*8192U; 246} 247static inline u32 pbdma_acquire_retry_man_2_f(void) 248{ 249 return 0x2U; 250} 251static inline u32 pbdma_acquire_retry_exp_2_f(void) 252{ 253 return 0x100U; 254} 255static inline u32 pbdma_acquire_timeout_exp_f(u32 v) 256{ 257 return (v & 0xfU) << 11U; 258} 259static inline u32 pbdma_acquire_timeout_exp_max_v(void) 260{ 261 return 0x0000000fU; 262} 263static inline u32 pbdma_acquire_timeout_exp_max_f(void) 264{ 265 return 0x7800U; 266} 267static inline u32 pbdma_acquire_timeout_man_f(u32 v) 268{ 269 return (v & 0xffffU) << 15U; 270} 271static inline u32 pbdma_acquire_timeout_man_max_v(void) 272{ 273 return 0x0000ffffU; 274} 275static inline u32 pbdma_acquire_timeout_man_max_f(void) 276{ 277 return 0x7fff8000U; 278} 279static inline u32 pbdma_acquire_timeout_en_enable_f(void) 280{ 281 return 0x80000000U; 282} 283static inline u32 pbdma_acquire_timeout_en_disable_f(void) 284{ 285 return 0x0U; 286} 287static inline u32 pbdma_status_r(u32 i) 288{ 289 return 0x00040100U + i*8192U; 290} 291static inline u32 pbdma_channel_r(u32 i) 292{ 
293 return 0x00040120U + i*8192U; 294} 295static inline u32 pbdma_signature_r(u32 i) 296{ 297 return 0x00040010U + i*8192U; 298} 299static inline u32 pbdma_signature_hw_valid_f(void) 300{ 301 return 0xfaceU; 302} 303static inline u32 pbdma_signature_sw_zero_f(void) 304{ 305 return 0x0U; 306} 307static inline u32 pbdma_userd_r(u32 i) 308{ 309 return 0x00040008U + i*8192U; 310} 311static inline u32 pbdma_userd_target_vid_mem_f(void) 312{ 313 return 0x0U; 314} 315static inline u32 pbdma_userd_target_sys_mem_coh_f(void) 316{ 317 return 0x2U; 318} 319static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) 320{ 321 return 0x3U; 322} 323static inline u32 pbdma_userd_addr_f(u32 v) 324{ 325 return (v & 0x7fffffU) << 9U; 326} 327static inline u32 pbdma_config_r(u32 i) 328{ 329 return 0x000400f4U + i*8192U; 330} 331static inline u32 pbdma_config_l2_evict_first_f(void) 332{ 333 return 0x0U; 334} 335static inline u32 pbdma_config_l2_evict_normal_f(void) 336{ 337 return 0x1U; 338} 339static inline u32 pbdma_config_ce_split_enable_f(void) 340{ 341 return 0x0U; 342} 343static inline u32 pbdma_config_ce_split_disable_f(void) 344{ 345 return 0x10U; 346} 347static inline u32 pbdma_config_auth_level_non_privileged_f(void) 348{ 349 return 0x0U; 350} 351static inline u32 pbdma_config_auth_level_privileged_f(void) 352{ 353 return 0x100U; 354} 355static inline u32 pbdma_config_userd_writeback_disable_f(void) 356{ 357 return 0x0U; 358} 359static inline u32 pbdma_config_userd_writeback_enable_f(void) 360{ 361 return 0x1000U; 362} 363static inline u32 pbdma_userd_hi_r(u32 i) 364{ 365 return 0x0004000cU + i*8192U; 366} 367static inline u32 pbdma_userd_hi_addr_f(u32 v) 368{ 369 return (v & 0xffU) << 0U; 370} 371static inline u32 pbdma_hce_ctrl_r(u32 i) 372{ 373 return 0x000400e4U + i*8192U; 374} 375static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) 376{ 377 return 0x20U; 378} 379static inline u32 pbdma_intr_0_r(u32 i) 380{ 381 return 0x00040108U + i*8192U; 382} 383static inline u32 
pbdma_intr_0_memreq_v(u32 r) 384{ 385 return (r >> 0U) & 0x1U; 386} 387static inline u32 pbdma_intr_0_memreq_pending_f(void) 388{ 389 return 0x1U; 390} 391static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) 392{ 393 return 0x2U; 394} 395static inline u32 pbdma_intr_0_memack_extra_pending_f(void) 396{ 397 return 0x4U; 398} 399static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) 400{ 401 return 0x8U; 402} 403static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) 404{ 405 return 0x10U; 406} 407static inline u32 pbdma_intr_0_memflush_pending_f(void) 408{ 409 return 0x20U; 410} 411static inline u32 pbdma_intr_0_memop_pending_f(void) 412{ 413 return 0x40U; 414} 415static inline u32 pbdma_intr_0_lbconnect_pending_f(void) 416{ 417 return 0x80U; 418} 419static inline u32 pbdma_intr_0_lbreq_pending_f(void) 420{ 421 return 0x100U; 422} 423static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) 424{ 425 return 0x200U; 426} 427static inline u32 pbdma_intr_0_lback_extra_pending_f(void) 428{ 429 return 0x400U; 430} 431static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) 432{ 433 return 0x800U; 434} 435static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) 436{ 437 return 0x1000U; 438} 439static inline u32 pbdma_intr_0_gpfifo_pending_f(void) 440{ 441 return 0x2000U; 442} 443static inline u32 pbdma_intr_0_gpptr_pending_f(void) 444{ 445 return 0x4000U; 446} 447static inline u32 pbdma_intr_0_gpentry_pending_f(void) 448{ 449 return 0x8000U; 450} 451static inline u32 pbdma_intr_0_gpcrc_pending_f(void) 452{ 453 return 0x10000U; 454} 455static inline u32 pbdma_intr_0_pbptr_pending_f(void) 456{ 457 return 0x20000U; 458} 459static inline u32 pbdma_intr_0_pbentry_pending_f(void) 460{ 461 return 0x40000U; 462} 463static inline u32 pbdma_intr_0_pbcrc_pending_f(void) 464{ 465 return 0x80000U; 466} 467static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void) 468{ 469 return 0x100000U; 470} 471static inline u32 pbdma_intr_0_method_pending_f(void) 
472{ 473 return 0x200000U; 474} 475static inline u32 pbdma_intr_0_methodcrc_pending_f(void) 476{ 477 return 0x400000U; 478} 479static inline u32 pbdma_intr_0_device_pending_f(void) 480{ 481 return 0x800000U; 482} 483static inline u32 pbdma_intr_0_eng_reset_pending_f(void) 484{ 485 return 0x1000000U; 486} 487static inline u32 pbdma_intr_0_semaphore_pending_f(void) 488{ 489 return 0x2000000U; 490} 491static inline u32 pbdma_intr_0_acquire_pending_f(void) 492{ 493 return 0x4000000U; 494} 495static inline u32 pbdma_intr_0_pri_pending_f(void) 496{ 497 return 0x8000000U; 498} 499static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) 500{ 501 return 0x20000000U; 502} 503static inline u32 pbdma_intr_0_pbseg_pending_f(void) 504{ 505 return 0x40000000U; 506} 507static inline u32 pbdma_intr_0_signature_pending_f(void) 508{ 509 return 0x80000000U; 510} 511static inline u32 pbdma_intr_1_r(u32 i) 512{ 513 return 0x00040148U + i*8192U; 514} 515static inline u32 pbdma_intr_1_ctxnotvalid_m(void) 516{ 517 return 0x1U << 31U; 518} 519static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void) 520{ 521 return 0x80000000U; 522} 523static inline u32 pbdma_intr_en_0_r(u32 i) 524{ 525 return 0x0004010cU + i*8192U; 526} 527static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) 528{ 529 return 0x100U; 530} 531static inline u32 pbdma_intr_en_1_r(u32 i) 532{ 533 return 0x0004014cU + i*8192U; 534} 535static inline u32 pbdma_intr_stall_r(u32 i) 536{ 537 return 0x0004013cU + i*8192U; 538} 539static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) 540{ 541 return 0x100U; 542} 543static inline u32 pbdma_intr_stall_1_r(u32 i) 544{ 545 return 0x00040140U + i*8192U; 546} 547static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void) 548{ 549 return 0x1U; 550} 551static inline u32 pbdma_udma_nop_r(void) 552{ 553 return 0x00000008U; 554} 555static inline u32 pbdma_runlist_timeslice_r(u32 i) 556{ 557 return 0x000400f8U + i*8192U; 558} 559static inline u32 
pbdma_runlist_timeslice_timeout_128_f(void) 560{ 561 return 0x80U; 562} 563static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) 564{ 565 return 0x3000U; 566} 567static inline u32 pbdma_runlist_timeslice_enable_true_f(void) 568{ 569 return 0x10000000U; 570} 571static inline u32 pbdma_target_r(u32 i) 572{ 573 return 0x000400acU + i*8192U; 574} 575static inline u32 pbdma_target_engine_sw_f(void) 576{ 577 return 0x1fU; 578} 579static inline u32 pbdma_target_eng_ctx_valid_true_f(void) 580{ 581 return 0x10000U; 582} 583static inline u32 pbdma_target_eng_ctx_valid_false_f(void) 584{ 585 return 0x0U; 586} 587static inline u32 pbdma_target_ce_ctx_valid_true_f(void) 588{ 589 return 0x20000U; 590} 591static inline u32 pbdma_target_ce_ctx_valid_false_f(void) 592{ 593 return 0x0U; 594} 595static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void) 596{ 597 return 0x0U; 598} 599static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void) 600{ 601 return 0x1000000U; 602} 603static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void) 604{ 605 return 0x2000000U; 606} 607static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void) 608{ 609 return 0x3000000U; 610} 611static inline u32 pbdma_target_should_send_tsg_event_true_f(void) 612{ 613 return 0x20000000U; 614} 615static inline u32 pbdma_target_should_send_tsg_event_false_f(void) 616{ 617 return 0x0U; 618} 619static inline u32 pbdma_target_needs_host_tsg_event_true_f(void) 620{ 621 return 0x80000000U; 622} 623static inline u32 pbdma_target_needs_host_tsg_event_false_f(void) 624{ 625 return 0x0U; 626} 627static inline u32 pbdma_set_channel_info_r(u32 i) 628{ 629 return 0x000400fcU + i*8192U; 630} 631static inline u32 pbdma_set_channel_info_veid_f(u32 v) 632{ 633 return (v & 0x3fU) << 8U; 634} 635static inline u32 pbdma_timeout_r(u32 i) 636{ 637 return 0x0004012cU + i*8192U; 638} 639static inline u32 pbdma_timeout_period_m(void) 640{ 641 return 
0xffffffffU << 0U; 642} 643static inline u32 pbdma_timeout_period_max_f(void) 644{ 645 return 0xffffffffU; 646} 647static inline u32 pbdma_timeout_period_init_f(void) 648{ 649 return 0x10000U; 650} 651#endif
diff --git a/include/nvgpu/hw/gv11b/hw_perf_gv11b.h b/include/nvgpu/hw/gv11b/hw_perf_gv11b.h
deleted file mode 100644
index a3341df..0000000
--- a/include/nvgpu/hw/gv11b/hw_perf_gv11b.h
+++ /dev/null
@@ -1,263 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_perf_gv11b_h_ 57#define _hw_perf_gv11b_h_ 58 59static inline u32 perf_pmmgpc_perdomain_offset_v(void) 60{ 61 return 0x00000200U; 62} 63static inline u32 perf_pmmsys_perdomain_offset_v(void) 64{ 65 return 0x00000200U; 66} 67static inline u32 perf_pmmgpc_base_v(void) 68{ 69 return 0x00180000U; 70} 71static inline u32 perf_pmmgpc_extent_v(void) 72{ 73 return 0x00183fffU; 74} 75static inline u32 perf_pmmsys_base_v(void) 76{ 77 return 0x00240000U; 78} 79static inline u32 perf_pmmsys_extent_v(void) 80{ 81 return 0x00243fffU; 82} 83static inline u32 perf_pmmfbp_base_v(void) 84{ 85 return 0x00200000U; 86} 87static inline u32 perf_pmasys_control_r(void) 88{ 89 return 0x0024a000U; 90} 91static inline u32 perf_pmasys_control_membuf_status_v(u32 r) 92{ 93 return (r >> 4U) & 0x1U; 94} 95static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) 100{ 101 return 0x10U; 102} 103static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) 104{ 105 return (v & 0x1U) << 5U; 106} 107static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) 108{ 109 return (r >> 5U) & 0x1U; 110} 111static inline u32 
perf_pmasys_control_membuf_clear_status_doit_v(void) 112{ 113 return 0x00000001U; 114} 115static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) 116{ 117 return 0x20U; 118} 119static inline u32 perf_pmasys_mem_block_r(void) 120{ 121 return 0x0024a070U; 122} 123static inline u32 perf_pmasys_mem_block_base_f(u32 v) 124{ 125 return (v & 0xfffffffU) << 0U; 126} 127static inline u32 perf_pmasys_mem_block_target_f(u32 v) 128{ 129 return (v & 0x3U) << 28U; 130} 131static inline u32 perf_pmasys_mem_block_target_v(u32 r) 132{ 133 return (r >> 28U) & 0x3U; 134} 135static inline u32 perf_pmasys_mem_block_target_lfb_v(void) 136{ 137 return 0x00000000U; 138} 139static inline u32 perf_pmasys_mem_block_target_lfb_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) 144{ 145 return 0x00000002U; 146} 147static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) 148{ 149 return 0x20000000U; 150} 151static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) 152{ 153 return 0x00000003U; 154} 155static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) 156{ 157 return 0x30000000U; 158} 159static inline u32 perf_pmasys_mem_block_valid_f(u32 v) 160{ 161 return (v & 0x1U) << 31U; 162} 163static inline u32 perf_pmasys_mem_block_valid_v(u32 r) 164{ 165 return (r >> 31U) & 0x1U; 166} 167static inline u32 perf_pmasys_mem_block_valid_true_v(void) 168{ 169 return 0x00000001U; 170} 171static inline u32 perf_pmasys_mem_block_valid_true_f(void) 172{ 173 return 0x80000000U; 174} 175static inline u32 perf_pmasys_mem_block_valid_false_v(void) 176{ 177 return 0x00000000U; 178} 179static inline u32 perf_pmasys_mem_block_valid_false_f(void) 180{ 181 return 0x0U; 182} 183static inline u32 perf_pmasys_outbase_r(void) 184{ 185 return 0x0024a074U; 186} 187static inline u32 perf_pmasys_outbase_ptr_f(u32 v) 188{ 189 return (v & 0x7ffffffU) << 5U; 190} 191static inline u32 perf_pmasys_outbaseupper_r(void) 192{ 193 return 
0x0024a078U; 194} 195static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) 196{ 197 return (v & 0xffU) << 0U; 198} 199static inline u32 perf_pmasys_outsize_r(void) 200{ 201 return 0x0024a07cU; 202} 203static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) 204{ 205 return (v & 0x7ffffffU) << 5U; 206} 207static inline u32 perf_pmasys_mem_bytes_r(void) 208{ 209 return 0x0024a084U; 210} 211static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) 212{ 213 return (v & 0xfffffffU) << 4U; 214} 215static inline u32 perf_pmasys_mem_bump_r(void) 216{ 217 return 0x0024a088U; 218} 219static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) 220{ 221 return (v & 0xfffffffU) << 4U; 222} 223static inline u32 perf_pmasys_enginestatus_r(void) 224{ 225 return 0x0024a0a4U; 226} 227static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) 228{ 229 return (v & 0x1U) << 4U; 230} 231static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) 232{ 233 return 0x00000001U; 234} 235static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) 236{ 237 return 0x10U; 238} 239static inline u32 perf_pmmsys_engine_sel_r(u32 i) 240{ 241 return 0x0024006cU + i*512U; 242} 243static inline u32 perf_pmmsys_engine_sel__size_1_v(void) 244{ 245 return 0x00000020U; 246} 247static inline u32 perf_pmmfbp_engine_sel_r(u32 i) 248{ 249 return 0x0020006cU + i*512U; 250} 251static inline u32 perf_pmmfbp_engine_sel__size_1_v(void) 252{ 253 return 0x00000020U; 254} 255static inline u32 perf_pmmgpc_engine_sel_r(u32 i) 256{ 257 return 0x0018006cU + i*512U; 258} 259static inline u32 perf_pmmgpc_engine_sel__size_1_v(void) 260{ 261 return 0x00000020U; 262} 263#endif
diff --git a/include/nvgpu/hw/gv11b/hw_pram_gv11b.h b/include/nvgpu/hw/gv11b/hw_pram_gv11b.h
deleted file mode 100644
index 456d631..0000000
--- a/include/nvgpu/hw/gv11b/hw_pram_gv11b.h
+++ /dev/null
@@ -1,63 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pram_gv11b_h_ 57#define _hw_pram_gv11b_h_ 58 59static inline u32 pram_data032_r(u32 i) 60{ 61 return 0x00700000U + i*4U; 62} 63#endif
diff --git a/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h b/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h
deleted file mode 100644
index a653681..0000000
--- a/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h
+++ /dev/null
@@ -1,167 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringmaster_gv11b_h_ 57#define _hw_pri_ringmaster_gv11b_h_ 58 59static inline u32 pri_ringmaster_command_r(void) 60{ 61 return 0x0012004cU; 62} 63static inline u32 pri_ringmaster_command_cmd_m(void) 64{ 65 return 0x3fU << 0U; 66} 67static inline u32 pri_ringmaster_command_cmd_v(u32 r) 68{ 69 return (r >> 0U) & 0x3fU; 70} 71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) 72{ 73 return 0x00000000U; 74} 75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) 76{ 77 return 0x1U; 78} 79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) 88{ 89 return 0x0U; 90} 91static inline u32 pri_ringmaster_command_data_r(void) 92{ 93 return 0x00120048U; 94} 95static inline u32 pri_ringmaster_start_results_r(void) 96{ 97 return 0x00120050U; 98} 99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) 100{ 101 return (r >> 0U) & 0x1U; 102} 103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) 104{ 105 return 0x00000001U; 106} 107static inline u32 
pri_ringmaster_intr_status0_r(void) 108{ 109 return 0x00120058U; 110} 111static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) 112{ 113 return (r >> 0U) & 0x1U; 114} 115static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) 116{ 117 return (r >> 1U) & 0x1U; 118} 119static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) 120{ 121 return (r >> 2U) & 0x1U; 122} 123static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) 124{ 125 return (r >> 8U) & 0x1U; 126} 127static inline u32 pri_ringmaster_intr_status1_r(void) 128{ 129 return 0x0012005cU; 130} 131static inline u32 pri_ringmaster_global_ctl_r(void) 132{ 133 return 0x00120060U; 134} 135static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) 136{ 137 return 0x1U; 138} 139static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) 140{ 141 return 0x0U; 142} 143static inline u32 pri_ringmaster_enum_fbp_r(void) 144{ 145 return 0x00120074U; 146} 147static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) 148{ 149 return (r >> 0U) & 0x1fU; 150} 151static inline u32 pri_ringmaster_enum_gpc_r(void) 152{ 153 return 0x00120078U; 154} 155static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) 156{ 157 return (r >> 0U) & 0x1fU; 158} 159static inline u32 pri_ringmaster_enum_ltc_r(void) 160{ 161 return 0x0012006cU; 162} 163static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) 164{ 165 return (r >> 0U) & 0x1fU; 166} 167#endif
diff --git a/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h b/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h
deleted file mode 100644
index 47da22c..0000000
--- a/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h
+++ /dev/null
@@ -1,79 +0,0 @@ 1/* 2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_gpc_gv11b_h_ 57#define _hw_pri_ringstation_gpc_gv11b_h_ 58 59static inline u32 pri_ringstation_gpc_master_config_r(u32 i) 60{ 61 return 0x00128300U + i*4U; 62} 63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) 64{ 65 return 0x00128120U; 66} 67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) 68{ 69 return 0x00128124U; 70} 71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) 72{ 73 return 0x00128128U; 74} 75static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) 76{ 77 return 0x0012812cU; 78} 79#endif
diff --git a/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h b/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h
deleted file mode 100644
index 622b6d7..0000000
--- a/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h
+++ /dev/null
@@ -1,91 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pri_ringstation_sys_gv11b_h_ 57#define _hw_pri_ringstation_sys_gv11b_h_ 58 59static inline u32 pri_ringstation_sys_master_config_r(u32 i) 60{ 61 return 0x00122300U + i*4U; 62} 63static inline u32 pri_ringstation_sys_decode_config_r(void) 64{ 65 return 0x00122204U; 66} 67static inline u32 pri_ringstation_sys_decode_config_ring_m(void) 68{ 69 return 0x7U << 0U; 70} 71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) 72{ 73 return 0x1U; 74} 75static inline u32 pri_ringstation_sys_priv_error_adr_r(void) 76{ 77 return 0x00122120U; 78} 79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) 80{ 81 return 0x00122124U; 82} 83static inline u32 pri_ringstation_sys_priv_error_info_r(void) 84{ 85 return 0x00122128U; 86} 87static inline u32 pri_ringstation_sys_priv_error_code_r(void) 88{ 89 return 0x0012212cU; 90} 91#endif
diff --git a/include/nvgpu/hw/gv11b/hw_proj_gv11b.h b/include/nvgpu/hw/gv11b/hw_proj_gv11b.h
deleted file mode 100644
index 7283237..0000000
--- a/include/nvgpu/hw/gv11b/hw_proj_gv11b.h
+++ /dev/null
@@ -1,191 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_proj_gv11b_h_ 57#define _hw_proj_gv11b_h_ 58 59static inline u32 proj_gpc_base_v(void) 60{ 61 return 0x00500000U; 62} 63static inline u32 proj_gpc_shared_base_v(void) 64{ 65 return 0x00418000U; 66} 67static inline u32 proj_gpc_stride_v(void) 68{ 69 return 0x00008000U; 70} 71static inline u32 proj_gpc_priv_stride_v(void) 72{ 73 return 0x00000800U; 74} 75static inline u32 proj_ltc_stride_v(void) 76{ 77 return 0x00002000U; 78} 79static inline u32 proj_lts_stride_v(void) 80{ 81 return 0x00000200U; 82} 83static inline u32 proj_fbpa_stride_v(void) 84{ 85 return 0x00004000U; 86} 87static inline u32 proj_ppc_in_gpc_base_v(void) 88{ 89 return 0x00003000U; 90} 91static inline u32 proj_ppc_in_gpc_shared_base_v(void) 92{ 93 return 0x00003e00U; 94} 95static inline u32 proj_ppc_in_gpc_stride_v(void) 96{ 97 return 0x00000200U; 98} 99static inline u32 proj_rop_base_v(void) 100{ 101 return 0x00410000U; 102} 103static inline u32 proj_rop_shared_base_v(void) 104{ 105 return 0x00408800U; 106} 107static inline u32 proj_rop_stride_v(void) 108{ 109 return 0x00000400U; 110} 111static inline u32 proj_tpc_in_gpc_base_v(void) 112{ 113 return 0x00004000U; 114} 115static inline u32 proj_tpc_in_gpc_stride_v(void) 116{ 117 return 0x00000800U; 118} 119static 
inline u32 proj_tpc_in_gpc_shared_base_v(void) 120{ 121 return 0x00001800U; 122} 123static inline u32 proj_smpc_base_v(void) 124{ 125 return 0x00000200U; 126} 127static inline u32 proj_smpc_shared_base_v(void) 128{ 129 return 0x00000300U; 130} 131static inline u32 proj_smpc_unique_base_v(void) 132{ 133 return 0x00000600U; 134} 135static inline u32 proj_smpc_stride_v(void) 136{ 137 return 0x00000100U; 138} 139static inline u32 proj_host_num_engines_v(void) 140{ 141 return 0x00000004U; 142} 143static inline u32 proj_host_num_pbdma_v(void) 144{ 145 return 0x00000003U; 146} 147static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) 148{ 149 return 0x00000004U; 150} 151static inline u32 proj_scal_litter_num_fbps_v(void) 152{ 153 return 0x00000001U; 154} 155static inline u32 proj_scal_litter_num_fbpas_v(void) 156{ 157 return 0x00000001U; 158} 159static inline u32 proj_scal_litter_num_gpcs_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) 164{ 165 return 0x00000002U; 166} 167static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) 168{ 169 return 0x00000002U; 170} 171static inline u32 proj_scal_litter_num_zcull_banks_v(void) 172{ 173 return 0x00000004U; 174} 175static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) 176{ 177 return 0x00000002U; 178} 179static inline u32 proj_scal_max_gpcs_v(void) 180{ 181 return 0x00000020U; 182} 183static inline u32 proj_scal_max_tpc_per_gpc_v(void) 184{ 185 return 0x00000008U; 186} 187static inline u32 proj_sm_stride_v(void) 188{ 189 return 0x00000080U; 190} 191#endif
diff --git a/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h b/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
deleted file mode 100644
index 1cda12d..0000000
--- a/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
+++ /dev/null
@@ -1,1219 +0,0 @@ 1/* 2 * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_pwr_gv11b_h_ 57#define _hw_pwr_gv11b_h_ 58 59static inline u32 pwr_falcon_irqsset_r(void) 60{ 61 return 0x0010a000U; 62} 63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) 64{ 65 return 0x40U; 66} 67static inline u32 pwr_falcon_irqsclr_r(void) 68{ 69 return 0x0010a004U; 70} 71static inline u32 pwr_falcon_irqstat_r(void) 72{ 73 return 0x0010a008U; 74} 75static inline u32 pwr_falcon_irqstat_halt_true_f(void) 76{ 77 return 0x10U; 78} 79static inline u32 pwr_falcon_irqstat_exterr_true_f(void) 80{ 81 return 0x20U; 82} 83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) 84{ 85 return 0x40U; 86} 87static inline u32 pwr_falcon_irqstat_ext_second_true_f(void) 88{ 89 return 0x800U; 90} 91static inline u32 pwr_falcon_irqstat_ext_ecc_parity_true_f(void) 92{ 93 return 0x400U; 94} 95static inline u32 pwr_pmu_ecc_intr_status_r(void) 96{ 97 return 0x0010abfcU; 98} 99static inline u32 pwr_pmu_ecc_intr_status_corrected_f(u32 v) 100{ 101 return (v & 0x1U) << 0U; 102} 103static inline u32 pwr_pmu_ecc_intr_status_corrected_m(void) 104{ 105 return 0x1U << 0U; 106} 107static inline u32 pwr_pmu_ecc_intr_status_uncorrected_f(u32 v) 108{ 109 return (v & 0x1U) << 1U; 110} 111static inline u32 pwr_pmu_ecc_intr_status_uncorrected_m(void) 112{ 
113 return 0x1U << 1U; 114} 115static inline u32 pwr_falcon_irqmode_r(void) 116{ 117 return 0x0010a00cU; 118} 119static inline u32 pwr_falcon_irqmset_r(void) 120{ 121 return 0x0010a010U; 122} 123static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) 124{ 125 return (v & 0x1U) << 0U; 126} 127static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) 128{ 129 return (v & 0x1U) << 1U; 130} 131static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) 132{ 133 return (v & 0x1U) << 2U; 134} 135static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) 136{ 137 return (v & 0x1U) << 3U; 138} 139static inline u32 pwr_falcon_irqmset_halt_f(u32 v) 140{ 141 return (v & 0x1U) << 4U; 142} 143static inline u32 pwr_falcon_irqmset_exterr_f(u32 v) 144{ 145 return (v & 0x1U) << 5U; 146} 147static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) 148{ 149 return (v & 0x1U) << 6U; 150} 151static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) 152{ 153 return (v & 0x1U) << 7U; 154} 155static inline u32 pwr_falcon_irqmset_ext_f(u32 v) 156{ 157 return (v & 0xffU) << 8U; 158} 159static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v) 160{ 161 return (v & 0x1U) << 8U; 162} 163static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v) 164{ 165 return (v & 0x1U) << 9U; 166} 167static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v) 168{ 169 return (v & 0x1U) << 11U; 170} 171static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v) 172{ 173 return (v & 0x1U) << 12U; 174} 175static inline u32 pwr_falcon_irqmset_ext_miscio_f(u32 v) 176{ 177 return (v & 0x1U) << 13U; 178} 179static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v) 180{ 181 return (v & 0x1U) << 14U; 182} 183static inline u32 pwr_falcon_irqmset_ext_rsvd8_f(u32 v) 184{ 185 return (v & 0x1U) << 15U; 186} 187static inline u32 pwr_falcon_irqmset_ext_ecc_parity_f(u32 v) 188{ 189 return (v & 0x1U) << 10U; 190} 191static inline u32 pwr_falcon_irqmclr_r(void) 192{ 193 return 0x0010a014U; 194} 195static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) 196{ 197 return (v & 
0x1U) << 0U; 198} 199static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) 200{ 201 return (v & 0x1U) << 1U; 202} 203static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) 204{ 205 return (v & 0x1U) << 2U; 206} 207static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) 208{ 209 return (v & 0x1U) << 3U; 210} 211static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) 212{ 213 return (v & 0x1U) << 4U; 214} 215static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) 216{ 217 return (v & 0x1U) << 5U; 218} 219static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) 220{ 221 return (v & 0x1U) << 6U; 222} 223static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) 224{ 225 return (v & 0x1U) << 7U; 226} 227static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) 228{ 229 return (v & 0xffU) << 8U; 230} 231static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v) 232{ 233 return (v & 0x1U) << 8U; 234} 235static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v) 236{ 237 return (v & 0x1U) << 9U; 238} 239static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v) 240{ 241 return (v & 0x1U) << 11U; 242} 243static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v) 244{ 245 return (v & 0x1U) << 12U; 246} 247static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v) 248{ 249 return (v & 0x1U) << 13U; 250} 251static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v) 252{ 253 return (v & 0x1U) << 14U; 254} 255static inline u32 pwr_falcon_irqmclr_ext_rsvd8_f(u32 v) 256{ 257 return (v & 0x1U) << 15U; 258} 259static inline u32 pwr_falcon_irqmclr_ext_ecc_parity_f(u32 v) 260{ 261 return (v & 0x1U) << 10U; 262} 263static inline u32 pwr_falcon_irqmask_r(void) 264{ 265 return 0x0010a018U; 266} 267static inline u32 pwr_falcon_irqdest_r(void) 268{ 269 return 0x0010a01cU; 270} 271static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) 272{ 273 return (v & 0x1U) << 0U; 274} 275static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) 276{ 277 return (v & 0x1U) << 1U; 278} 279static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) 280{ 281 
return (v & 0x1U) << 2U; 282} 283static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) 284{ 285 return (v & 0x1U) << 3U; 286} 287static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) 288{ 289 return (v & 0x1U) << 4U; 290} 291static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) 292{ 293 return (v & 0x1U) << 5U; 294} 295static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v) 296{ 297 return (v & 0x1U) << 6U; 298} 299static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) 300{ 301 return (v & 0x1U) << 7U; 302} 303static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) 304{ 305 return (v & 0xffU) << 8U; 306} 307static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v) 308{ 309 return (v & 0x1U) << 8U; 310} 311static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v) 312{ 313 return (v & 0x1U) << 9U; 314} 315static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v) 316{ 317 return (v & 0x1U) << 11U; 318} 319static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v) 320{ 321 return (v & 0x1U) << 12U; 322} 323static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v) 324{ 325 return (v & 0x1U) << 13U; 326} 327static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v) 328{ 329 return (v & 0x1U) << 14U; 330} 331static inline u32 pwr_falcon_irqdest_host_ext_rsvd8_f(u32 v) 332{ 333 return (v & 0x1U) << 15U; 334} 335static inline u32 pwr_falcon_irqdest_host_ext_ecc_parity_f(u32 v) 336{ 337 return (v & 0x1U) << 10U; 338} 339static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 340{ 341 return (v & 0x1U) << 16U; 342} 343static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) 344{ 345 return (v & 0x1U) << 17U; 346} 347static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) 348{ 349 return (v & 0x1U) << 18U; 350} 351static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) 352{ 353 return (v & 0x1U) << 19U; 354} 355static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) 356{ 357 return (v & 0x1U) << 20U; 358} 359static inline u32 
pwr_falcon_irqdest_target_exterr_f(u32 v) 360{ 361 return (v & 0x1U) << 21U; 362} 363static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) 364{ 365 return (v & 0x1U) << 22U; 366} 367static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) 368{ 369 return (v & 0x1U) << 23U; 370} 371static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) 372{ 373 return (v & 0xffU) << 24U; 374} 375static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v) 376{ 377 return (v & 0x1U) << 24U; 378} 379static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v) 380{ 381 return (v & 0x1U) << 25U; 382} 383static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v) 384{ 385 return (v & 0x1U) << 27U; 386} 387static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v) 388{ 389 return (v & 0x1U) << 28U; 390} 391static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v) 392{ 393 return (v & 0x1U) << 29U; 394} 395static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v) 396{ 397 return (v & 0x1U) << 30U; 398} 399static inline u32 pwr_falcon_irqdest_target_ext_rsvd8_f(u32 v) 400{ 401 return (v & 0x1U) << 31U; 402} 403static inline u32 pwr_falcon_irqdest_target_ext_ecc_parity_f(u32 v) 404{ 405 return (v & 0x1U) << 26U; 406} 407static inline u32 pwr_falcon_curctx_r(void) 408{ 409 return 0x0010a050U; 410} 411static inline u32 pwr_falcon_nxtctx_r(void) 412{ 413 return 0x0010a054U; 414} 415static inline u32 pwr_falcon_mailbox0_r(void) 416{ 417 return 0x0010a040U; 418} 419static inline u32 pwr_falcon_mailbox1_r(void) 420{ 421 return 0x0010a044U; 422} 423static inline u32 pwr_falcon_itfen_r(void) 424{ 425 return 0x0010a048U; 426} 427static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) 428{ 429 return 0x1U; 430} 431static inline u32 pwr_falcon_idlestate_r(void) 432{ 433 return 0x0010a04cU; 434} 435static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) 436{ 437 return (r >> 0U) & 0x1U; 438} 439static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) 440{ 441 
return (r >> 1U) & 0x7fffU; 442} 443static inline u32 pwr_falcon_os_r(void) 444{ 445 return 0x0010a080U; 446} 447static inline u32 pwr_falcon_engctl_r(void) 448{ 449 return 0x0010a0a4U; 450} 451static inline u32 pwr_falcon_cpuctl_r(void) 452{ 453 return 0x0010a100U; 454} 455static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) 456{ 457 return (v & 0x1U) << 1U; 458} 459static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) 460{ 461 return (v & 0x1U) << 4U; 462} 463static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) 464{ 465 return 0x1U << 4U; 466} 467static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) 468{ 469 return (r >> 4U) & 0x1U; 470} 471static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) 472{ 473 return (v & 0x1U) << 6U; 474} 475static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void) 476{ 477 return 0x1U << 6U; 478} 479static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) 480{ 481 return (r >> 6U) & 0x1U; 482} 483static inline u32 pwr_falcon_cpuctl_alias_r(void) 484{ 485 return 0x0010a130U; 486} 487static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) 488{ 489 return (v & 0x1U) << 1U; 490} 491static inline u32 pwr_pmu_scpctl_stat_r(void) 492{ 493 return 0x0010ac08U; 494} 495static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) 496{ 497 return (v & 0x1U) << 20U; 498} 499static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) 500{ 501 return 0x1U << 20U; 502} 503static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) 504{ 505 return (r >> 20U) & 0x1U; 506} 507static inline u32 pwr_falcon_imemc_r(u32 i) 508{ 509 return 0x0010a180U + i*16U; 510} 511static inline u32 pwr_falcon_imemc_offs_f(u32 v) 512{ 513 return (v & 0x3fU) << 2U; 514} 515static inline u32 pwr_falcon_imemc_blk_f(u32 v) 516{ 517 return (v & 0xffU) << 8U; 518} 519static inline u32 pwr_falcon_imemc_aincw_f(u32 v) 520{ 521 return (v & 0x1U) << 24U; 522} 523static inline u32 pwr_falcon_imemd_r(u32 i) 524{ 525 return 0x0010a184U + i*16U; 526} 527static inline u32 
pwr_falcon_imemt_r(u32 i) 528{ 529 return 0x0010a188U + i*16U; 530} 531static inline u32 pwr_falcon_sctl_r(void) 532{ 533 return 0x0010a240U; 534} 535static inline u32 pwr_falcon_mmu_phys_sec_r(void) 536{ 537 return 0x00100ce4U; 538} 539static inline u32 pwr_falcon_bootvec_r(void) 540{ 541 return 0x0010a104U; 542} 543static inline u32 pwr_falcon_bootvec_vec_f(u32 v) 544{ 545 return (v & 0xffffffffU) << 0U; 546} 547static inline u32 pwr_falcon_dmactl_r(void) 548{ 549 return 0x0010a10cU; 550} 551static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) 552{ 553 return 0x1U << 1U; 554} 555static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) 556{ 557 return 0x1U << 2U; 558} 559static inline u32 pwr_falcon_hwcfg_r(void) 560{ 561 return 0x0010a108U; 562} 563static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) 564{ 565 return (r >> 0U) & 0x1ffU; 566} 567static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) 568{ 569 return (r >> 9U) & 0x1ffU; 570} 571static inline u32 pwr_falcon_dmatrfbase_r(void) 572{ 573 return 0x0010a110U; 574} 575static inline u32 pwr_falcon_dmatrfbase1_r(void) 576{ 577 return 0x0010a128U; 578} 579static inline u32 pwr_falcon_dmatrfmoffs_r(void) 580{ 581 return 0x0010a114U; 582} 583static inline u32 pwr_falcon_dmatrfcmd_r(void) 584{ 585 return 0x0010a118U; 586} 587static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) 588{ 589 return (v & 0x1U) << 4U; 590} 591static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) 592{ 593 return (v & 0x1U) << 5U; 594} 595static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) 596{ 597 return (v & 0x7U) << 8U; 598} 599static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) 600{ 601 return (v & 0x7U) << 12U; 602} 603static inline u32 pwr_falcon_dmatrffboffs_r(void) 604{ 605 return 0x0010a11cU; 606} 607static inline u32 pwr_falcon_exterraddr_r(void) 608{ 609 return 0x0010a168U; 610} 611static inline u32 pwr_falcon_exterrstat_r(void) 612{ 613 return 0x0010a16cU; 614} 615static inline u32 
pwr_falcon_exterrstat_valid_m(void) 616{ 617 return 0x1U << 31U; 618} 619static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) 620{ 621 return (r >> 31U) & 0x1U; 622} 623static inline u32 pwr_falcon_exterrstat_valid_true_v(void) 624{ 625 return 0x00000001U; 626} 627static inline u32 pwr_pmu_falcon_icd_cmd_r(void) 628{ 629 return 0x0010a200U; 630} 631static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) 632{ 633 return 4U; 634} 635static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) 636{ 637 return (v & 0xfU) << 0U; 638} 639static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) 640{ 641 return 0xfU << 0U; 642} 643static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) 644{ 645 return (r >> 0U) & 0xfU; 646} 647static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) 648{ 649 return 0x8U; 650} 651static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) 652{ 653 return 0xeU; 654} 655static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) 656{ 657 return (v & 0x1fU) << 8U; 658} 659static inline u32 pwr_pmu_falcon_icd_rdata_r(void) 660{ 661 return 0x0010a20cU; 662} 663static inline u32 pwr_falcon_dmemc_r(u32 i) 664{ 665 return 0x0010a1c0U + i*8U; 666} 667static inline u32 pwr_falcon_dmemc_offs_f(u32 v) 668{ 669 return (v & 0x3fU) << 2U; 670} 671static inline u32 pwr_falcon_dmemc_offs_m(void) 672{ 673 return 0x3fU << 2U; 674} 675static inline u32 pwr_falcon_dmemc_blk_f(u32 v) 676{ 677 return (v & 0xffU) << 8U; 678} 679static inline u32 pwr_falcon_dmemc_blk_m(void) 680{ 681 return 0xffU << 8U; 682} 683static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) 684{ 685 return (v & 0x1U) << 24U; 686} 687static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) 688{ 689 return (v & 0x1U) << 25U; 690} 691static inline u32 pwr_falcon_dmemd_r(u32 i) 692{ 693 return 0x0010a1c4U + i*8U; 694} 695static inline u32 pwr_pmu_new_instblk_r(void) 696{ 697 return 0x0010a480U; 698} 699static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) 700{ 701 return (v & 0xfffffffU) << 0U; 702} 703static inline u32 
pwr_pmu_new_instblk_target_fb_f(void) 704{ 705 return 0x0U; 706} 707static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) 708{ 709 return 0x20000000U; 710} 711static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) 712{ 713 return 0x30000000U; 714} 715static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) 716{ 717 return (v & 0x1U) << 30U; 718} 719static inline u32 pwr_pmu_mutex_id_r(void) 720{ 721 return 0x0010a488U; 722} 723static inline u32 pwr_pmu_mutex_id_value_v(u32 r) 724{ 725 return (r >> 0U) & 0xffU; 726} 727static inline u32 pwr_pmu_mutex_id_value_init_v(void) 728{ 729 return 0x00000000U; 730} 731static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) 732{ 733 return 0x000000ffU; 734} 735static inline u32 pwr_pmu_mutex_id_release_r(void) 736{ 737 return 0x0010a48cU; 738} 739static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) 740{ 741 return (v & 0xffU) << 0U; 742} 743static inline u32 pwr_pmu_mutex_id_release_value_m(void) 744{ 745 return 0xffU << 0U; 746} 747static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) 748{ 749 return 0x00000000U; 750} 751static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) 752{ 753 return 0x0U; 754} 755static inline u32 pwr_pmu_mutex_r(u32 i) 756{ 757 return 0x0010a580U + i*4U; 758} 759static inline u32 pwr_pmu_mutex__size_1_v(void) 760{ 761 return 0x00000010U; 762} 763static inline u32 pwr_pmu_mutex_value_f(u32 v) 764{ 765 return (v & 0xffU) << 0U; 766} 767static inline u32 pwr_pmu_mutex_value_v(u32 r) 768{ 769 return (r >> 0U) & 0xffU; 770} 771static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) 772{ 773 return 0x0U; 774} 775static inline u32 pwr_pmu_queue_head_r(u32 i) 776{ 777 return 0x0010a800U + i*4U; 778} 779static inline u32 pwr_pmu_queue_head__size_1_v(void) 780{ 781 return 0x00000008U; 782} 783static inline u32 pwr_pmu_queue_head_address_f(u32 v) 784{ 785 return (v & 0xffffffffU) << 0U; 786} 787static inline u32 pwr_pmu_queue_head_address_v(u32 r) 788{ 789 return (r >> 0U) & 
0xffffffffU; 790} 791static inline u32 pwr_pmu_queue_tail_r(u32 i) 792{ 793 return 0x0010a820U + i*4U; 794} 795static inline u32 pwr_pmu_queue_tail__size_1_v(void) 796{ 797 return 0x00000008U; 798} 799static inline u32 pwr_pmu_queue_tail_address_f(u32 v) 800{ 801 return (v & 0xffffffffU) << 0U; 802} 803static inline u32 pwr_pmu_queue_tail_address_v(u32 r) 804{ 805 return (r >> 0U) & 0xffffffffU; 806} 807static inline u32 pwr_pmu_msgq_head_r(void) 808{ 809 return 0x0010a4c8U; 810} 811static inline u32 pwr_pmu_msgq_head_val_f(u32 v) 812{ 813 return (v & 0xffffffffU) << 0U; 814} 815static inline u32 pwr_pmu_msgq_head_val_v(u32 r) 816{ 817 return (r >> 0U) & 0xffffffffU; 818} 819static inline u32 pwr_pmu_msgq_tail_r(void) 820{ 821 return 0x0010a4ccU; 822} 823static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) 824{ 825 return (v & 0xffffffffU) << 0U; 826} 827static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) 828{ 829 return (r >> 0U) & 0xffffffffU; 830} 831static inline u32 pwr_pmu_idle_mask_r(u32 i) 832{ 833 return 0x0010a504U + i*16U; 834} 835static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) 836{ 837 return 0x1U; 838} 839static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) 840{ 841 return 0x200000U; 842} 843static inline u32 pwr_pmu_idle_mask_1_r(u32 i) 844{ 845 return 0x0010aa34U + i*8U; 846} 847static inline u32 pwr_pmu_idle_mask_2_r(u32 i) 848{ 849 return 0x0010a840U + i*4U; 850} 851static inline u32 pwr_pmu_idle_count_r(u32 i) 852{ 853 return 0x0010a508U + i*16U; 854} 855static inline u32 pwr_pmu_idle_count_value_f(u32 v) 856{ 857 return (v & 0x7fffffffU) << 0U; 858} 859static inline u32 pwr_pmu_idle_count_value_v(u32 r) 860{ 861 return (r >> 0U) & 0x7fffffffU; 862} 863static inline u32 pwr_pmu_idle_count_reset_f(u32 v) 864{ 865 return (v & 0x1U) << 31U; 866} 867static inline u32 pwr_pmu_idle_ctrl_r(u32 i) 868{ 869 return 0x0010a50cU + i*16U; 870} 871static inline u32 pwr_pmu_idle_ctrl_value_m(void) 872{ 873 return 0x3U << 0U; 874} 875static inline u32 
pwr_pmu_idle_ctrl_value_busy_f(void) 876{ 877 return 0x2U; 878} 879static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) 880{ 881 return 0x3U; 882} 883static inline u32 pwr_pmu_idle_ctrl_filter_m(void) 884{ 885 return 0x1U << 2U; 886} 887static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) 888{ 889 return 0x0U; 890} 891static inline u32 pwr_pmu_idle_threshold_r(u32 i) 892{ 893 return 0x0010a8a0U + i*4U; 894} 895static inline u32 pwr_pmu_idle_threshold_value_f(u32 v) 896{ 897 return (v & 0x7fffffffU) << 0U; 898} 899static inline u32 pwr_pmu_idle_intr_r(void) 900{ 901 return 0x0010a9e8U; 902} 903static inline u32 pwr_pmu_idle_intr_en_f(u32 v) 904{ 905 return (v & 0x1U) << 0U; 906} 907static inline u32 pwr_pmu_idle_intr_en_disabled_v(void) 908{ 909 return 0x00000000U; 910} 911static inline u32 pwr_pmu_idle_intr_en_enabled_v(void) 912{ 913 return 0x00000001U; 914} 915static inline u32 pwr_pmu_idle_intr_status_r(void) 916{ 917 return 0x0010a9ecU; 918} 919static inline u32 pwr_pmu_idle_intr_status_intr_f(u32 v) 920{ 921 return (v & 0x1U) << 0U; 922} 923static inline u32 pwr_pmu_idle_intr_status_intr_m(void) 924{ 925 return 0x1U << 0U; 926} 927static inline u32 pwr_pmu_idle_intr_status_intr_v(u32 r) 928{ 929 return (r >> 0U) & 0x1U; 930} 931static inline u32 pwr_pmu_idle_intr_status_intr_pending_v(void) 932{ 933 return 0x00000001U; 934} 935static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void) 936{ 937 return 0x00000001U; 938} 939static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) 940{ 941 return 0x0010a9f0U + i*8U; 942} 943static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) 944{ 945 return 0x0010a9f4U + i*8U; 946} 947static inline u32 pwr_pmu_idle_mask_2_supp_r(u32 i) 948{ 949 return 0x0010a690U + i*4U; 950} 951static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) 952{ 953 return 0x0010aa30U + i*8U; 954} 955static inline u32 pwr_pmu_debug_r(u32 i) 956{ 957 return 0x0010a5c0U + i*4U; 958} 959static inline u32 pwr_pmu_debug__size_1_v(void) 960{ 961 return 
0x00000004U; 962} 963static inline u32 pwr_pmu_mailbox_r(u32 i) 964{ 965 return 0x0010a450U + i*4U; 966} 967static inline u32 pwr_pmu_mailbox__size_1_v(void) 968{ 969 return 0x0000000cU; 970} 971static inline u32 pwr_pmu_bar0_addr_r(void) 972{ 973 return 0x0010a7a0U; 974} 975static inline u32 pwr_pmu_bar0_data_r(void) 976{ 977 return 0x0010a7a4U; 978} 979static inline u32 pwr_pmu_bar0_ctl_r(void) 980{ 981 return 0x0010a7acU; 982} 983static inline u32 pwr_pmu_bar0_timeout_r(void) 984{ 985 return 0x0010a7a8U; 986} 987static inline u32 pwr_pmu_bar0_fecs_error_r(void) 988{ 989 return 0x0010a988U; 990} 991static inline u32 pwr_pmu_bar0_error_status_r(void) 992{ 993 return 0x0010a7b0U; 994} 995static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) 996{ 997 return 0x0010a6c0U + i*4U; 998} 999static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) 1000{ 1001 return 0x0010a6e8U + i*4U; 1002} 1003static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) 1004{ 1005 return 0x0010a710U + i*4U; 1006} 1007static inline u32 pwr_pmu_pg_intren_r(u32 i) 1008{ 1009 return 0x0010a760U + i*4U; 1010} 1011static inline u32 pwr_pmu_falcon_ecc_status_r(void) 1012{ 1013 return 0x0010a6b0U; 1014} 1015static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_imem_f(u32 v) 1016{ 1017 return (v & 0x1U) << 0U; 1018} 1019static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_imem_m(void) 1020{ 1021 return 0x1U << 0U; 1022} 1023static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_dmem_f(u32 v) 1024{ 1025 return (v & 0x1U) << 1U; 1026} 1027static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_dmem_m(void) 1028{ 1029 return 0x1U << 1U; 1030} 1031static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_imem_f(u32 v) 1032{ 1033 return (v & 0x1U) << 8U; 1034} 1035static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m(void) 1036{ 1037 return 0x1U << 8U; 1038} 1039static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_f(u32 v) 1040{ 1041 return (v & 0x1U) << 9U; 1042} 1043static inline u32 
pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m(void) 1044{ 1045 return 0x1U << 9U; 1046} 1047static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v) 1048{ 1049 return (v & 0x1U) << 16U; 1050} 1051static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_m(void) 1052{ 1053 return 0x1U << 16U; 1054} 1055static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) 1056{ 1057 return (v & 0x1U) << 18U; 1058} 1059static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void) 1060{ 1061 return 0x1U << 18U; 1062} 1063static inline u32 pwr_pmu_falcon_ecc_status_reset_f(u32 v) 1064{ 1065 return (v & 0x1U) << 31U; 1066} 1067static inline u32 pwr_pmu_falcon_ecc_status_reset_task_f(void) 1068{ 1069 return 0x80000000U; 1070} 1071static inline u32 pwr_pmu_falcon_ecc_address_r(void) 1072{ 1073 return 0x0010a6b4U; 1074} 1075static inline u32 pwr_pmu_falcon_ecc_address_index_f(u32 v) 1076{ 1077 return (v & 0xffffffU) << 0U; 1078} 1079static inline u32 pwr_pmu_falcon_ecc_address_type_f(u32 v) 1080{ 1081 return (v & 0xfU) << 20U; 1082} 1083static inline u32 pwr_pmu_falcon_ecc_address_type_imem_f(void) 1084{ 1085 return 0x0U; 1086} 1087static inline u32 pwr_pmu_falcon_ecc_address_type_dmem_f(void) 1088{ 1089 return 0x100000U; 1090} 1091static inline u32 pwr_pmu_falcon_ecc_address_row_address_s(void) 1092{ 1093 return 16U; 1094} 1095static inline u32 pwr_pmu_falcon_ecc_address_row_address_f(u32 v) 1096{ 1097 return (v & 0xffffU) << 0U; 1098} 1099static inline u32 pwr_pmu_falcon_ecc_address_row_address_m(void) 1100{ 1101 return 0xffffU << 0U; 1102} 1103static inline u32 pwr_pmu_falcon_ecc_address_row_address_v(u32 r) 1104{ 1105 return (r >> 0U) & 0xffffU; 1106} 1107static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_r(void) 1108{ 1109 return 0x0010a6b8U; 1110} 1111static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_s(void) 1112{ 1113 return 16U; 1114} 
1115static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_f(u32 v) 1116{ 1117 return (v & 0xffffU) << 0U; 1118} 1119static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_m(void) 1120{ 1121 return 0xffffU << 0U; 1122} 1123static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_v(u32 r) 1124{ 1125 return (r >> 0U) & 0xffffU; 1126} 1127static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_s(void) 1128{ 1129 return 16U; 1130} 1131static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_f(u32 v) 1132{ 1133 return (v & 0xffffU) << 16U; 1134} 1135static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_m(void) 1136{ 1137 return 0xffffU << 16U; 1138} 1139static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_v(u32 r) 1140{ 1141 return (r >> 16U) & 0xffffU; 1142} 1143static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_r(void) 1144{ 1145 return 0x0010a6bcU; 1146} 1147static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_s(void) 1148{ 1149 return 16U; 1150} 1151static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_f(u32 v) 1152{ 1153 return (v & 0xffffU) << 0U; 1154} 1155static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_m(void) 1156{ 1157 return 0xffffU << 0U; 1158} 1159static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_v(u32 r) 1160{ 1161 return (r >> 0U) & 0xffffU; 1162} 1163static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_s(void) 1164{ 1165 return 16U; 1166} 1167static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v) 1168{ 1169 return (v & 0xffffU) << 16U; 1170} 1171static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_m(void) 1172{ 1173 return 0xffffU << 16U; 1174} 1175static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r) 1176{ 1177 return (r >> 16U) & 0xffffU; 1178} 1179static inline u32 pwr_fbif_transcfg_r(u32 i) 1180{ 1181 return 
0x0010ae00U + i*4U; 1182} 1183static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) 1184{ 1185 return 0x0U; 1186} 1187static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) 1188{ 1189 return 0x1U; 1190} 1191static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) 1192{ 1193 return 0x2U; 1194} 1195static inline u32 pwr_fbif_transcfg_mem_type_s(void) 1196{ 1197 return 1U; 1198} 1199static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) 1200{ 1201 return (v & 0x1U) << 2U; 1202} 1203static inline u32 pwr_fbif_transcfg_mem_type_m(void) 1204{ 1205 return 0x1U << 2U; 1206} 1207static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) 1208{ 1209 return (r >> 2U) & 0x1U; 1210} 1211static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) 1212{ 1213 return 0x0U; 1214} 1215static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) 1216{ 1217 return 0x4U; 1218} 1219#endif
diff --git a/include/nvgpu/hw/gv11b/hw_ram_gv11b.h b/include/nvgpu/hw/gv11b/hw_ram_gv11b.h
deleted file mode 100644
index 59c6d88..0000000
--- a/include/nvgpu/hw/gv11b/hw_ram_gv11b.h
+++ /dev/null
@@ -1,791 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_ram_gv11b_h_ 57#define _hw_ram_gv11b_h_ 58 59static inline u32 ram_in_ramfc_s(void) 60{ 61 return 4096U; 62} 63static inline u32 ram_in_ramfc_w(void) 64{ 65 return 0U; 66} 67static inline u32 ram_in_page_dir_base_target_f(u32 v) 68{ 69 return (v & 0x3U) << 0U; 70} 71static inline u32 ram_in_page_dir_base_target_w(void) 72{ 73 return 128U; 74} 75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) 76{ 77 return 0x0U; 78} 79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) 80{ 81 return 0x2U; 82} 83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) 84{ 85 return 0x3U; 86} 87static inline u32 ram_in_page_dir_base_vol_w(void) 88{ 89 return 128U; 90} 91static inline u32 ram_in_page_dir_base_vol_true_f(void) 92{ 93 return 0x4U; 94} 95static inline u32 ram_in_page_dir_base_vol_false_f(void) 96{ 97 return 0x0U; 98} 99static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v) 100{ 101 return (v & 0x1U) << 4U; 102} 103static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void) 104{ 105 return 0x1U << 4U; 106} 107static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void) 108{ 109 return 128U; 110} 111static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void) 112{ 113 return 
0x10U; 114} 115static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v) 116{ 117 return (v & 0x1U) << 5U; 118} 119static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void) 120{ 121 return 0x1U << 5U; 122} 123static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void) 124{ 125 return 128U; 126} 127static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void) 128{ 129 return 0x20U; 130} 131static inline u32 ram_in_use_ver2_pt_format_f(u32 v) 132{ 133 return (v & 0x1U) << 10U; 134} 135static inline u32 ram_in_use_ver2_pt_format_m(void) 136{ 137 return 0x1U << 10U; 138} 139static inline u32 ram_in_use_ver2_pt_format_w(void) 140{ 141 return 128U; 142} 143static inline u32 ram_in_use_ver2_pt_format_true_f(void) 144{ 145 return 0x400U; 146} 147static inline u32 ram_in_use_ver2_pt_format_false_f(void) 148{ 149 return 0x0U; 150} 151static inline u32 ram_in_big_page_size_f(u32 v) 152{ 153 return (v & 0x1U) << 11U; 154} 155static inline u32 ram_in_big_page_size_m(void) 156{ 157 return 0x1U << 11U; 158} 159static inline u32 ram_in_big_page_size_w(void) 160{ 161 return 128U; 162} 163static inline u32 ram_in_big_page_size_128kb_f(void) 164{ 165 return 0x0U; 166} 167static inline u32 ram_in_big_page_size_64kb_f(void) 168{ 169 return 0x800U; 170} 171static inline u32 ram_in_page_dir_base_lo_f(u32 v) 172{ 173 return (v & 0xfffffU) << 12U; 174} 175static inline u32 ram_in_page_dir_base_lo_w(void) 176{ 177 return 128U; 178} 179static inline u32 ram_in_page_dir_base_hi_f(u32 v) 180{ 181 return (v & 0xffffffffU) << 0U; 182} 183static inline u32 ram_in_page_dir_base_hi_w(void) 184{ 185 return 129U; 186} 187static inline u32 ram_in_engine_cs_w(void) 188{ 189 return 132U; 190} 191static inline u32 ram_in_engine_cs_wfi_v(void) 192{ 193 return 0x00000000U; 194} 195static inline u32 ram_in_engine_cs_wfi_f(void) 196{ 197 return 0x0U; 198} 199static inline u32 ram_in_engine_cs_fg_v(void) 200{ 201 return 0x00000001U; 202} 203static inline u32 ram_in_engine_cs_fg_f(void) 
204{ 205 return 0x8U; 206} 207static inline u32 ram_in_engine_wfi_mode_f(u32 v) 208{ 209 return (v & 0x1U) << 2U; 210} 211static inline u32 ram_in_engine_wfi_mode_w(void) 212{ 213 return 132U; 214} 215static inline u32 ram_in_engine_wfi_mode_physical_v(void) 216{ 217 return 0x00000000U; 218} 219static inline u32 ram_in_engine_wfi_mode_virtual_v(void) 220{ 221 return 0x00000001U; 222} 223static inline u32 ram_in_engine_wfi_target_f(u32 v) 224{ 225 return (v & 0x3U) << 0U; 226} 227static inline u32 ram_in_engine_wfi_target_w(void) 228{ 229 return 132U; 230} 231static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void) 232{ 233 return 0x00000002U; 234} 235static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void) 236{ 237 return 0x00000003U; 238} 239static inline u32 ram_in_engine_wfi_target_local_mem_v(void) 240{ 241 return 0x00000000U; 242} 243static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v) 244{ 245 return (v & 0xfffffU) << 12U; 246} 247static inline u32 ram_in_engine_wfi_ptr_lo_w(void) 248{ 249 return 132U; 250} 251static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v) 252{ 253 return (v & 0xffU) << 0U; 254} 255static inline u32 ram_in_engine_wfi_ptr_hi_w(void) 256{ 257 return 133U; 258} 259static inline u32 ram_in_engine_wfi_veid_f(u32 v) 260{ 261 return (v & 0x3fU) << 0U; 262} 263static inline u32 ram_in_engine_wfi_veid_w(void) 264{ 265 return 134U; 266} 267static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v) 268{ 269 return (v & 0xffffffffU) << 0U; 270} 271static inline u32 ram_in_eng_method_buffer_addr_lo_w(void) 272{ 273 return 136U; 274} 275static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v) 276{ 277 return (v & 0x1ffffU) << 0U; 278} 279static inline u32 ram_in_eng_method_buffer_addr_hi_w(void) 280{ 281 return 137U; 282} 283static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i) 284{ 285 return (v & 0x3U) << (0U + i*0U); 286} 287static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void) 288{ 289 return 0x00000040U; 290} 
291static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void) 292{ 293 return 0x00000000U; 294} 295static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void) 296{ 297 return 0x00000001U; 298} 299static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void) 300{ 301 return 0x00000002U; 302} 303static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void) 304{ 305 return 0x00000003U; 306} 307static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i) 308{ 309 return (v & 0x1U) << (2U + i*0U); 310} 311static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void) 312{ 313 return 0x00000040U; 314} 315static inline u32 ram_in_sc_page_dir_base_vol_true_v(void) 316{ 317 return 0x00000001U; 318} 319static inline u32 ram_in_sc_page_dir_base_vol_false_v(void) 320{ 321 return 0x00000000U; 322} 323static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i) 324{ 325 return (v & 0x1U) << (4U + i*0U); 326} 327static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void) 328{ 329 return 0x00000040U; 330} 331static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void) 332{ 333 return 0x00000001U; 334} 335static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void) 336{ 337 return 0x00000000U; 338} 339static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i) 340{ 341 return (v & 0x1U) << (5U + i*0U); 342} 343static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void) 344{ 345 return 0x00000040U; 346} 347static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void) 348{ 349 return 0x00000001U; 350} 351static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void) 352{ 353 return 0x00000000U; 354} 355static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i) 356{ 357 return (v & 0x1U) << (10U + i*0U); 358} 359static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void) 360{ 361 return 0x00000040U; 362} 363static inline u32 
ram_in_sc_use_ver2_pt_format_false_v(void) 364{ 365 return 0x00000000U; 366} 367static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void) 368{ 369 return 0x00000001U; 370} 371static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i) 372{ 373 return (v & 0x1U) << (11U + i*0U); 374} 375static inline u32 ram_in_sc_big_page_size__size_1_v(void) 376{ 377 return 0x00000040U; 378} 379static inline u32 ram_in_sc_big_page_size_64kb_v(void) 380{ 381 return 0x00000001U; 382} 383static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i) 384{ 385 return (v & 0xfffffU) << (12U + i*0U); 386} 387static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void) 388{ 389 return 0x00000040U; 390} 391static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i) 392{ 393 return (v & 0xffffffffU) << (0U + i*0U); 394} 395static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void) 396{ 397 return 0x00000040U; 398} 399static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v) 400{ 401 return (v & 0x3U) << 0U; 402} 403static inline u32 ram_in_sc_page_dir_base_target_0_w(void) 404{ 405 return 168U; 406} 407static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v) 408{ 409 return (v & 0x1U) << 2U; 410} 411static inline u32 ram_in_sc_page_dir_base_vol_0_w(void) 412{ 413 return 168U; 414} 415static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v) 416{ 417 return (v & 0x1U) << 4U; 418} 419static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void) 420{ 421 return 168U; 422} 423static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v) 424{ 425 return (v & 0x1U) << 5U; 426} 427static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void) 428{ 429 return 168U; 430} 431static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v) 432{ 433 return (v & 0x1U) << 10U; 434} 435static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void) 436{ 437 return 168U; 438} 439static inline u32 ram_in_sc_big_page_size_0_f(u32 v) 440{ 441 return (v & 0x1U) << 11U; 442} 443static 
inline u32 ram_in_sc_big_page_size_0_w(void) 444{ 445 return 168U; 446} 447static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v) 448{ 449 return (v & 0xfffffU) << 12U; 450} 451static inline u32 ram_in_sc_page_dir_base_lo_0_w(void) 452{ 453 return 168U; 454} 455static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v) 456{ 457 return (v & 0xffffffffU) << 0U; 458} 459static inline u32 ram_in_sc_page_dir_base_hi_0_w(void) 460{ 461 return 169U; 462} 463static inline u32 ram_in_base_shift_v(void) 464{ 465 return 0x0000000cU; 466} 467static inline u32 ram_in_alloc_size_v(void) 468{ 469 return 0x00001000U; 470} 471static inline u32 ram_fc_size_val_v(void) 472{ 473 return 0x00000200U; 474} 475static inline u32 ram_fc_gp_put_w(void) 476{ 477 return 0U; 478} 479static inline u32 ram_fc_userd_w(void) 480{ 481 return 2U; 482} 483static inline u32 ram_fc_userd_hi_w(void) 484{ 485 return 3U; 486} 487static inline u32 ram_fc_signature_w(void) 488{ 489 return 4U; 490} 491static inline u32 ram_fc_gp_get_w(void) 492{ 493 return 5U; 494} 495static inline u32 ram_fc_pb_get_w(void) 496{ 497 return 6U; 498} 499static inline u32 ram_fc_pb_get_hi_w(void) 500{ 501 return 7U; 502} 503static inline u32 ram_fc_pb_top_level_get_w(void) 504{ 505 return 8U; 506} 507static inline u32 ram_fc_pb_top_level_get_hi_w(void) 508{ 509 return 9U; 510} 511static inline u32 ram_fc_acquire_w(void) 512{ 513 return 12U; 514} 515static inline u32 ram_fc_sem_addr_hi_w(void) 516{ 517 return 14U; 518} 519static inline u32 ram_fc_sem_addr_lo_w(void) 520{ 521 return 15U; 522} 523static inline u32 ram_fc_sem_payload_lo_w(void) 524{ 525 return 16U; 526} 527static inline u32 ram_fc_sem_payload_hi_w(void) 528{ 529 return 39U; 530} 531static inline u32 ram_fc_sem_execute_w(void) 532{ 533 return 17U; 534} 535static inline u32 ram_fc_gp_base_w(void) 536{ 537 return 18U; 538} 539static inline u32 ram_fc_gp_base_hi_w(void) 540{ 541 return 19U; 542} 543static inline u32 ram_fc_gp_fetch_w(void) 544{ 545 return 20U; 546} 
547static inline u32 ram_fc_pb_fetch_w(void) 548{ 549 return 21U; 550} 551static inline u32 ram_fc_pb_fetch_hi_w(void) 552{ 553 return 22U; 554} 555static inline u32 ram_fc_pb_put_w(void) 556{ 557 return 23U; 558} 559static inline u32 ram_fc_pb_put_hi_w(void) 560{ 561 return 24U; 562} 563static inline u32 ram_fc_pb_header_w(void) 564{ 565 return 33U; 566} 567static inline u32 ram_fc_pb_count_w(void) 568{ 569 return 34U; 570} 571static inline u32 ram_fc_subdevice_w(void) 572{ 573 return 37U; 574} 575static inline u32 ram_fc_target_w(void) 576{ 577 return 43U; 578} 579static inline u32 ram_fc_hce_ctrl_w(void) 580{ 581 return 57U; 582} 583static inline u32 ram_fc_chid_w(void) 584{ 585 return 58U; 586} 587static inline u32 ram_fc_chid_id_f(u32 v) 588{ 589 return (v & 0xfffU) << 0U; 590} 591static inline u32 ram_fc_chid_id_w(void) 592{ 593 return 0U; 594} 595static inline u32 ram_fc_config_w(void) 596{ 597 return 61U; 598} 599static inline u32 ram_fc_runlist_timeslice_w(void) 600{ 601 return 62U; 602} 603static inline u32 ram_fc_set_channel_info_w(void) 604{ 605 return 63U; 606} 607static inline u32 ram_userd_base_shift_v(void) 608{ 609 return 0x00000009U; 610} 611static inline u32 ram_userd_chan_size_v(void) 612{ 613 return 0x00000200U; 614} 615static inline u32 ram_userd_put_w(void) 616{ 617 return 16U; 618} 619static inline u32 ram_userd_get_w(void) 620{ 621 return 17U; 622} 623static inline u32 ram_userd_ref_w(void) 624{ 625 return 18U; 626} 627static inline u32 ram_userd_put_hi_w(void) 628{ 629 return 19U; 630} 631static inline u32 ram_userd_ref_threshold_w(void) 632{ 633 return 20U; 634} 635static inline u32 ram_userd_top_level_get_w(void) 636{ 637 return 22U; 638} 639static inline u32 ram_userd_top_level_get_hi_w(void) 640{ 641 return 23U; 642} 643static inline u32 ram_userd_get_hi_w(void) 644{ 645 return 24U; 646} 647static inline u32 ram_userd_gp_get_w(void) 648{ 649 return 34U; 650} 651static inline u32 ram_userd_gp_put_w(void) 652{ 653 return 35U; 654} 
655static inline u32 ram_userd_gp_top_level_get_w(void) 656{ 657 return 22U; 658} 659static inline u32 ram_userd_gp_top_level_get_hi_w(void) 660{ 661 return 23U; 662} 663static inline u32 ram_rl_entry_size_v(void) 664{ 665 return 0x00000010U; 666} 667static inline u32 ram_rl_entry_type_f(u32 v) 668{ 669 return (v & 0x1U) << 0U; 670} 671static inline u32 ram_rl_entry_type_channel_v(void) 672{ 673 return 0x00000000U; 674} 675static inline u32 ram_rl_entry_type_tsg_v(void) 676{ 677 return 0x00000001U; 678} 679static inline u32 ram_rl_entry_id_f(u32 v) 680{ 681 return (v & 0xfffU) << 0U; 682} 683static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v) 684{ 685 return (v & 0x1U) << 1U; 686} 687static inline u32 ram_rl_entry_chan_inst_target_f(u32 v) 688{ 689 return (v & 0x3U) << 4U; 690} 691static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void) 692{ 693 return 0x00000003U; 694} 695static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void) 696{ 697 return 0x00000002U; 698} 699static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void) 700{ 701 return 0x00000000U; 702} 703static inline u32 ram_rl_entry_chan_userd_target_f(u32 v) 704{ 705 return (v & 0x3U) << 6U; 706} 707static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void) 708{ 709 return 0x00000000U; 710} 711static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void) 712{ 713 return 0x00000001U; 714} 715static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void) 716{ 717 return 0x00000002U; 718} 719static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void) 720{ 721 return 0x00000003U; 722} 723static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v) 724{ 725 return (v & 0xffffffU) << 8U; 726} 727static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v) 728{ 729 return (v & 0xffffffffU) << 0U; 730} 731static inline u32 ram_rl_entry_chid_f(u32 v) 732{ 733 return (v & 0xfffU) << 0U; 734} 735static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 
v) 736{ 737 return (v & 0xfffffU) << 12U; 738} 739static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v) 740{ 741 return (v & 0xffffffffU) << 0U; 742} 743static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v) 744{ 745 return (v & 0xfU) << 16U; 746} 747static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void) 748{ 749 return 0x00000003U; 750} 751static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v) 752{ 753 return (v & 0xffU) << 24U; 754} 755static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void) 756{ 757 return 0x00000080U; 758} 759static inline u32 ram_rl_entry_tsg_length_f(u32 v) 760{ 761 return (v & 0xffU) << 0U; 762} 763static inline u32 ram_rl_entry_tsg_length_init_v(void) 764{ 765 return 0x00000000U; 766} 767static inline u32 ram_rl_entry_tsg_length_min_v(void) 768{ 769 return 0x00000001U; 770} 771static inline u32 ram_rl_entry_tsg_length_max_v(void) 772{ 773 return 0x00000080U; 774} 775static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v) 776{ 777 return (v & 0xfffU) << 0U; 778} 779static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void) 780{ 781 return 0x00000008U; 782} 783static inline u32 ram_rl_entry_chan_userd_align_shift_v(void) 784{ 785 return 0x00000008U; 786} 787static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void) 788{ 789 return 0x0000000cU; 790} 791#endif
diff --git a/include/nvgpu/hw/gv11b/hw_therm_gv11b.h b/include/nvgpu/hw/gv11b/hw_therm_gv11b.h
deleted file mode 100644
index 0050083..0000000
--- a/include/nvgpu/hw/gv11b/hw_therm_gv11b.h
+++ /dev/null
@@ -1,487 +0,0 @@ 1/* 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_therm_gv11b_h_ 57#define _hw_therm_gv11b_h_ 58 59static inline u32 therm_use_a_r(void) 60{ 61 return 0x00020798U; 62} 63static inline u32 therm_use_a_ext_therm_0_enable_f(void) 64{ 65 return 0x1U; 66} 67static inline u32 therm_use_a_ext_therm_1_enable_f(void) 68{ 69 return 0x2U; 70} 71static inline u32 therm_use_a_ext_therm_2_enable_f(void) 72{ 73 return 0x4U; 74} 75static inline u32 therm_evt_ext_therm_0_r(void) 76{ 77 return 0x00020700U; 78} 79static inline u32 therm_evt_ext_therm_0_slow_factor_f(u32 v) 80{ 81 return (v & 0x3fU) << 24U; 82} 83static inline u32 therm_evt_ext_therm_0_slow_factor_init_v(void) 84{ 85 return 0x00000001U; 86} 87static inline u32 therm_evt_ext_therm_0_mode_f(u32 v) 88{ 89 return (v & 0x3U) << 30U; 90} 91static inline u32 therm_evt_ext_therm_0_mode_normal_v(void) 92{ 93 return 0x00000000U; 94} 95static inline u32 therm_evt_ext_therm_0_mode_inverted_v(void) 96{ 97 return 0x00000001U; 98} 99static inline u32 therm_evt_ext_therm_0_mode_forced_v(void) 100{ 101 return 0x00000002U; 102} 103static inline u32 therm_evt_ext_therm_0_mode_cleared_v(void) 104{ 105 return 0x00000003U; 106} 107static inline u32 therm_evt_ext_therm_1_r(void) 108{ 109 return 0x00020704U; 110} 111static inline u32 
therm_evt_ext_therm_1_slow_factor_f(u32 v) 112{ 113 return (v & 0x3fU) << 24U; 114} 115static inline u32 therm_evt_ext_therm_1_slow_factor_init_v(void) 116{ 117 return 0x00000002U; 118} 119static inline u32 therm_evt_ext_therm_1_mode_f(u32 v) 120{ 121 return (v & 0x3U) << 30U; 122} 123static inline u32 therm_evt_ext_therm_1_mode_normal_v(void) 124{ 125 return 0x00000000U; 126} 127static inline u32 therm_evt_ext_therm_1_mode_inverted_v(void) 128{ 129 return 0x00000001U; 130} 131static inline u32 therm_evt_ext_therm_1_mode_forced_v(void) 132{ 133 return 0x00000002U; 134} 135static inline u32 therm_evt_ext_therm_1_mode_cleared_v(void) 136{ 137 return 0x00000003U; 138} 139static inline u32 therm_evt_ext_therm_2_r(void) 140{ 141 return 0x00020708U; 142} 143static inline u32 therm_evt_ext_therm_2_slow_factor_f(u32 v) 144{ 145 return (v & 0x3fU) << 24U; 146} 147static inline u32 therm_evt_ext_therm_2_slow_factor_init_v(void) 148{ 149 return 0x00000003U; 150} 151static inline u32 therm_evt_ext_therm_2_mode_f(u32 v) 152{ 153 return (v & 0x3U) << 30U; 154} 155static inline u32 therm_evt_ext_therm_2_mode_normal_v(void) 156{ 157 return 0x00000000U; 158} 159static inline u32 therm_evt_ext_therm_2_mode_inverted_v(void) 160{ 161 return 0x00000001U; 162} 163static inline u32 therm_evt_ext_therm_2_mode_forced_v(void) 164{ 165 return 0x00000002U; 166} 167static inline u32 therm_evt_ext_therm_2_mode_cleared_v(void) 168{ 169 return 0x00000003U; 170} 171static inline u32 therm_weight_1_r(void) 172{ 173 return 0x00020024U; 174} 175static inline u32 therm_config1_r(void) 176{ 177 return 0x00020050U; 178} 179static inline u32 therm_config2_r(void) 180{ 181 return 0x00020130U; 182} 183static inline u32 therm_config2_grad_step_duration_f(u32 v) 184{ 185 return (v & 0xfU) << 8U; 186} 187static inline u32 therm_config2_grad_step_duration_m(void) 188{ 189 return 0xfU << 8U; 190} 191static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) 192{ 193 return (v & 0x1U) << 24U; 194} 
195static inline u32 therm_config2_grad_enable_f(u32 v) 196{ 197 return (v & 0x1U) << 31U; 198} 199static inline u32 therm_gate_ctrl_r(u32 i) 200{ 201 return 0x00020200U + i*4U; 202} 203static inline u32 therm_gate_ctrl_eng_clk_m(void) 204{ 205 return 0x3U << 0U; 206} 207static inline u32 therm_gate_ctrl_eng_clk_run_f(void) 208{ 209 return 0x0U; 210} 211static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) 212{ 213 return 0x1U; 214} 215static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) 216{ 217 return 0x2U; 218} 219static inline u32 therm_gate_ctrl_blk_clk_m(void) 220{ 221 return 0x3U << 2U; 222} 223static inline u32 therm_gate_ctrl_blk_clk_run_f(void) 224{ 225 return 0x0U; 226} 227static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) 228{ 229 return 0x4U; 230} 231static inline u32 therm_gate_ctrl_idle_holdoff_m(void) 232{ 233 return 0x1U << 4U; 234} 235static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void) 236{ 237 return 0x0U; 238} 239static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void) 240{ 241 return 0x10U; 242} 243static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) 244{ 245 return (v & 0x1fU) << 8U; 246} 247static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) 248{ 249 return 0x1fU << 8U; 250} 251static inline u32 therm_gate_ctrl_eng_idle_filt_exp__prod_f(void) 252{ 253 return 0x200U; 254} 255static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) 256{ 257 return (v & 0x7U) << 13U; 258} 259static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) 260{ 261 return 0x7U << 13U; 262} 263static inline u32 therm_gate_ctrl_eng_idle_filt_mant__prod_f(void) 264{ 265 return 0x2000U; 266} 267static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) 268{ 269 return (v & 0xfU) << 16U; 270} 271static inline u32 therm_gate_ctrl_eng_delay_before_m(void) 272{ 273 return 0xfU << 16U; 274} 275static inline u32 therm_gate_ctrl_eng_delay_before__prod_f(void) 276{ 277 return 0x40000U; 278} 279static inline u32 
therm_gate_ctrl_eng_delay_after_f(u32 v) 280{ 281 return (v & 0xfU) << 20U; 282} 283static inline u32 therm_gate_ctrl_eng_delay_after_m(void) 284{ 285 return 0xfU << 20U; 286} 287static inline u32 therm_gate_ctrl_eng_delay_after__prod_f(void) 288{ 289 return 0x0U; 290} 291static inline u32 therm_fecs_idle_filter_r(void) 292{ 293 return 0x00020288U; 294} 295static inline u32 therm_fecs_idle_filter_value_m(void) 296{ 297 return 0xffffffffU << 0U; 298} 299static inline u32 therm_fecs_idle_filter_value__prod_f(void) 300{ 301 return 0x0U; 302} 303static inline u32 therm_hubmmu_idle_filter_r(void) 304{ 305 return 0x0002028cU; 306} 307static inline u32 therm_hubmmu_idle_filter_value_m(void) 308{ 309 return 0xffffffffU << 0U; 310} 311static inline u32 therm_hubmmu_idle_filter_value__prod_f(void) 312{ 313 return 0x0U; 314} 315static inline u32 therm_clk_slowdown_r(u32 i) 316{ 317 return 0x00020160U + i*4U; 318} 319static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) 320{ 321 return (v & 0x3fU) << 16U; 322} 323static inline u32 therm_clk_slowdown_idle_factor_m(void) 324{ 325 return 0x3fU << 16U; 326} 327static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) 328{ 329 return (r >> 16U) & 0x3fU; 330} 331static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) 332{ 333 return 0x0U; 334} 335static inline u32 therm_clk_slowdown_2_r(u32 i) 336{ 337 return 0x000201a0U + i*4U; 338} 339static inline u32 therm_clk_slowdown_2_idle_condition_a_select_f(u32 v) 340{ 341 return (v & 0xfU) << 0U; 342} 343static inline u32 therm_clk_slowdown_2_idle_condition_a_type_f(u32 v) 344{ 345 return (v & 0x7U) << 4U; 346} 347static inline u32 therm_clk_slowdown_2_idle_condition_a_type_v(u32 r) 348{ 349 return (r >> 4U) & 0x7U; 350} 351static inline u32 therm_clk_slowdown_2_idle_condition_a_type_never_f(void) 352{ 353 return 0x40U; 354} 355static inline u32 therm_clk_slowdown_2_idle_condition_b_type_f(u32 v) 356{ 357 return (v & 0x7U) << 12U; 358} 359static inline u32 
therm_clk_slowdown_2_idle_condition_b_type_v(u32 r) 360{ 361 return (r >> 12U) & 0x7U; 362} 363static inline u32 therm_clk_slowdown_2_idle_condition_b_type_never_f(void) 364{ 365 return 0x4000U; 366} 367static inline u32 therm_grad_stepping_table_r(u32 i) 368{ 369 return 0x000202c8U + i*4U; 370} 371static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) 372{ 373 return (v & 0x3fU) << 0U; 374} 375static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) 376{ 377 return 0x3fU << 0U; 378} 379static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1_f(void) 380{ 381 return 0x0U; 382} 383static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) 384{ 385 return 0x1U; 386} 387static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) 388{ 389 return 0x2U; 390} 391static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) 392{ 393 return 0x6U; 394} 395static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) 396{ 397 return 0xeU; 398} 399static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by16_f(void) 400{ 401 return 0x1eU; 402} 403static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by32_f(void) 404{ 405 return 0x3eU; 406} 407static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) 408{ 409 return (v & 0x3fU) << 6U; 410} 411static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) 412{ 413 return 0x3fU << 6U; 414} 415static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) 416{ 417 return (v & 0x3fU) << 12U; 418} 419static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) 420{ 421 return 0x3fU << 12U; 422} 423static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v) 424{ 425 return (v & 0x3fU) << 18U; 426} 427static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) 428{ 429 return 0x3fU << 18U; 430} 431static inline u32 
therm_grad_stepping_table_slowdown_factor4_f(u32 v) 432{ 433 return (v & 0x3fU) << 24U; 434} 435static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) 436{ 437 return 0x3fU << 24U; 438} 439static inline u32 therm_grad_stepping0_r(void) 440{ 441 return 0x000202c0U; 442} 443static inline u32 therm_grad_stepping0_feature_s(void) 444{ 445 return 1U; 446} 447static inline u32 therm_grad_stepping0_feature_f(u32 v) 448{ 449 return (v & 0x1U) << 0U; 450} 451static inline u32 therm_grad_stepping0_feature_m(void) 452{ 453 return 0x1U << 0U; 454} 455static inline u32 therm_grad_stepping0_feature_v(u32 r) 456{ 457 return (r >> 0U) & 0x1U; 458} 459static inline u32 therm_grad_stepping0_feature_enable_f(void) 460{ 461 return 0x1U; 462} 463static inline u32 therm_grad_stepping1_r(void) 464{ 465 return 0x000202c4U; 466} 467static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) 468{ 469 return (v & 0x1ffffU) << 0U; 470} 471static inline u32 therm_clk_timing_r(u32 i) 472{ 473 return 0x000203c0U + i*4U; 474} 475static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) 476{ 477 return (v & 0x1U) << 16U; 478} 479static inline u32 therm_clk_timing_grad_slowdown_m(void) 480{ 481 return 0x1U << 16U; 482} 483static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) 484{ 485 return 0x10000U; 486} 487#endif
diff --git a/include/nvgpu/hw/gv11b/hw_timer_gv11b.h b/include/nvgpu/hw/gv11b/hw_timer_gv11b.h
deleted file mode 100644
index 34285b3..0000000
--- a/include/nvgpu/hw/gv11b/hw_timer_gv11b.h
+++ /dev/null
@@ -1,127 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_timer_gv11b_h_ 57#define _hw_timer_gv11b_h_ 58 59static inline u32 timer_pri_timeout_r(void) 60{ 61 return 0x00009080U; 62} 63static inline u32 timer_pri_timeout_period_f(u32 v) 64{ 65 return (v & 0xffffffU) << 0U; 66} 67static inline u32 timer_pri_timeout_period_m(void) 68{ 69 return 0xffffffU << 0U; 70} 71static inline u32 timer_pri_timeout_period_v(u32 r) 72{ 73 return (r >> 0U) & 0xffffffU; 74} 75static inline u32 timer_pri_timeout_en_f(u32 v) 76{ 77 return (v & 0x1U) << 31U; 78} 79static inline u32 timer_pri_timeout_en_m(void) 80{ 81 return 0x1U << 31U; 82} 83static inline u32 timer_pri_timeout_en_v(u32 r) 84{ 85 return (r >> 31U) & 0x1U; 86} 87static inline u32 timer_pri_timeout_en_en_enabled_f(void) 88{ 89 return 0x80000000U; 90} 91static inline u32 timer_pri_timeout_en_en_disabled_f(void) 92{ 93 return 0x0U; 94} 95static inline u32 timer_pri_timeout_save_0_r(void) 96{ 97 return 0x00009084U; 98} 99static inline u32 timer_pri_timeout_save_0_fecs_tgt_v(u32 r) 100{ 101 return (r >> 31U) & 0x1U; 102} 103static inline u32 timer_pri_timeout_save_0_addr_v(u32 r) 104{ 105 return (r >> 2U) & 0x3fffffU; 106} 107static inline u32 timer_pri_timeout_save_0_write_v(u32 r) 108{ 109 return (r >> 1U) & 0x1U; 110} 111static inline u32 
timer_pri_timeout_save_1_r(void) 112{ 113 return 0x00009088U; 114} 115static inline u32 timer_pri_timeout_fecs_errcode_r(void) 116{ 117 return 0x0000908cU; 118} 119static inline u32 timer_time_0_r(void) 120{ 121 return 0x00009400U; 122} 123static inline u32 timer_time_1_r(void) 124{ 125 return 0x00009410U; 126} 127#endif
diff --git a/include/nvgpu/hw/gv11b/hw_top_gv11b.h b/include/nvgpu/hw/gv11b/hw_top_gv11b.h
deleted file mode 100644
index 89e4aeb..0000000
--- a/include/nvgpu/hw/gv11b/hw_top_gv11b.h
+++ /dev/null
@@ -1,235 +0,0 @@ 1/* 2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_top_gv11b_h_ 57#define _hw_top_gv11b_h_ 58 59static inline u32 top_num_gpcs_r(void) 60{ 61 return 0x00022430U; 62} 63static inline u32 top_num_gpcs_value_v(u32 r) 64{ 65 return (r >> 0U) & 0x1fU; 66} 67static inline u32 top_tpc_per_gpc_r(void) 68{ 69 return 0x00022434U; 70} 71static inline u32 top_tpc_per_gpc_value_v(u32 r) 72{ 73 return (r >> 0U) & 0x1fU; 74} 75static inline u32 top_num_fbps_r(void) 76{ 77 return 0x00022438U; 78} 79static inline u32 top_num_fbps_value_v(u32 r) 80{ 81 return (r >> 0U) & 0x1fU; 82} 83static inline u32 top_ltc_per_fbp_r(void) 84{ 85 return 0x00022450U; 86} 87static inline u32 top_ltc_per_fbp_value_v(u32 r) 88{ 89 return (r >> 0U) & 0x1fU; 90} 91static inline u32 top_slices_per_ltc_r(void) 92{ 93 return 0x0002245cU; 94} 95static inline u32 top_slices_per_ltc_value_v(u32 r) 96{ 97 return (r >> 0U) & 0x1fU; 98} 99static inline u32 top_num_ltcs_r(void) 100{ 101 return 0x00022454U; 102} 103static inline u32 top_num_ces_r(void) 104{ 105 return 0x00022444U; 106} 107static inline u32 top_num_ces_value_v(u32 r) 108{ 109 return (r >> 0U) & 0x1fU; 110} 111static inline u32 top_device_info_r(u32 i) 112{ 113 return 0x00022700U + i*4U; 114} 115static inline u32 top_device_info__size_1_v(void) 116{ 117 return 
0x00000040U; 118} 119static inline u32 top_device_info_chain_v(u32 r) 120{ 121 return (r >> 31U) & 0x1U; 122} 123static inline u32 top_device_info_chain_enable_v(void) 124{ 125 return 0x00000001U; 126} 127static inline u32 top_device_info_engine_enum_v(u32 r) 128{ 129 return (r >> 26U) & 0xfU; 130} 131static inline u32 top_device_info_runlist_enum_v(u32 r) 132{ 133 return (r >> 21U) & 0xfU; 134} 135static inline u32 top_device_info_intr_enum_v(u32 r) 136{ 137 return (r >> 15U) & 0x1fU; 138} 139static inline u32 top_device_info_reset_enum_v(u32 r) 140{ 141 return (r >> 9U) & 0x1fU; 142} 143static inline u32 top_device_info_type_enum_v(u32 r) 144{ 145 return (r >> 2U) & 0x1fffffffU; 146} 147static inline u32 top_device_info_type_enum_graphics_v(void) 148{ 149 return 0x00000000U; 150} 151static inline u32 top_device_info_type_enum_graphics_f(void) 152{ 153 return 0x0U; 154} 155static inline u32 top_device_info_type_enum_copy2_v(void) 156{ 157 return 0x00000003U; 158} 159static inline u32 top_device_info_type_enum_copy2_f(void) 160{ 161 return 0xcU; 162} 163static inline u32 top_device_info_type_enum_lce_v(void) 164{ 165 return 0x00000013U; 166} 167static inline u32 top_device_info_type_enum_lce_f(void) 168{ 169 return 0x4cU; 170} 171static inline u32 top_device_info_engine_v(u32 r) 172{ 173 return (r >> 5U) & 0x1U; 174} 175static inline u32 top_device_info_runlist_v(u32 r) 176{ 177 return (r >> 4U) & 0x1U; 178} 179static inline u32 top_device_info_intr_v(u32 r) 180{ 181 return (r >> 3U) & 0x1U; 182} 183static inline u32 top_device_info_reset_v(u32 r) 184{ 185 return (r >> 2U) & 0x1U; 186} 187static inline u32 top_device_info_entry_v(u32 r) 188{ 189 return (r >> 0U) & 0x3U; 190} 191static inline u32 top_device_info_entry_not_valid_v(void) 192{ 193 return 0x00000000U; 194} 195static inline u32 top_device_info_entry_enum_v(void) 196{ 197 return 0x00000002U; 198} 199static inline u32 top_device_info_entry_data_v(void) 200{ 201 return 0x00000001U; 202} 203static inline u32 
top_device_info_data_type_v(u32 r) 204{ 205 return (r >> 30U) & 0x1U; 206} 207static inline u32 top_device_info_data_type_enum2_v(void) 208{ 209 return 0x00000000U; 210} 211static inline u32 top_device_info_data_inst_id_v(u32 r) 212{ 213 return (r >> 26U) & 0xfU; 214} 215static inline u32 top_device_info_data_pri_base_v(u32 r) 216{ 217 return (r >> 12U) & 0xfffU; 218} 219static inline u32 top_device_info_data_pri_base_align_v(void) 220{ 221 return 0x0000000cU; 222} 223static inline u32 top_device_info_data_fault_id_enum_v(u32 r) 224{ 225 return (r >> 3U) & 0x7fU; 226} 227static inline u32 top_device_info_data_fault_id_v(u32 r) 228{ 229 return (r >> 2U) & 0x1U; 230} 231static inline u32 top_device_info_data_fault_id_valid_v(void) 232{ 233 return 0x00000001U; 234} 235#endif
diff --git a/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h b/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h
deleted file mode 100644
index e374969..0000000
--- a/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h
+++ /dev/null
@@ -1,95 +0,0 @@ 1/* 2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22/* 23 * Function naming determines intended use: 24 * 25 * <x>_r(void) : Returns the offset for register <x>. 26 * 27 * <x>_o(void) : Returns the offset for element <x>. 28 * 29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 30 * 31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 32 * 33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 34 * and masked to place it at field <y> of register <x>. This value 35 * can be |'d with others to produce a full register value for 36 * register <x>. 37 * 38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 39 * value can be ~'d and then &'d to clear the value of field <y> for 40 * register <x>. 
41 * 42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 43 * to place it at field <y> of register <x>. This value can be |'d 44 * with others to produce a full register value for <x>. 45 * 46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 47 * <x> value 'r' after being shifted to place its LSB at bit 0. 48 * This value is suitable for direct comparison with other unshifted 49 * values appropriate for use in field <y> of register <x>. 50 * 51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 52 * field <y> of register <x>. This value is suitable for direct 53 * comparison with unshifted values appropriate for use in field <y> 54 * of register <x>. 55 */ 56#ifndef _hw_usermode_gv11b_h_ 57#define _hw_usermode_gv11b_h_ 58 59static inline u32 usermode_cfg0_r(void) 60{ 61 return 0x00810000; 62} 63static inline u32 usermode_cfg0_usermode_class_id_f(u32 v) 64{ 65 return (v & 0xffff) << 0; 66} 67static inline u32 usermode_cfg0_usermode_class_id_value_v(void) 68{ 69 return 0x0000c361; 70} 71static inline u32 usermode_time_0_r(void) 72{ 73 return 0x00810080; 74} 75static inline u32 usermode_time_0_nsec_f(u32 v) 76{ 77 return (v & 0x7ffffff) << 5; 78} 79static inline u32 usermode_time_1_r(void) 80{ 81 return 0x00810084; 82} 83static inline u32 usermode_time_1_nsec_f(u32 v) 84{ 85 return (v & 0x1fffffff) << 0; 86} 87static inline u32 usermode_notify_channel_pending_r(void) 88{ 89 return 0x00810090; 90} 91static inline u32 usermode_notify_channel_pending_id_f(u32 v) 92{ 93 return (v & 0xffffffff) << 0; 94} 95#endif
diff --git a/include/nvgpu/hw_sim.h b/include/nvgpu/hw_sim.h
deleted file mode 100644
index 89ce6da..0000000
--- a/include/nvgpu/hw_sim.h
+++ /dev/null
@@ -1,2153 +0,0 @@ 1/* 2 * Copyright (c) 2012-2018, NVIDIA Corporation. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 21 */ 22 23 /* 24 * Function naming determines intended use: 25 * 26 * <x>_r(void) : Returns the offset for register <x>. 27 * 28 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. 29 * 30 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. 31 * 32 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted 33 * and masked to place it at field <y> of register <x>. This value 34 * can be |'d with others to produce a full register value for 35 * register <x>. 36 * 37 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This 38 * value can be ~'d and then &'d to clear the value of field <y> for 39 * register <x>. 40 * 41 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted 42 * to place it at field <y> of register <x>. 
This value can be |'d 43 * with others to produce a full register value for <x>. 44 * 45 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register 46 * <x> value 'r' after being shifted to place its LSB at bit 0. 47 * This value is suitable for direct comparison with other unshifted 48 * values appropriate for use in field <y> of register <x>. 49 * 50 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for 51 * field <y> of register <x>. This value is suitable for direct 52 * comparison with unshifted values appropriate for use in field <y> 53 * of register <x>. 54 */ 55 56#ifndef __hw_sim_h__ 57#define __hw_sim_h__ 58/*This file is autogenerated. Do not edit. */ 59 60static inline u32 sim_send_ring_r(void) 61{ 62 return 0x00000000; 63} 64static inline u32 sim_send_ring_target_s(void) 65{ 66 return 2; 67} 68static inline u32 sim_send_ring_target_f(u32 v) 69{ 70 return (v & 0x3) << 0; 71} 72static inline u32 sim_send_ring_target_m(void) 73{ 74 return 0x3 << 0; 75} 76static inline u32 sim_send_ring_target_v(u32 r) 77{ 78 return (r >> 0) & 0x3; 79} 80static inline u32 sim_send_ring_target_phys_init_v(void) 81{ 82 return 0x00000001; 83} 84static inline u32 sim_send_ring_target_phys_init_f(void) 85{ 86 return 0x1; 87} 88static inline u32 sim_send_ring_target_phys__init_v(void) 89{ 90 return 0x00000001; 91} 92static inline u32 sim_send_ring_target_phys__init_f(void) 93{ 94 return 0x1; 95} 96static inline u32 sim_send_ring_target_phys__prod_v(void) 97{ 98 return 0x00000001; 99} 100static inline u32 sim_send_ring_target_phys__prod_f(void) 101{ 102 return 0x1; 103} 104static inline u32 sim_send_ring_target_phys_nvm_v(void) 105{ 106 return 0x00000001; 107} 108static inline u32 sim_send_ring_target_phys_nvm_f(void) 109{ 110 return 0x1; 111} 112static inline u32 sim_send_ring_target_phys_pci_v(void) 113{ 114 return 0x00000002; 115} 116static inline u32 sim_send_ring_target_phys_pci_f(void) 117{ 118 return 0x2; 119} 120static inline u32 
sim_send_ring_target_phys_pci_coherent_v(void) 121{ 122 return 0x00000003; 123} 124static inline u32 sim_send_ring_target_phys_pci_coherent_f(void) 125{ 126 return 0x3; 127} 128static inline u32 sim_send_ring_status_s(void) 129{ 130 return 1; 131} 132static inline u32 sim_send_ring_status_f(u32 v) 133{ 134 return (v & 0x1) << 3; 135} 136static inline u32 sim_send_ring_status_m(void) 137{ 138 return 0x1 << 3; 139} 140static inline u32 sim_send_ring_status_v(u32 r) 141{ 142 return (r >> 3) & 0x1; 143} 144static inline u32 sim_send_ring_status_init_v(void) 145{ 146 return 0x00000000; 147} 148static inline u32 sim_send_ring_status_init_f(void) 149{ 150 return 0x0; 151} 152static inline u32 sim_send_ring_status__init_v(void) 153{ 154 return 0x00000000; 155} 156static inline u32 sim_send_ring_status__init_f(void) 157{ 158 return 0x0; 159} 160static inline u32 sim_send_ring_status__prod_v(void) 161{ 162 return 0x00000000; 163} 164static inline u32 sim_send_ring_status__prod_f(void) 165{ 166 return 0x0; 167} 168static inline u32 sim_send_ring_status_invalid_v(void) 169{ 170 return 0x00000000; 171} 172static inline u32 sim_send_ring_status_invalid_f(void) 173{ 174 return 0x0; 175} 176static inline u32 sim_send_ring_status_valid_v(void) 177{ 178 return 0x00000001; 179} 180static inline u32 sim_send_ring_status_valid_f(void) 181{ 182 return 0x8; 183} 184static inline u32 sim_send_ring_size_s(void) 185{ 186 return 2; 187} 188static inline u32 sim_send_ring_size_f(u32 v) 189{ 190 return (v & 0x3) << 4; 191} 192static inline u32 sim_send_ring_size_m(void) 193{ 194 return 0x3 << 4; 195} 196static inline u32 sim_send_ring_size_v(u32 r) 197{ 198 return (r >> 4) & 0x3; 199} 200static inline u32 sim_send_ring_size_init_v(void) 201{ 202 return 0x00000000; 203} 204static inline u32 sim_send_ring_size_init_f(void) 205{ 206 return 0x0; 207} 208static inline u32 sim_send_ring_size__init_v(void) 209{ 210 return 0x00000000; 211} 212static inline u32 sim_send_ring_size__init_f(void) 213{ 214 
return 0x0; 215} 216static inline u32 sim_send_ring_size__prod_v(void) 217{ 218 return 0x00000000; 219} 220static inline u32 sim_send_ring_size__prod_f(void) 221{ 222 return 0x0; 223} 224static inline u32 sim_send_ring_size_4kb_v(void) 225{ 226 return 0x00000000; 227} 228static inline u32 sim_send_ring_size_4kb_f(void) 229{ 230 return 0x0; 231} 232static inline u32 sim_send_ring_size_8kb_v(void) 233{ 234 return 0x00000001; 235} 236static inline u32 sim_send_ring_size_8kb_f(void) 237{ 238 return 0x10; 239} 240static inline u32 sim_send_ring_size_12kb_v(void) 241{ 242 return 0x00000002; 243} 244static inline u32 sim_send_ring_size_12kb_f(void) 245{ 246 return 0x20; 247} 248static inline u32 sim_send_ring_size_16kb_v(void) 249{ 250 return 0x00000003; 251} 252static inline u32 sim_send_ring_size_16kb_f(void) 253{ 254 return 0x30; 255} 256static inline u32 sim_send_ring_gp_in_ring_s(void) 257{ 258 return 1; 259} 260static inline u32 sim_send_ring_gp_in_ring_f(u32 v) 261{ 262 return (v & 0x1) << 11; 263} 264static inline u32 sim_send_ring_gp_in_ring_m(void) 265{ 266 return 0x1 << 11; 267} 268static inline u32 sim_send_ring_gp_in_ring_v(u32 r) 269{ 270 return (r >> 11) & 0x1; 271} 272static inline u32 sim_send_ring_gp_in_ring__init_v(void) 273{ 274 return 0x00000000; 275} 276static inline u32 sim_send_ring_gp_in_ring__init_f(void) 277{ 278 return 0x0; 279} 280static inline u32 sim_send_ring_gp_in_ring__prod_v(void) 281{ 282 return 0x00000000; 283} 284static inline u32 sim_send_ring_gp_in_ring__prod_f(void) 285{ 286 return 0x0; 287} 288static inline u32 sim_send_ring_gp_in_ring_no_v(void) 289{ 290 return 0x00000000; 291} 292static inline u32 sim_send_ring_gp_in_ring_no_f(void) 293{ 294 return 0x0; 295} 296static inline u32 sim_send_ring_gp_in_ring_yes_v(void) 297{ 298 return 0x00000001; 299} 300static inline u32 sim_send_ring_gp_in_ring_yes_f(void) 301{ 302 return 0x800; 303} 304static inline u32 sim_send_ring_addr_lo_s(void) 305{ 306 return 20; 307} 308static inline u32 
sim_send_ring_addr_lo_f(u32 v) 309{ 310 return (v & 0xfffff) << 12; 311} 312static inline u32 sim_send_ring_addr_lo_m(void) 313{ 314 return 0xfffff << 12; 315} 316static inline u32 sim_send_ring_addr_lo_v(u32 r) 317{ 318 return (r >> 12) & 0xfffff; 319} 320static inline u32 sim_send_ring_addr_lo__init_v(void) 321{ 322 return 0x00000000; 323} 324static inline u32 sim_send_ring_addr_lo__init_f(void) 325{ 326 return 0x0; 327} 328static inline u32 sim_send_ring_addr_lo__prod_v(void) 329{ 330 return 0x00000000; 331} 332static inline u32 sim_send_ring_addr_lo__prod_f(void) 333{ 334 return 0x0; 335} 336static inline u32 sim_send_ring_hi_r(void) 337{ 338 return 0x00000004; 339} 340static inline u32 sim_send_ring_hi_addr_s(void) 341{ 342 return 20; 343} 344static inline u32 sim_send_ring_hi_addr_f(u32 v) 345{ 346 return (v & 0xfffff) << 0; 347} 348static inline u32 sim_send_ring_hi_addr_m(void) 349{ 350 return 0xfffff << 0; 351} 352static inline u32 sim_send_ring_hi_addr_v(u32 r) 353{ 354 return (r >> 0) & 0xfffff; 355} 356static inline u32 sim_send_ring_hi_addr__init_v(void) 357{ 358 return 0x00000000; 359} 360static inline u32 sim_send_ring_hi_addr__init_f(void) 361{ 362 return 0x0; 363} 364static inline u32 sim_send_ring_hi_addr__prod_v(void) 365{ 366 return 0x00000000; 367} 368static inline u32 sim_send_ring_hi_addr__prod_f(void) 369{ 370 return 0x0; 371} 372static inline u32 sim_send_put_r(void) 373{ 374 return 0x00000008; 375} 376static inline u32 sim_send_put_pointer_s(void) 377{ 378 return 29; 379} 380static inline u32 sim_send_put_pointer_f(u32 v) 381{ 382 return (v & 0x1fffffff) << 3; 383} 384static inline u32 sim_send_put_pointer_m(void) 385{ 386 return 0x1fffffff << 3; 387} 388static inline u32 sim_send_put_pointer_v(u32 r) 389{ 390 return (r >> 3) & 0x1fffffff; 391} 392static inline u32 sim_send_get_r(void) 393{ 394 return 0x0000000c; 395} 396static inline u32 sim_send_get_pointer_s(void) 397{ 398 return 29; 399} 400static inline u32 sim_send_get_pointer_f(u32 
v) 401{ 402 return (v & 0x1fffffff) << 3; 403} 404static inline u32 sim_send_get_pointer_m(void) 405{ 406 return 0x1fffffff << 3; 407} 408static inline u32 sim_send_get_pointer_v(u32 r) 409{ 410 return (r >> 3) & 0x1fffffff; 411} 412static inline u32 sim_recv_ring_r(void) 413{ 414 return 0x00000010; 415} 416static inline u32 sim_recv_ring_target_s(void) 417{ 418 return 2; 419} 420static inline u32 sim_recv_ring_target_f(u32 v) 421{ 422 return (v & 0x3) << 0; 423} 424static inline u32 sim_recv_ring_target_m(void) 425{ 426 return 0x3 << 0; 427} 428static inline u32 sim_recv_ring_target_v(u32 r) 429{ 430 return (r >> 0) & 0x3; 431} 432static inline u32 sim_recv_ring_target_phys_init_v(void) 433{ 434 return 0x00000001; 435} 436static inline u32 sim_recv_ring_target_phys_init_f(void) 437{ 438 return 0x1; 439} 440static inline u32 sim_recv_ring_target_phys__init_v(void) 441{ 442 return 0x00000001; 443} 444static inline u32 sim_recv_ring_target_phys__init_f(void) 445{ 446 return 0x1; 447} 448static inline u32 sim_recv_ring_target_phys__prod_v(void) 449{ 450 return 0x00000001; 451} 452static inline u32 sim_recv_ring_target_phys__prod_f(void) 453{ 454 return 0x1; 455} 456static inline u32 sim_recv_ring_target_phys_nvm_v(void) 457{ 458 return 0x00000001; 459} 460static inline u32 sim_recv_ring_target_phys_nvm_f(void) 461{ 462 return 0x1; 463} 464static inline u32 sim_recv_ring_target_phys_pci_v(void) 465{ 466 return 0x00000002; 467} 468static inline u32 sim_recv_ring_target_phys_pci_f(void) 469{ 470 return 0x2; 471} 472static inline u32 sim_recv_ring_target_phys_pci_coherent_v(void) 473{ 474 return 0x00000003; 475} 476static inline u32 sim_recv_ring_target_phys_pci_coherent_f(void) 477{ 478 return 0x3; 479} 480static inline u32 sim_recv_ring_status_s(void) 481{ 482 return 1; 483} 484static inline u32 sim_recv_ring_status_f(u32 v) 485{ 486 return (v & 0x1) << 3; 487} 488static inline u32 sim_recv_ring_status_m(void) 489{ 490 return 0x1 << 3; 491} 492static inline u32 
sim_recv_ring_status_v(u32 r) 493{ 494 return (r >> 3) & 0x1; 495} 496static inline u32 sim_recv_ring_status_init_v(void) 497{ 498 return 0x00000000; 499} 500static inline u32 sim_recv_ring_status_init_f(void) 501{ 502 return 0x0; 503} 504static inline u32 sim_recv_ring_status__init_v(void) 505{ 506 return 0x00000000; 507} 508static inline u32 sim_recv_ring_status__init_f(void) 509{ 510 return 0x0; 511} 512static inline u32 sim_recv_ring_status__prod_v(void) 513{ 514 return 0x00000000; 515} 516static inline u32 sim_recv_ring_status__prod_f(void) 517{ 518 return 0x0; 519} 520static inline u32 sim_recv_ring_status_invalid_v(void) 521{ 522 return 0x00000000; 523} 524static inline u32 sim_recv_ring_status_invalid_f(void) 525{ 526 return 0x0; 527} 528static inline u32 sim_recv_ring_status_valid_v(void) 529{ 530 return 0x00000001; 531} 532static inline u32 sim_recv_ring_status_valid_f(void) 533{ 534 return 0x8; 535} 536static inline u32 sim_recv_ring_size_s(void) 537{ 538 return 2; 539} 540static inline u32 sim_recv_ring_size_f(u32 v) 541{ 542 return (v & 0x3) << 4; 543} 544static inline u32 sim_recv_ring_size_m(void) 545{ 546 return 0x3 << 4; 547} 548static inline u32 sim_recv_ring_size_v(u32 r) 549{ 550 return (r >> 4) & 0x3; 551} 552static inline u32 sim_recv_ring_size_init_v(void) 553{ 554 return 0x00000000; 555} 556static inline u32 sim_recv_ring_size_init_f(void) 557{ 558 return 0x0; 559} 560static inline u32 sim_recv_ring_size__init_v(void) 561{ 562 return 0x00000000; 563} 564static inline u32 sim_recv_ring_size__init_f(void) 565{ 566 return 0x0; 567} 568static inline u32 sim_recv_ring_size__prod_v(void) 569{ 570 return 0x00000000; 571} 572static inline u32 sim_recv_ring_size__prod_f(void) 573{ 574 return 0x0; 575} 576static inline u32 sim_recv_ring_size_4kb_v(void) 577{ 578 return 0x00000000; 579} 580static inline u32 sim_recv_ring_size_4kb_f(void) 581{ 582 return 0x0; 583} 584static inline u32 sim_recv_ring_size_8kb_v(void) 585{ 586 return 0x00000001; 587} 
588static inline u32 sim_recv_ring_size_8kb_f(void) 589{ 590 return 0x10; 591} 592static inline u32 sim_recv_ring_size_12kb_v(void) 593{ 594 return 0x00000002; 595} 596static inline u32 sim_recv_ring_size_12kb_f(void) 597{ 598 return 0x20; 599} 600static inline u32 sim_recv_ring_size_16kb_v(void) 601{ 602 return 0x00000003; 603} 604static inline u32 sim_recv_ring_size_16kb_f(void) 605{ 606 return 0x30; 607} 608static inline u32 sim_recv_ring_gp_in_ring_s(void) 609{ 610 return 1; 611} 612static inline u32 sim_recv_ring_gp_in_ring_f(u32 v) 613{ 614 return (v & 0x1) << 11; 615} 616static inline u32 sim_recv_ring_gp_in_ring_m(void) 617{ 618 return 0x1 << 11; 619} 620static inline u32 sim_recv_ring_gp_in_ring_v(u32 r) 621{ 622 return (r >> 11) & 0x1; 623} 624static inline u32 sim_recv_ring_gp_in_ring__init_v(void) 625{ 626 return 0x00000000; 627} 628static inline u32 sim_recv_ring_gp_in_ring__init_f(void) 629{ 630 return 0x0; 631} 632static inline u32 sim_recv_ring_gp_in_ring__prod_v(void) 633{ 634 return 0x00000000; 635} 636static inline u32 sim_recv_ring_gp_in_ring__prod_f(void) 637{ 638 return 0x0; 639} 640static inline u32 sim_recv_ring_gp_in_ring_no_v(void) 641{ 642 return 0x00000000; 643} 644static inline u32 sim_recv_ring_gp_in_ring_no_f(void) 645{ 646 return 0x0; 647} 648static inline u32 sim_recv_ring_gp_in_ring_yes_v(void) 649{ 650 return 0x00000001; 651} 652static inline u32 sim_recv_ring_gp_in_ring_yes_f(void) 653{ 654 return 0x800; 655} 656static inline u32 sim_recv_ring_addr_lo_s(void) 657{ 658 return 20; 659} 660static inline u32 sim_recv_ring_addr_lo_f(u32 v) 661{ 662 return (v & 0xfffff) << 12; 663} 664static inline u32 sim_recv_ring_addr_lo_m(void) 665{ 666 return 0xfffff << 12; 667} 668static inline u32 sim_recv_ring_addr_lo_v(u32 r) 669{ 670 return (r >> 12) & 0xfffff; 671} 672static inline u32 sim_recv_ring_addr_lo__init_v(void) 673{ 674 return 0x00000000; 675} 676static inline u32 sim_recv_ring_addr_lo__init_f(void) 677{ 678 return 0x0; 679} 
680static inline u32 sim_recv_ring_addr_lo__prod_v(void) 681{ 682 return 0x00000000; 683} 684static inline u32 sim_recv_ring_addr_lo__prod_f(void) 685{ 686 return 0x0; 687} 688static inline u32 sim_recv_ring_hi_r(void) 689{ 690 return 0x00000014; 691} 692static inline u32 sim_recv_ring_hi_addr_s(void) 693{ 694 return 20; 695} 696static inline u32 sim_recv_ring_hi_addr_f(u32 v) 697{ 698 return (v & 0xfffff) << 0; 699} 700static inline u32 sim_recv_ring_hi_addr_m(void) 701{ 702 return 0xfffff << 0; 703} 704static inline u32 sim_recv_ring_hi_addr_v(u32 r) 705{ 706 return (r >> 0) & 0xfffff; 707} 708static inline u32 sim_recv_ring_hi_addr__init_v(void) 709{ 710 return 0x00000000; 711} 712static inline u32 sim_recv_ring_hi_addr__init_f(void) 713{ 714 return 0x0; 715} 716static inline u32 sim_recv_ring_hi_addr__prod_v(void) 717{ 718 return 0x00000000; 719} 720static inline u32 sim_recv_ring_hi_addr__prod_f(void) 721{ 722 return 0x0; 723} 724static inline u32 sim_recv_put_r(void) 725{ 726 return 0x00000018; 727} 728static inline u32 sim_recv_put_pointer_s(void) 729{ 730 return 11; 731} 732static inline u32 sim_recv_put_pointer_f(u32 v) 733{ 734 return (v & 0x7ff) << 3; 735} 736static inline u32 sim_recv_put_pointer_m(void) 737{ 738 return 0x7ff << 3; 739} 740static inline u32 sim_recv_put_pointer_v(u32 r) 741{ 742 return (r >> 3) & 0x7ff; 743} 744static inline u32 sim_recv_get_r(void) 745{ 746 return 0x0000001c; 747} 748static inline u32 sim_recv_get_pointer_s(void) 749{ 750 return 11; 751} 752static inline u32 sim_recv_get_pointer_f(u32 v) 753{ 754 return (v & 0x7ff) << 3; 755} 756static inline u32 sim_recv_get_pointer_m(void) 757{ 758 return 0x7ff << 3; 759} 760static inline u32 sim_recv_get_pointer_v(u32 r) 761{ 762 return (r >> 3) & 0x7ff; 763} 764static inline u32 sim_config_r(void) 765{ 766 return 0x00000020; 767} 768static inline u32 sim_config_mode_s(void) 769{ 770 return 1; 771} 772static inline u32 sim_config_mode_f(u32 v) 773{ 774 return (v & 0x1) << 0; 775} 
776static inline u32 sim_config_mode_m(void) 777{ 778 return 0x1 << 0; 779} 780static inline u32 sim_config_mode_v(u32 r) 781{ 782 return (r >> 0) & 0x1; 783} 784static inline u32 sim_config_mode_disabled_v(void) 785{ 786 return 0x00000000; 787} 788static inline u32 sim_config_mode_disabled_f(void) 789{ 790 return 0x0; 791} 792static inline u32 sim_config_mode_enabled_v(void) 793{ 794 return 0x00000001; 795} 796static inline u32 sim_config_mode_enabled_f(void) 797{ 798 return 0x1; 799} 800static inline u32 sim_config_channels_s(void) 801{ 802 return 7; 803} 804static inline u32 sim_config_channels_f(u32 v) 805{ 806 return (v & 0x7f) << 1; 807} 808static inline u32 sim_config_channels_m(void) 809{ 810 return 0x7f << 1; 811} 812static inline u32 sim_config_channels_v(u32 r) 813{ 814 return (r >> 1) & 0x7f; 815} 816static inline u32 sim_config_channels_none_v(void) 817{ 818 return 0x00000000; 819} 820static inline u32 sim_config_channels_none_f(void) 821{ 822 return 0x0; 823} 824static inline u32 sim_config_cached_only_s(void) 825{ 826 return 1; 827} 828static inline u32 sim_config_cached_only_f(u32 v) 829{ 830 return (v & 0x1) << 8; 831} 832static inline u32 sim_config_cached_only_m(void) 833{ 834 return 0x1 << 8; 835} 836static inline u32 sim_config_cached_only_v(u32 r) 837{ 838 return (r >> 8) & 0x1; 839} 840static inline u32 sim_config_cached_only_disabled_v(void) 841{ 842 return 0x00000000; 843} 844static inline u32 sim_config_cached_only_disabled_f(void) 845{ 846 return 0x0; 847} 848static inline u32 sim_config_cached_only_enabled_v(void) 849{ 850 return 0x00000001; 851} 852static inline u32 sim_config_cached_only_enabled_f(void) 853{ 854 return 0x100; 855} 856static inline u32 sim_config_validity_s(void) 857{ 858 return 2; 859} 860static inline u32 sim_config_validity_f(u32 v) 861{ 862 return (v & 0x3) << 9; 863} 864static inline u32 sim_config_validity_m(void) 865{ 866 return 0x3 << 9; 867} 868static inline u32 sim_config_validity_v(u32 r) 869{ 870 return (r 
>> 9) & 0x3; 871} 872static inline u32 sim_config_validity__init_v(void) 873{ 874 return 0x00000001; 875} 876static inline u32 sim_config_validity__init_f(void) 877{ 878 return 0x200; 879} 880static inline u32 sim_config_validity_valid_v(void) 881{ 882 return 0x00000001; 883} 884static inline u32 sim_config_validity_valid_f(void) 885{ 886 return 0x200; 887} 888static inline u32 sim_config_simulation_s(void) 889{ 890 return 2; 891} 892static inline u32 sim_config_simulation_f(u32 v) 893{ 894 return (v & 0x3) << 12; 895} 896static inline u32 sim_config_simulation_m(void) 897{ 898 return 0x3 << 12; 899} 900static inline u32 sim_config_simulation_v(u32 r) 901{ 902 return (r >> 12) & 0x3; 903} 904static inline u32 sim_config_simulation_disabled_v(void) 905{ 906 return 0x00000000; 907} 908static inline u32 sim_config_simulation_disabled_f(void) 909{ 910 return 0x0; 911} 912static inline u32 sim_config_simulation_fmodel_v(void) 913{ 914 return 0x00000001; 915} 916static inline u32 sim_config_simulation_fmodel_f(void) 917{ 918 return 0x1000; 919} 920static inline u32 sim_config_simulation_rtlsim_v(void) 921{ 922 return 0x00000002; 923} 924static inline u32 sim_config_simulation_rtlsim_f(void) 925{ 926 return 0x2000; 927} 928static inline u32 sim_config_secondary_display_s(void) 929{ 930 return 1; 931} 932static inline u32 sim_config_secondary_display_f(u32 v) 933{ 934 return (v & 0x1) << 14; 935} 936static inline u32 sim_config_secondary_display_m(void) 937{ 938 return 0x1 << 14; 939} 940static inline u32 sim_config_secondary_display_v(u32 r) 941{ 942 return (r >> 14) & 0x1; 943} 944static inline u32 sim_config_secondary_display_disabled_v(void) 945{ 946 return 0x00000000; 947} 948static inline u32 sim_config_secondary_display_disabled_f(void) 949{ 950 return 0x0; 951} 952static inline u32 sim_config_secondary_display_enabled_v(void) 953{ 954 return 0x00000001; 955} 956static inline u32 sim_config_secondary_display_enabled_f(void) 957{ 958 return 0x4000; 959} 960static 
inline u32 sim_config_num_heads_s(void) 961{ 962 return 8; 963} 964static inline u32 sim_config_num_heads_f(u32 v) 965{ 966 return (v & 0xff) << 17; 967} 968static inline u32 sim_config_num_heads_m(void) 969{ 970 return 0xff << 17; 971} 972static inline u32 sim_config_num_heads_v(u32 r) 973{ 974 return (r >> 17) & 0xff; 975} 976static inline u32 sim_event_ring_r(void) 977{ 978 return 0x00000030; 979} 980static inline u32 sim_event_ring_target_s(void) 981{ 982 return 2; 983} 984static inline u32 sim_event_ring_target_f(u32 v) 985{ 986 return (v & 0x3) << 0; 987} 988static inline u32 sim_event_ring_target_m(void) 989{ 990 return 0x3 << 0; 991} 992static inline u32 sim_event_ring_target_v(u32 r) 993{ 994 return (r >> 0) & 0x3; 995} 996static inline u32 sim_event_ring_target_phys_init_v(void) 997{ 998 return 0x00000001; 999} 1000static inline u32 sim_event_ring_target_phys_init_f(void) 1001{ 1002 return 0x1; 1003} 1004static inline u32 sim_event_ring_target_phys__init_v(void) 1005{ 1006 return 0x00000001; 1007} 1008static inline u32 sim_event_ring_target_phys__init_f(void) 1009{ 1010 return 0x1; 1011} 1012static inline u32 sim_event_ring_target_phys__prod_v(void) 1013{ 1014 return 0x00000001; 1015} 1016static inline u32 sim_event_ring_target_phys__prod_f(void) 1017{ 1018 return 0x1; 1019} 1020static inline u32 sim_event_ring_target_phys_nvm_v(void) 1021{ 1022 return 0x00000001; 1023} 1024static inline u32 sim_event_ring_target_phys_nvm_f(void) 1025{ 1026 return 0x1; 1027} 1028static inline u32 sim_event_ring_target_phys_pci_v(void) 1029{ 1030 return 0x00000002; 1031} 1032static inline u32 sim_event_ring_target_phys_pci_f(void) 1033{ 1034 return 0x2; 1035} 1036static inline u32 sim_event_ring_target_phys_pci_coherent_v(void) 1037{ 1038 return 0x00000003; 1039} 1040static inline u32 sim_event_ring_target_phys_pci_coherent_f(void) 1041{ 1042 return 0x3; 1043} 1044static inline u32 sim_event_ring_status_s(void) 1045{ 1046 return 1; 1047} 1048static inline u32 
sim_event_ring_status_f(u32 v) 1049{ 1050 return (v & 0x1) << 3; 1051} 1052static inline u32 sim_event_ring_status_m(void) 1053{ 1054 return 0x1 << 3; 1055} 1056static inline u32 sim_event_ring_status_v(u32 r) 1057{ 1058 return (r >> 3) & 0x1; 1059} 1060static inline u32 sim_event_ring_status_init_v(void) 1061{ 1062 return 0x00000000; 1063} 1064static inline u32 sim_event_ring_status_init_f(void) 1065{ 1066 return 0x0; 1067} 1068static inline u32 sim_event_ring_status__init_v(void) 1069{ 1070 return 0x00000000; 1071} 1072static inline u32 sim_event_ring_status__init_f(void) 1073{ 1074 return 0x0; 1075} 1076static inline u32 sim_event_ring_status__prod_v(void) 1077{ 1078 return 0x00000000; 1079} 1080static inline u32 sim_event_ring_status__prod_f(void) 1081{ 1082 return 0x0; 1083} 1084static inline u32 sim_event_ring_status_invalid_v(void) 1085{ 1086 return 0x00000000; 1087} 1088static inline u32 sim_event_ring_status_invalid_f(void) 1089{ 1090 return 0x0; 1091} 1092static inline u32 sim_event_ring_status_valid_v(void) 1093{ 1094 return 0x00000001; 1095} 1096static inline u32 sim_event_ring_status_valid_f(void) 1097{ 1098 return 0x8; 1099} 1100static inline u32 sim_event_ring_size_s(void) 1101{ 1102 return 2; 1103} 1104static inline u32 sim_event_ring_size_f(u32 v) 1105{ 1106 return (v & 0x3) << 4; 1107} 1108static inline u32 sim_event_ring_size_m(void) 1109{ 1110 return 0x3 << 4; 1111} 1112static inline u32 sim_event_ring_size_v(u32 r) 1113{ 1114 return (r >> 4) & 0x3; 1115} 1116static inline u32 sim_event_ring_size_init_v(void) 1117{ 1118 return 0x00000000; 1119} 1120static inline u32 sim_event_ring_size_init_f(void) 1121{ 1122 return 0x0; 1123} 1124static inline u32 sim_event_ring_size__init_v(void) 1125{ 1126 return 0x00000000; 1127} 1128static inline u32 sim_event_ring_size__init_f(void) 1129{ 1130 return 0x0; 1131} 1132static inline u32 sim_event_ring_size__prod_v(void) 1133{ 1134 return 0x00000000; 1135} 1136static inline u32 sim_event_ring_size__prod_f(void) 
1137{ 1138 return 0x0; 1139} 1140static inline u32 sim_event_ring_size_4kb_v(void) 1141{ 1142 return 0x00000000; 1143} 1144static inline u32 sim_event_ring_size_4kb_f(void) 1145{ 1146 return 0x0; 1147} 1148static inline u32 sim_event_ring_size_8kb_v(void) 1149{ 1150 return 0x00000001; 1151} 1152static inline u32 sim_event_ring_size_8kb_f(void) 1153{ 1154 return 0x10; 1155} 1156static inline u32 sim_event_ring_size_12kb_v(void) 1157{ 1158 return 0x00000002; 1159} 1160static inline u32 sim_event_ring_size_12kb_f(void) 1161{ 1162 return 0x20; 1163} 1164static inline u32 sim_event_ring_size_16kb_v(void) 1165{ 1166 return 0x00000003; 1167} 1168static inline u32 sim_event_ring_size_16kb_f(void) 1169{ 1170 return 0x30; 1171} 1172static inline u32 sim_event_ring_gp_in_ring_s(void) 1173{ 1174 return 1; 1175} 1176static inline u32 sim_event_ring_gp_in_ring_f(u32 v) 1177{ 1178 return (v & 0x1) << 11; 1179} 1180static inline u32 sim_event_ring_gp_in_ring_m(void) 1181{ 1182 return 0x1 << 11; 1183} 1184static inline u32 sim_event_ring_gp_in_ring_v(u32 r) 1185{ 1186 return (r >> 11) & 0x1; 1187} 1188static inline u32 sim_event_ring_gp_in_ring__init_v(void) 1189{ 1190 return 0x00000000; 1191} 1192static inline u32 sim_event_ring_gp_in_ring__init_f(void) 1193{ 1194 return 0x0; 1195} 1196static inline u32 sim_event_ring_gp_in_ring__prod_v(void) 1197{ 1198 return 0x00000000; 1199} 1200static inline u32 sim_event_ring_gp_in_ring__prod_f(void) 1201{ 1202 return 0x0; 1203} 1204static inline u32 sim_event_ring_gp_in_ring_no_v(void) 1205{ 1206 return 0x00000000; 1207} 1208static inline u32 sim_event_ring_gp_in_ring_no_f(void) 1209{ 1210 return 0x0; 1211} 1212static inline u32 sim_event_ring_gp_in_ring_yes_v(void) 1213{ 1214 return 0x00000001; 1215} 1216static inline u32 sim_event_ring_gp_in_ring_yes_f(void) 1217{ 1218 return 0x800; 1219} 1220static inline u32 sim_event_ring_addr_lo_s(void) 1221{ 1222 return 20; 1223} 1224static inline u32 sim_event_ring_addr_lo_f(u32 v) 1225{ 1226 return 
(v & 0xfffff) << 12; 1227} 1228static inline u32 sim_event_ring_addr_lo_m(void) 1229{ 1230 return 0xfffff << 12; 1231} 1232static inline u32 sim_event_ring_addr_lo_v(u32 r) 1233{ 1234 return (r >> 12) & 0xfffff; 1235} 1236static inline u32 sim_event_ring_addr_lo__init_v(void) 1237{ 1238 return 0x00000000; 1239} 1240static inline u32 sim_event_ring_addr_lo__init_f(void) 1241{ 1242 return 0x0; 1243} 1244static inline u32 sim_event_ring_addr_lo__prod_v(void) 1245{ 1246 return 0x00000000; 1247} 1248static inline u32 sim_event_ring_addr_lo__prod_f(void) 1249{ 1250 return 0x0; 1251} 1252static inline u32 sim_event_ring_hi_v(void) 1253{ 1254 return 0x00000034; 1255} 1256static inline u32 sim_event_ring_hi_addr_s(void) 1257{ 1258 return 20; 1259} 1260static inline u32 sim_event_ring_hi_addr_f(u32 v) 1261{ 1262 return (v & 0xfffff) << 0; 1263} 1264static inline u32 sim_event_ring_hi_addr_m(void) 1265{ 1266 return 0xfffff << 0; 1267} 1268static inline u32 sim_event_ring_hi_addr_v(u32 r) 1269{ 1270 return (r >> 0) & 0xfffff; 1271} 1272static inline u32 sim_event_ring_hi_addr__init_v(void) 1273{ 1274 return 0x00000000; 1275} 1276static inline u32 sim_event_ring_hi_addr__init_f(void) 1277{ 1278 return 0x0; 1279} 1280static inline u32 sim_event_ring_hi_addr__prod_v(void) 1281{ 1282 return 0x00000000; 1283} 1284static inline u32 sim_event_ring_hi_addr__prod_f(void) 1285{ 1286 return 0x0; 1287} 1288static inline u32 sim_event_put_r(void) 1289{ 1290 return 0x00000038; 1291} 1292static inline u32 sim_event_put_pointer_s(void) 1293{ 1294 return 30; 1295} 1296static inline u32 sim_event_put_pointer_f(u32 v) 1297{ 1298 return (v & 0x3fffffff) << 2; 1299} 1300static inline u32 sim_event_put_pointer_m(void) 1301{ 1302 return 0x3fffffff << 2; 1303} 1304static inline u32 sim_event_put_pointer_v(u32 r) 1305{ 1306 return (r >> 2) & 0x3fffffff; 1307} 1308static inline u32 sim_event_get_r(void) 1309{ 1310 return 0x0000003c; 1311} 1312static inline u32 sim_event_get_pointer_s(void) 1313{ 1314 
return 30; 1315} 1316static inline u32 sim_event_get_pointer_f(u32 v) 1317{ 1318 return (v & 0x3fffffff) << 2; 1319} 1320static inline u32 sim_event_get_pointer_m(void) 1321{ 1322 return 0x3fffffff << 2; 1323} 1324static inline u32 sim_event_get_pointer_v(u32 r) 1325{ 1326 return (r >> 2) & 0x3fffffff; 1327} 1328static inline u32 sim_status_r(void) 1329{ 1330 return 0x00000028; 1331} 1332static inline u32 sim_status_send_put_s(void) 1333{ 1334 return 1; 1335} 1336static inline u32 sim_status_send_put_f(u32 v) 1337{ 1338 return (v & 0x1) << 0; 1339} 1340static inline u32 sim_status_send_put_m(void) 1341{ 1342 return 0x1 << 0; 1343} 1344static inline u32 sim_status_send_put_v(u32 r) 1345{ 1346 return (r >> 0) & 0x1; 1347} 1348static inline u32 sim_status_send_put__init_v(void) 1349{ 1350 return 0x00000000; 1351} 1352static inline u32 sim_status_send_put__init_f(void) 1353{ 1354 return 0x0; 1355} 1356static inline u32 sim_status_send_put_idle_v(void) 1357{ 1358 return 0x00000000; 1359} 1360static inline u32 sim_status_send_put_idle_f(void) 1361{ 1362 return 0x0; 1363} 1364static inline u32 sim_status_send_put_pending_v(void) 1365{ 1366 return 0x00000001; 1367} 1368static inline u32 sim_status_send_put_pending_f(void) 1369{ 1370 return 0x1; 1371} 1372static inline u32 sim_status_send_get_s(void) 1373{ 1374 return 1; 1375} 1376static inline u32 sim_status_send_get_f(u32 v) 1377{ 1378 return (v & 0x1) << 1; 1379} 1380static inline u32 sim_status_send_get_m(void) 1381{ 1382 return 0x1 << 1; 1383} 1384static inline u32 sim_status_send_get_v(u32 r) 1385{ 1386 return (r >> 1) & 0x1; 1387} 1388static inline u32 sim_status_send_get__init_v(void) 1389{ 1390 return 0x00000000; 1391} 1392static inline u32 sim_status_send_get__init_f(void) 1393{ 1394 return 0x0; 1395} 1396static inline u32 sim_status_send_get_idle_v(void) 1397{ 1398 return 0x00000000; 1399} 1400static inline u32 sim_status_send_get_idle_f(void) 1401{ 1402 return 0x0; 1403} 1404static inline u32 
sim_status_send_get_pending_v(void) 1405{ 1406 return 0x00000001; 1407} 1408static inline u32 sim_status_send_get_pending_f(void) 1409{ 1410 return 0x2; 1411} 1412static inline u32 sim_status_send_get_clear_v(void) 1413{ 1414 return 0x00000001; 1415} 1416static inline u32 sim_status_send_get_clear_f(void) 1417{ 1418 return 0x2; 1419} 1420static inline u32 sim_status_recv_put_s(void) 1421{ 1422 return 1; 1423} 1424static inline u32 sim_status_recv_put_f(u32 v) 1425{ 1426 return (v & 0x1) << 2; 1427} 1428static inline u32 sim_status_recv_put_m(void) 1429{ 1430 return 0x1 << 2; 1431} 1432static inline u32 sim_status_recv_put_v(u32 r) 1433{ 1434 return (r >> 2) & 0x1; 1435} 1436static inline u32 sim_status_recv_put__init_v(void) 1437{ 1438 return 0x00000000; 1439} 1440static inline u32 sim_status_recv_put__init_f(void) 1441{ 1442 return 0x0; 1443} 1444static inline u32 sim_status_recv_put_idle_v(void) 1445{ 1446 return 0x00000000; 1447} 1448static inline u32 sim_status_recv_put_idle_f(void) 1449{ 1450 return 0x0; 1451} 1452static inline u32 sim_status_recv_put_pending_v(void) 1453{ 1454 return 0x00000001; 1455} 1456static inline u32 sim_status_recv_put_pending_f(void) 1457{ 1458 return 0x4; 1459} 1460static inline u32 sim_status_recv_put_clear_v(void) 1461{ 1462 return 0x00000001; 1463} 1464static inline u32 sim_status_recv_put_clear_f(void) 1465{ 1466 return 0x4; 1467} 1468static inline u32 sim_status_recv_get_s(void) 1469{ 1470 return 1; 1471} 1472static inline u32 sim_status_recv_get_f(u32 v) 1473{ 1474 return (v & 0x1) << 3; 1475} 1476static inline u32 sim_status_recv_get_m(void) 1477{ 1478 return 0x1 << 3; 1479} 1480static inline u32 sim_status_recv_get_v(u32 r) 1481{ 1482 return (r >> 3) & 0x1; 1483} 1484static inline u32 sim_status_recv_get__init_v(void) 1485{ 1486 return 0x00000000; 1487} 1488static inline u32 sim_status_recv_get__init_f(void) 1489{ 1490 return 0x0; 1491} 1492static inline u32 sim_status_recv_get_idle_v(void) 1493{ 1494 return 0x00000000; 1495} 
1496static inline u32 sim_status_recv_get_idle_f(void) 1497{ 1498 return 0x0; 1499} 1500static inline u32 sim_status_recv_get_pending_v(void) 1501{ 1502 return 0x00000001; 1503} 1504static inline u32 sim_status_recv_get_pending_f(void) 1505{ 1506 return 0x8; 1507} 1508static inline u32 sim_status_event_put_s(void) 1509{ 1510 return 1; 1511} 1512static inline u32 sim_status_event_put_f(u32 v) 1513{ 1514 return (v & 0x1) << 4; 1515} 1516static inline u32 sim_status_event_put_m(void) 1517{ 1518 return 0x1 << 4; 1519} 1520static inline u32 sim_status_event_put_v(u32 r) 1521{ 1522 return (r >> 4) & 0x1; 1523} 1524static inline u32 sim_status_event_put__init_v(void) 1525{ 1526 return 0x00000000; 1527} 1528static inline u32 sim_status_event_put__init_f(void) 1529{ 1530 return 0x0; 1531} 1532static inline u32 sim_status_event_put_idle_v(void) 1533{ 1534 return 0x00000000; 1535} 1536static inline u32 sim_status_event_put_idle_f(void) 1537{ 1538 return 0x0; 1539} 1540static inline u32 sim_status_event_put_pending_v(void) 1541{ 1542 return 0x00000001; 1543} 1544static inline u32 sim_status_event_put_pending_f(void) 1545{ 1546 return 0x10; 1547} 1548static inline u32 sim_status_event_put_clear_v(void) 1549{ 1550 return 0x00000001; 1551} 1552static inline u32 sim_status_event_put_clear_f(void) 1553{ 1554 return 0x10; 1555} 1556static inline u32 sim_status_event_get_s(void) 1557{ 1558 return 1; 1559} 1560static inline u32 sim_status_event_get_f(u32 v) 1561{ 1562 return (v & 0x1) << 5; 1563} 1564static inline u32 sim_status_event_get_m(void) 1565{ 1566 return 0x1 << 5; 1567} 1568static inline u32 sim_status_event_get_v(u32 r) 1569{ 1570 return (r >> 5) & 0x1; 1571} 1572static inline u32 sim_status_event_get__init_v(void) 1573{ 1574 return 0x00000000; 1575} 1576static inline u32 sim_status_event_get__init_f(void) 1577{ 1578 return 0x0; 1579} 1580static inline u32 sim_status_event_get_idle_v(void) 1581{ 1582 return 0x00000000; 1583} 1584static inline u32 
sim_status_event_get_idle_f(void) 1585{ 1586 return 0x0; 1587} 1588static inline u32 sim_status_event_get_pending_v(void) 1589{ 1590 return 0x00000001; 1591} 1592static inline u32 sim_status_event_get_pending_f(void) 1593{ 1594 return 0x20; 1595} 1596static inline u32 sim_control_r(void) 1597{ 1598 return 0x0000002c; 1599} 1600static inline u32 sim_control_send_put_s(void) 1601{ 1602 return 1; 1603} 1604static inline u32 sim_control_send_put_f(u32 v) 1605{ 1606 return (v & 0x1) << 0; 1607} 1608static inline u32 sim_control_send_put_m(void) 1609{ 1610 return 0x1 << 0; 1611} 1612static inline u32 sim_control_send_put_v(u32 r) 1613{ 1614 return (r >> 0) & 0x1; 1615} 1616static inline u32 sim_control_send_put__init_v(void) 1617{ 1618 return 0x00000000; 1619} 1620static inline u32 sim_control_send_put__init_f(void) 1621{ 1622 return 0x0; 1623} 1624static inline u32 sim_control_send_put_disabled_v(void) 1625{ 1626 return 0x00000000; 1627} 1628static inline u32 sim_control_send_put_disabled_f(void) 1629{ 1630 return 0x0; 1631} 1632static inline u32 sim_control_send_put_enabled_v(void) 1633{ 1634 return 0x00000001; 1635} 1636static inline u32 sim_control_send_put_enabled_f(void) 1637{ 1638 return 0x1; 1639} 1640static inline u32 sim_control_send_get_s(void) 1641{ 1642 return 1; 1643} 1644static inline u32 sim_control_send_get_f(u32 v) 1645{ 1646 return (v & 0x1) << 1; 1647} 1648static inline u32 sim_control_send_get_m(void) 1649{ 1650 return 0x1 << 1; 1651} 1652static inline u32 sim_control_send_get_v(u32 r) 1653{ 1654 return (r >> 1) & 0x1; 1655} 1656static inline u32 sim_control_send_get__init_v(void) 1657{ 1658 return 0x00000000; 1659} 1660static inline u32 sim_control_send_get__init_f(void) 1661{ 1662 return 0x0; 1663} 1664static inline u32 sim_control_send_get_disabled_v(void) 1665{ 1666 return 0x00000000; 1667} 1668static inline u32 sim_control_send_get_disabled_f(void) 1669{ 1670 return 0x0; 1671} 1672static inline u32 sim_control_send_get_enabled_v(void) 1673{ 1674 
return 0x00000001; 1675} 1676static inline u32 sim_control_send_get_enabled_f(void) 1677{ 1678 return 0x2; 1679} 1680static inline u32 sim_control_recv_put_s(void) 1681{ 1682 return 1; 1683} 1684static inline u32 sim_control_recv_put_f(u32 v) 1685{ 1686 return (v & 0x1) << 2; 1687} 1688static inline u32 sim_control_recv_put_m(void) 1689{ 1690 return 0x1 << 2; 1691} 1692static inline u32 sim_control_recv_put_v(u32 r) 1693{ 1694 return (r >> 2) & 0x1; 1695} 1696static inline u32 sim_control_recv_put__init_v(void) 1697{ 1698 return 0x00000000; 1699} 1700static inline u32 sim_control_recv_put__init_f(void) 1701{ 1702 return 0x0; 1703} 1704static inline u32 sim_control_recv_put_disabled_v(void) 1705{ 1706 return 0x00000000; 1707} 1708static inline u32 sim_control_recv_put_disabled_f(void) 1709{ 1710 return 0x0; 1711} 1712static inline u32 sim_control_recv_put_enabled_v(void) 1713{ 1714 return 0x00000001; 1715} 1716static inline u32 sim_control_recv_put_enabled_f(void) 1717{ 1718 return 0x4; 1719} 1720static inline u32 sim_control_recv_get_s(void) 1721{ 1722 return 1; 1723} 1724static inline u32 sim_control_recv_get_f(u32 v) 1725{ 1726 return (v & 0x1) << 3; 1727} 1728static inline u32 sim_control_recv_get_m(void) 1729{ 1730 return 0x1 << 3; 1731} 1732static inline u32 sim_control_recv_get_v(u32 r) 1733{ 1734 return (r >> 3) & 0x1; 1735} 1736static inline u32 sim_control_recv_get__init_v(void) 1737{ 1738 return 0x00000000; 1739} 1740static inline u32 sim_control_recv_get__init_f(void) 1741{ 1742 return 0x0; 1743} 1744static inline u32 sim_control_recv_get_disabled_v(void) 1745{ 1746 return 0x00000000; 1747} 1748static inline u32 sim_control_recv_get_disabled_f(void) 1749{ 1750 return 0x0; 1751} 1752static inline u32 sim_control_recv_get_enabled_v(void) 1753{ 1754 return 0x00000001; 1755} 1756static inline u32 sim_control_recv_get_enabled_f(void) 1757{ 1758 return 0x8; 1759} 1760static inline u32 sim_control_event_put_s(void) 1761{ 1762 return 1; 1763} 1764static inline 
u32 sim_control_event_put_f(u32 v) 1765{ 1766 return (v & 0x1) << 4; 1767} 1768static inline u32 sim_control_event_put_m(void) 1769{ 1770 return 0x1 << 4; 1771} 1772static inline u32 sim_control_event_put_v(u32 r) 1773{ 1774 return (r >> 4) & 0x1; 1775} 1776static inline u32 sim_control_event_put__init_v(void) 1777{ 1778 return 0x00000000; 1779} 1780static inline u32 sim_control_event_put__init_f(void) 1781{ 1782 return 0x0; 1783} 1784static inline u32 sim_control_event_put_disabled_v(void) 1785{ 1786 return 0x00000000; 1787} 1788static inline u32 sim_control_event_put_disabled_f(void) 1789{ 1790 return 0x0; 1791} 1792static inline u32 sim_control_event_put_enabled_v(void) 1793{ 1794 return 0x00000001; 1795} 1796static inline u32 sim_control_event_put_enabled_f(void) 1797{ 1798 return 0x10; 1799} 1800static inline u32 sim_control_event_get_s(void) 1801{ 1802 return 1; 1803} 1804static inline u32 sim_control_event_get_f(u32 v) 1805{ 1806 return (v & 0x1) << 5; 1807} 1808static inline u32 sim_control_event_get_m(void) 1809{ 1810 return 0x1 << 5; 1811} 1812static inline u32 sim_control_event_get_v(u32 r) 1813{ 1814 return (r >> 5) & 0x1; 1815} 1816static inline u32 sim_control_event_get__init_v(void) 1817{ 1818 return 0x00000000; 1819} 1820static inline u32 sim_control_event_get__init_f(void) 1821{ 1822 return 0x0; 1823} 1824static inline u32 sim_control_event_get_disabled_v(void) 1825{ 1826 return 0x00000000; 1827} 1828static inline u32 sim_control_event_get_disabled_f(void) 1829{ 1830 return 0x0; 1831} 1832static inline u32 sim_control_event_get_enabled_v(void) 1833{ 1834 return 0x00000001; 1835} 1836static inline u32 sim_control_event_get_enabled_f(void) 1837{ 1838 return 0x20; 1839} 1840static inline u32 sim_dma_r(void) 1841{ 1842 return 0x00000000; 1843} 1844static inline u32 sim_dma_target_s(void) 1845{ 1846 return 2; 1847} 1848static inline u32 sim_dma_target_f(u32 v) 1849{ 1850 return (v & 0x3) << 0; 1851} 1852static inline u32 sim_dma_target_m(void) 1853{ 
1854 return 0x3 << 0; 1855} 1856static inline u32 sim_dma_target_v(u32 r) 1857{ 1858 return (r >> 0) & 0x3; 1859} 1860static inline u32 sim_dma_target_phys_init_v(void) 1861{ 1862 return 0x00000001; 1863} 1864static inline u32 sim_dma_target_phys_init_f(void) 1865{ 1866 return 0x1; 1867} 1868static inline u32 sim_dma_target_phys__init_v(void) 1869{ 1870 return 0x00000001; 1871} 1872static inline u32 sim_dma_target_phys__init_f(void) 1873{ 1874 return 0x1; 1875} 1876static inline u32 sim_dma_target_phys__prod_v(void) 1877{ 1878 return 0x00000001; 1879} 1880static inline u32 sim_dma_target_phys__prod_f(void) 1881{ 1882 return 0x1; 1883} 1884static inline u32 sim_dma_target_phys_nvm_v(void) 1885{ 1886 return 0x00000001; 1887} 1888static inline u32 sim_dma_target_phys_nvm_f(void) 1889{ 1890 return 0x1; 1891} 1892static inline u32 sim_dma_target_phys_pci_v(void) 1893{ 1894 return 0x00000002; 1895} 1896static inline u32 sim_dma_target_phys_pci_f(void) 1897{ 1898 return 0x2; 1899} 1900static inline u32 sim_dma_target_phys_pci_coherent_v(void) 1901{ 1902 return 0x00000003; 1903} 1904static inline u32 sim_dma_target_phys_pci_coherent_f(void) 1905{ 1906 return 0x3; 1907} 1908static inline u32 sim_dma_status_s(void) 1909{ 1910 return 1; 1911} 1912static inline u32 sim_dma_status_f(u32 v) 1913{ 1914 return (v & 0x1) << 3; 1915} 1916static inline u32 sim_dma_status_m(void) 1917{ 1918 return 0x1 << 3; 1919} 1920static inline u32 sim_dma_status_v(u32 r) 1921{ 1922 return (r >> 3) & 0x1; 1923} 1924static inline u32 sim_dma_status_init_v(void) 1925{ 1926 return 0x00000000; 1927} 1928static inline u32 sim_dma_status_init_f(void) 1929{ 1930 return 0x0; 1931} 1932static inline u32 sim_dma_status__init_v(void) 1933{ 1934 return 0x00000000; 1935} 1936static inline u32 sim_dma_status__init_f(void) 1937{ 1938 return 0x0; 1939} 1940static inline u32 sim_dma_status__prod_v(void) 1941{ 1942 return 0x00000000; 1943} 1944static inline u32 sim_dma_status__prod_f(void) 1945{ 1946 return 0x0; 
1947} 1948static inline u32 sim_dma_status_invalid_v(void) 1949{ 1950 return 0x00000000; 1951} 1952static inline u32 sim_dma_status_invalid_f(void) 1953{ 1954 return 0x0; 1955} 1956static inline u32 sim_dma_status_valid_v(void) 1957{ 1958 return 0x00000001; 1959} 1960static inline u32 sim_dma_status_valid_f(void) 1961{ 1962 return 0x8; 1963} 1964static inline u32 sim_dma_size_s(void) 1965{ 1966 return 2; 1967} 1968static inline u32 sim_dma_size_f(u32 v) 1969{ 1970 return (v & 0x3) << 4; 1971} 1972static inline u32 sim_dma_size_m(void) 1973{ 1974 return 0x3 << 4; 1975} 1976static inline u32 sim_dma_size_v(u32 r) 1977{ 1978 return (r >> 4) & 0x3; 1979} 1980static inline u32 sim_dma_size_init_v(void) 1981{ 1982 return 0x00000000; 1983} 1984static inline u32 sim_dma_size_init_f(void) 1985{ 1986 return 0x0; 1987} 1988static inline u32 sim_dma_size__init_v(void) 1989{ 1990 return 0x00000000; 1991} 1992static inline u32 sim_dma_size__init_f(void) 1993{ 1994 return 0x0; 1995} 1996static inline u32 sim_dma_size__prod_v(void) 1997{ 1998 return 0x00000000; 1999} 2000static inline u32 sim_dma_size__prod_f(void) 2001{ 2002 return 0x0; 2003} 2004static inline u32 sim_dma_size_4kb_v(void) 2005{ 2006 return 0x00000000; 2007} 2008static inline u32 sim_dma_size_4kb_f(void) 2009{ 2010 return 0x0; 2011} 2012static inline u32 sim_dma_size_8kb_v(void) 2013{ 2014 return 0x00000001; 2015} 2016static inline u32 sim_dma_size_8kb_f(void) 2017{ 2018 return 0x10; 2019} 2020static inline u32 sim_dma_size_12kb_v(void) 2021{ 2022 return 0x00000002; 2023} 2024static inline u32 sim_dma_size_12kb_f(void) 2025{ 2026 return 0x20; 2027} 2028static inline u32 sim_dma_size_16kb_v(void) 2029{ 2030 return 0x00000003; 2031} 2032static inline u32 sim_dma_size_16kb_f(void) 2033{ 2034 return 0x30; 2035} 2036static inline u32 sim_dma_addr_lo_s(void) 2037{ 2038 return 20; 2039} 2040static inline u32 sim_dma_addr_lo_f(u32 v) 2041{ 2042 return (v & 0xfffff) << 12; 2043} 2044static inline u32 
sim_dma_addr_lo_m(void) 2045{ 2046 return 0xfffff << 12; 2047} 2048static inline u32 sim_dma_addr_lo_v(u32 r) 2049{ 2050 return (r >> 12) & 0xfffff; 2051} 2052static inline u32 sim_dma_addr_lo__init_v(void) 2053{ 2054 return 0x00000000; 2055} 2056static inline u32 sim_dma_addr_lo__init_f(void) 2057{ 2058 return 0x0; 2059} 2060static inline u32 sim_dma_addr_lo__prod_v(void) 2061{ 2062 return 0x00000000; 2063} 2064static inline u32 sim_dma_addr_lo__prod_f(void) 2065{ 2066 return 0x0; 2067} 2068static inline u32 sim_dma_hi_r(void) 2069{ 2070 return 0x00000004; 2071} 2072static inline u32 sim_dma_hi_addr_s(void) 2073{ 2074 return 20; 2075} 2076static inline u32 sim_dma_hi_addr_f(u32 v) 2077{ 2078 return (v & 0xfffff) << 0; 2079} 2080static inline u32 sim_dma_hi_addr_m(void) 2081{ 2082 return 0xfffff << 0; 2083} 2084static inline u32 sim_dma_hi_addr_v(u32 r) 2085{ 2086 return (r >> 0) & 0xfffff; 2087} 2088static inline u32 sim_dma_hi_addr__init_v(void) 2089{ 2090 return 0x00000000; 2091} 2092static inline u32 sim_dma_hi_addr__init_f(void) 2093{ 2094 return 0x0; 2095} 2096static inline u32 sim_dma_hi_addr__prod_v(void) 2097{ 2098 return 0x00000000; 2099} 2100static inline u32 sim_dma_hi_addr__prod_f(void) 2101{ 2102 return 0x0; 2103} 2104static inline u32 sim_msg_signature_r(void) 2105{ 2106 return 0x00000000; 2107} 2108static inline u32 sim_msg_signature_valid_v(void) 2109{ 2110 return 0x43505256; 2111} 2112static inline u32 sim_msg_length_r(void) 2113{ 2114 return 0x00000004; 2115} 2116static inline u32 sim_msg_function_r(void) 2117{ 2118 return 0x00000008; 2119} 2120static inline u32 sim_msg_function_sim_escape_read_v(void) 2121{ 2122 return 0x00000023; 2123} 2124static inline u32 sim_msg_function_sim_escape_write_v(void) 2125{ 2126 return 0x00000024; 2127} 2128static inline u32 sim_msg_result_r(void) 2129{ 2130 return 0x0000000c; 2131} 2132static inline u32 sim_msg_result_success_v(void)