Diffstat (limited to 'fs/eventpoll.c')
 fs/eventpoll.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ae228ec54e94..3ae644e7e860 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -283,10 +283,10 @@ static struct mutex epmutex;
 static struct poll_safewake psw;
 
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache __read_mostly;
+static struct kmem_cache *epi_cache __read_mostly;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache __read_mostly;
+static struct kmem_cache *pwq_cache __read_mostly;
 
 /* Virtual fs used to allocate inodes for eventpoll files */
 static struct vfsmount *eventpoll_mnt __read_mostly;
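[Note, not part of this patch] kmem_cache_t was only a typedef for struct kmem_cache, and the typedef was being phased out, so cache declarations now name the struct directly. A minimal sketch of how such a cache is typically set up, using hypothetical names and the modern five-argument kmem_cache_create() (older kernels of this era also took a destructor argument):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical payload type, standing in for "struct epitem". */
struct example_item {
	int nwait;
};

/* Declared with the struct name; older code spelled this "kmem_cache_t *". */
static struct kmem_cache *example_cache __read_mostly;

static int __init example_init(void)
{
	example_cache = kmem_cache_create("example_item",
					  sizeof(struct example_item),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return example_cache ? 0 : -ENOMEM;
}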
@@ -795,8 +795,8 @@ static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
 		goto eexit_4;
 	dentry->d_op = &eventpollfs_dentry_operations;
 	d_add(dentry, inode);
-	file->f_vfsmnt = mntget(eventpoll_mnt);
-	file->f_dentry = dentry;
+	file->f_path.mnt = mntget(eventpoll_mnt);
+	file->f_path.dentry = dentry;
 	file->f_mapping = inode->i_mapping;
 
 	file->f_pos = 0;
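[Note, not part of this patch] The separate f_vfsmnt and f_dentry fields of struct file were folded into an embedded struct path named f_path, which pairs a struct vfsmount * with a struct dentry *. A minimal sketch of the new spelling; the helper name below is hypothetical:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mount.h>

/* Hypothetical helper: attach a pinned mount and a dentry to an open file. */
static void example_set_file_path(struct file *file,
				  struct vfsmount *mnt, struct dentry *dentry)
{
	file->f_path.mnt = mntget(mnt);		/* was file->f_vfsmnt */
	file->f_path.dentry = dentry;		/* was file->f_dentry */
	file->f_mapping = dentry->d_inode->i_mapping;
}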
@@ -961,7 +961,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 	struct epitem *epi = ep_item_from_epqueue(pt);
 	struct eppoll_entry *pwq;
 
-	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) {
+	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
 		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
 		pwq->whead = whead;
 		pwq->base = epi;
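[Note, not part of this patch] SLAB_KERNEL was nothing more than an alias for GFP_KERNEL, so the hunk above only spells the gfp flag directly; the allocation behaviour is unchanged and may sleep. A minimal sketch of the same allocate-and-initialize pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/wait.h>

/* Hypothetical wait hook, loosely modelled on "struct eppoll_entry". */
struct example_hook {
	wait_queue_head_t *whead;
	void *base;
};

static struct kmem_cache *example_hook_cache __read_mostly;

/* GFP_KERNEL (formerly spelled SLAB_KERNEL here) may sleep: process context only. */
static struct example_hook *example_hook_alloc(wait_queue_head_t *whead, void *base)
{
	struct example_hook *pwq = kmem_cache_alloc(example_hook_cache, GFP_KERNEL);

	if (pwq) {
		pwq->whead = whead;
		pwq->base = base;
	}
	return pwq;
}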
@@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	struct ep_pqueue epq;
 
 	error = -ENOMEM;
-	if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL)))
+	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		goto eexit_1;
 
 	/* Item initialization follow here ... */
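[Note, not part of this patch] ep_insert() shows the kernel's usual preset-errno-then-goto error handling around kmem_cache_alloc(); anything taken from a cache goes back via kmem_cache_free() on failure paths. A minimal sketch of that pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical item type, standing in for "struct epitem". */
struct example_item {
	int nwait;
};

static struct kmem_cache *example_item_cache __read_mostly;

/* Hypothetical follow-up step that may fail after the allocation. */
static int example_register(struct example_item *item)
{
	return 0;
}

static int example_insert(struct example_item **out)
{
	struct example_item *epi;
	int error;

	/* Preset the errno, then jump to a cleanup label on failure. */
	error = -ENOMEM;
	epi = kmem_cache_alloc(example_item_cache, GFP_KERNEL);
	if (!epi)
		goto out;

	epi->nwait = 0;

	error = example_register(epi);
	if (error)
		goto out_free;

	*out = epi;
	return 0;

out_free:
	kmem_cache_free(example_item_cache, epi);	/* pair with kmem_cache_alloc() */
out:
	return error;
}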