From 7a8b12ab634c05cd39c08e704c28ee3f4e111c7f Mon Sep 17 00:00:00 2001
From: Thomas Fleury
Date: Fri, 30 Sep 2016 16:40:29 -0700
Subject: gpu: nvgpu: clk requests completion and event fds

Install one completion fd per SET request.
Send notifications on a dedicated event fd.
Change frequency units from MHz to Hz.
Remove sequence numbers from the dummy arbiter.
Add an effective clock type (query frequency from counters).

Jira DNVGPU-125

Change-Id: Id5445c6ae1d6bf06f7f59c82ff6c5d3b34e26483
Signed-off-by: Thomas Fleury
Reviewed-on: http://git-master/r/1230239
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
(cherry picked from commit d17083f4ceb69725c661678607a3e43148d38560)
Reviewed-on: http://git-master/r/1243106
---
 drivers/gpu/nvgpu/clk/clk_arb.c | 326 +++++++++++++++++++++++-----------------
 1 file changed, 191 insertions(+), 135 deletions(-)

diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c
index 1d02c7d7..98b7cb5f 100644
--- a/drivers/gpu/nvgpu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/clk/clk_arb.c
@@ -21,50 +21,63 @@
 #include "clk/clk_arb.h"
 
-static int nvgpu_clk_arb_release_session_dev(struct inode *inode, struct file *filp);
-static unsigned int nvgpu_clk_arb_poll_session_dev(struct file *filp, poll_table *wait);
+static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
+		struct file *filp);
+static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
+		struct file *filp);
+static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait);
 static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work);
 
 struct nvgpu_clk_arb {
-	struct mutex wlock;
+	struct mutex req_lock;
 	struct mutex users_lock;
 	struct list_head users;
-	u32 gpc2clk_current_mhz;
-	u32 gpc2clk_target_mhz;
-	u32 gpc2clk_default_mhz;
-	u32 mclk_current_mhz;
-	u32 mclk_target_mhz;
-	u32 mclk_default_mhz;
+	struct list_head requests;
+
+	u64 gpc2clk_current_hz;
+	u64 gpc2clk_target_hz;
+	u64 gpc2clk_default_hz;
+	u64 mclk_current_hz;
+	u64 mclk_target_hz;
+	u64 mclk_default_hz;
 	atomic_t usercount;
 	struct work_struct update_fn_work;
+};
+
-	atomic_t req_nr; /* for allocations */
-	atomic_t last_req_nr; /* last completed by arbiter */
+struct nvgpu_clk_dev {
+	struct nvgpu_clk_session *session;
+	struct list_head link;
+	wait_queue_head_t readout_wq;
+	atomic_t poll_mask;
 };
 
 struct nvgpu_clk_session {
+	bool zombie;
 	struct gk20a *g;
-	int fd;
-	atomic_t req_nr;
 	struct kref refcount;
-	wait_queue_head_t readout_wq;
-	atomic_t poll_mask;
-	struct list_head user;
-	u32 gpc2clk_target_mhz;
-	u32 mclk_target_mhz;
+
+	u64 gpc2clk_target_hz;
+	u64 mclk_target_hz;
 };
 
-const struct file_operations clk_dev_ops = {
+static const struct file_operations completion_dev_ops = {
 	.owner = THIS_MODULE,
-	.release = nvgpu_clk_arb_release_session_dev,
-	.poll = nvgpu_clk_arb_poll_session_dev,
+	.release = nvgpu_clk_arb_release_completion_dev,
+	.poll = nvgpu_clk_arb_poll_dev,
+};
+
+static const struct file_operations event_dev_ops = {
+	.owner = THIS_MODULE,
+	.release = nvgpu_clk_arb_release_event_dev,
+	.poll = nvgpu_clk_arb_poll_dev,
 };
 
 int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 {
 	struct nvgpu_clk_arb *arb;
-	u16 default_mhz;
+	u64 default_hz;
 	int err;
 
 	gk20a_dbg_fn("");
@@ -78,32 +91,31 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 
 	g->clk_arb = arb;
 
-	mutex_init(&arb->wlock);
+	mutex_init(&arb->req_lock);
 	mutex_init(&arb->users_lock);
 
 	err = g->ops.clk_arb.get_arbiter_clk_default(g,
-			NVGPU_GPU_CLK_DOMAIN_MCLK, &default_mhz);
+			NVGPU_GPU_CLK_DOMAIN_MCLK, &default_hz);
 	if (err)
 		return -EINVAL;
 
-	arb->mclk_target_mhz = default_mhz;
-	arb->mclk_current_mhz = default_mhz;
-	arb->mclk_default_mhz = default_mhz;
+	arb->mclk_target_hz = default_hz;
+	arb->mclk_current_hz = default_hz;
+	arb->mclk_default_hz = default_hz;
 
 	err = g->ops.clk_arb.get_arbiter_clk_default(g,
-			NVGPU_GPU_CLK_DOMAIN_GPC2CLK, &default_mhz);
+			NVGPU_GPU_CLK_DOMAIN_GPC2CLK, &default_hz);
 	if (err)
 		return -EINVAL;
 
-	arb->gpc2clk_target_mhz = default_mhz;
-	arb->gpc2clk_current_mhz = default_mhz;
-	arb->gpc2clk_default_mhz = default_mhz;
+	arb->gpc2clk_target_hz = default_hz;
+	arb->gpc2clk_current_hz = default_hz;
+	arb->gpc2clk_default_hz = default_hz;
 
 	atomic_set(&arb->usercount, 0);
-	atomic_set(&arb->req_nr, 0);
-	atomic_set(&arb->last_req_nr, 0);
 
 	INIT_LIST_HEAD(&arb->users);
+	INIT_LIST_HEAD(&arb->requests);
 	INIT_WORK(&arb->update_fn_work, nvgpu_clk_arb_run_arbiter_cb);
 
 	return 0;
@@ -114,44 +126,50 @@ void nvgpu_clk_arb_cleanup_arbiter(struct gk20a *g)
 	kfree(g->clk_arb);
 }
 
-
-int nvgpu_clk_arb_install_session_fd(struct gk20a *g,
-		struct nvgpu_clk_session *session)
+static int nvgpu_clk_arb_install_fd(struct gk20a *g,
+		struct nvgpu_clk_session *session,
+		const struct file_operations *fops,
+		struct nvgpu_clk_dev **_dev)
 {
 	struct file *file;
 	char *name;
 	int fd;
 	int err;
+	struct nvgpu_clk_dev *dev;
 
 	gk20a_dbg_fn("");
 
-	if (session->fd >= 0)
-		goto done;
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
 
 	fd = get_unused_fd_flags(O_RDWR);
 	if (fd < 0)
 		return fd;
 
 	name = kasprintf(GFP_KERNEL, "%s-clk-fd%d", dev_name(g->dev), fd);
-	file = anon_inode_getfile(name, &clk_dev_ops, session, O_RDWR);
+	file = anon_inode_getfile(name, fops, dev, O_RDWR);
 	kfree(name);
 	if (IS_ERR(file)) {
 		err = PTR_ERR(file);
-		goto clean_up_fd;
+		goto fail;
 	}
-	BUG_ON(file->private_data != session);
-
 	fd_install(fd, file);
+
+	init_waitqueue_head(&dev->readout_wq);
+	atomic_set(&dev->poll_mask, 0);
+
+	dev->session = session;
 	kref_get(&session->refcount);
-	session->fd = fd;
-done:
-	return session->fd;
+	*_dev = dev;
 
-clean_up_fd:
-	put_unused_fd(fd);
+	return fd;
+fail:
+	kfree(dev);
+	put_unused_fd(fd);
 	return err;
 }
 
@@ -163,8 +181,6 @@ int nvgpu_clk_arb_init_session(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	*_session = NULL;
-
 	if (!g->ops.clk_arb.get_arbiter_clk_domains)
 		return 0;
 
@@ -172,23 +188,14 @@ int nvgpu_clk_arb_init_session(struct gk20a *g,
 	if (!session)
 		return -ENOMEM;
 	session->g = g;
-	session->fd = -1;
 	kref_init(&session->refcount);
-	init_waitqueue_head(&session->readout_wq);
-
-	atomic_set(&session->poll_mask, 0);
-	atomic_set(&session->req_nr, 0);
-	mutex_lock(&arb->users_lock);
-	list_add_tail(&session->user, &arb->users);
-	mutex_unlock(&arb->users_lock);
 	atomic_inc(&arb->usercount);
 
-	mutex_lock(&arb->wlock);
-	session->mclk_target_mhz = arb->mclk_default_mhz;
-	session->gpc2clk_target_mhz = arb->gpc2clk_default_mhz;
-	mutex_unlock(&arb->wlock);
+	session->zombie = false;
+	session->mclk_target_hz = arb->mclk_default_hz;
+	session->gpc2clk_target_hz = arb->gpc2clk_default_hz;
 
 	*_session = session;
 
@@ -199,23 +206,41 @@ void nvgpu_clk_arb_free_session(struct kref *refcount)
 {
 	struct nvgpu_clk_session *session = container_of(refcount,
 			struct nvgpu_clk_session, refcount);
-	struct gk20a *g = session->g;
-	struct nvgpu_clk_arb *arb = g->clk_arb;
-
-	mutex_lock(&arb->users_lock);
-	list_del_init(&session->user);
-	mutex_unlock(&arb->users_lock);
-
-	if (atomic_dec_and_test(&arb->usercount))
-		nvgpu_clk_arb_apply_session_constraints(g, NULL);
 
 	kfree(session);
 }
 
-void nvgpu_clk_arb_cleanup_session(struct gk20a *g,
-		struct nvgpu_clk_session *session)
+void nvgpu_clk_arb_release_session(struct gk20a *g,
+		struct nvgpu_clk_session *session)
 {
+	struct nvgpu_clk_arb *arb = g->clk_arb;
+
+	session->zombie = true;
 	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
+
+	/* schedule arbiter if no more user */
+	if (!atomic_dec_and_test(&arb->usercount))
+		schedule_work(&arb->update_fn_work);
+}
+
+int nvgpu_clk_arb_install_event_fd(struct gk20a *g,
+		struct nvgpu_clk_session *session, int *event_fd)
+{
+	struct nvgpu_clk_arb *arb = g->clk_arb;
+	struct nvgpu_clk_dev *dev;
+	int fd;
+
+	fd = nvgpu_clk_arb_install_fd(g, session, &event_dev_ops, &dev);
+	if (fd < 0)
+		return fd;
+
+	mutex_lock(&arb->users_lock);
+	list_add_tail(&dev->link, &arb->users);
+	mutex_unlock(&arb->users_lock);
+
+	*event_fd = fd;
+
+	return 0;
 }
 
 static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
@@ -223,88 +248,125 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	struct nvgpu_clk_arb *arb = container_of(work,
 					struct nvgpu_clk_arb, update_fn_work);
 	struct nvgpu_clk_session *session;
+	struct nvgpu_clk_dev *dev;
+	struct nvgpu_clk_dev *tmp;
+
+	mutex_lock(&arb->req_lock);
 
-	mutex_lock(&arb->wlock);
+	arb->mclk_target_hz = arb->mclk_default_hz;
+	arb->gpc2clk_target_hz = arb->gpc2clk_default_hz;
+
+	list_for_each_entry(dev, &arb->requests, link) {
+		session = dev->session;
+		if (!session->zombie) {
+			/* TODO: arbiter policy. For now last request wins */
+
+			arb->mclk_target_hz = session->mclk_target_hz;
+			arb->gpc2clk_target_hz = session->gpc2clk_target_hz;
+		}
+	}
 
 	/* TODO: loop up higher or equal VF points */
 
-	arb->mclk_current_mhz = arb->mclk_target_mhz;
-	arb->gpc2clk_current_mhz = arb->gpc2clk_target_mhz;
+	arb->mclk_current_hz = arb->mclk_target_hz;
+	arb->gpc2clk_current_hz = arb->gpc2clk_target_hz;
 
 	/* TODO: actually program the clocks */
 
-	atomic_set(&arb->last_req_nr, atomic_read(&arb->req_nr));
-	mutex_unlock(&arb->wlock);
+	/* notify completion for all requests */
+	list_for_each_entry_safe(dev, tmp, &arb->requests, link) {
+		atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
+		wake_up_interruptible(&dev->readout_wq);
+		list_del_init(&dev->link);
+	}
+	mutex_unlock(&arb->req_lock);
 
+	/* notify event for all users */
 	mutex_lock(&arb->users_lock);
-	list_for_each_entry(session, &arb->users, user) {
-		atomic_set(&session->poll_mask, POLLIN | POLLRDNORM);
-		wake_up_interruptible(&session->readout_wq);
+	list_for_each_entry(dev, &arb->users, link) {
+		atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
+		wake_up_interruptible(&dev->readout_wq);
 	}
 	mutex_unlock(&arb->users_lock);
 }
 
-void nvgpu_clk_arb_apply_session_constraints(struct gk20a *g,
-		struct nvgpu_clk_session *session)
+int nvgpu_clk_arb_apply_session_constraints(struct gk20a *g,
+		struct nvgpu_clk_session *session, int *completion_fd)
 {
 	struct nvgpu_clk_arb *arb = g->clk_arb;
+	struct nvgpu_clk_dev *dev;
+	int fd;
 
-	mutex_lock(&arb->wlock);
-	atomic_inc(&arb->req_nr);
-
-	/* TODO: arbitration between users.
-	   For now, last session to run arbiter wins.
-	 */
+	fd = nvgpu_clk_arb_install_fd(g, session, &completion_dev_ops, &dev);
+	if (fd < 0)
+		return fd;
 
-	if (session) {
-		arb->mclk_target_mhz = session->mclk_target_mhz;
-		arb->gpc2clk_target_mhz = session->gpc2clk_target_mhz;
+	*completion_fd = fd;
 
-		atomic_set(&session->req_nr, atomic_read(&arb->req_nr));
-	} else {
-		arb->mclk_target_mhz = arb->mclk_default_mhz;
-		arb->gpc2clk_target_mhz = arb->gpc2clk_default_mhz;
-	}
-	mutex_unlock(&arb->wlock);
+	mutex_lock(&arb->req_lock);
+	list_add_tail(&dev->link, &arb->requests);
+	mutex_unlock(&arb->req_lock);
 
 	schedule_work(&arb->update_fn_work);
+
+	return 0;
 }
 
-static unsigned int nvgpu_clk_arb_poll_session_dev(struct file *filp, poll_table *wait)
+static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
 {
-	struct nvgpu_clk_session *session = filp->private_data;
+	struct nvgpu_clk_dev *dev = filp->private_data;
 
 	gk20a_dbg_fn("");
 
-	poll_wait(filp, &session->readout_wq, wait);
-	return atomic_xchg(&session->poll_mask, 0);
+	poll_wait(filp, &dev->readout_wq, wait);
+	return atomic_xchg(&dev->poll_mask, 0);
 }
 
-static int nvgpu_clk_arb_release_session_dev(struct inode *inode, struct file *filp)
+static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
+		struct file *filp)
 {
-	struct nvgpu_clk_session *session = filp->private_data;
-	struct gk20a *g = session->g;
+	struct nvgpu_clk_dev *dev = filp->private_data;
+	struct nvgpu_clk_session *session = dev->session;
 
-	session->fd = -1;
-	nvgpu_clk_arb_cleanup_session(g, session);
+	gk20a_dbg_fn("");
+
+	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
+	kfree(dev);
+	return 0;
+}
+
+static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
+		struct file *filp)
+{
+	struct nvgpu_clk_dev *dev = filp->private_data;
+	struct nvgpu_clk_session *session = dev->session;
+	struct nvgpu_clk_arb *arb = session->g->clk_arb;
+
+	gk20a_dbg_fn("");
+	mutex_lock(&arb->users_lock);
+	list_del_init(&dev->link);
+	mutex_unlock(&arb->users_lock);
+
+	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
+	kfree(dev);
 	return 0;
 }
 
-int nvgpu_clk_arb_set_session_target_mhz(struct nvgpu_clk_session *session,
-		u32 api_domain, u16 target_mhz)
+int nvgpu_clk_arb_set_session_target_hz(struct nvgpu_clk_session *session,
+		u32 api_domain, u64 target_hz)
 {
-	gk20a_dbg_fn("domain=0x%08x target_mhz=%u", api_domain, target_mhz);
+	gk20a_dbg_fn("domain=0x%08x target_hz=%llu", api_domain, target_hz);
 
 	switch (api_domain) {
 	case NVGPU_GPU_CLK_DOMAIN_MCLK:
-		session->mclk_target_mhz = target_mhz;
+		session->mclk_target_hz = target_hz;
 		return 0;
 
 	case NVGPU_GPU_CLK_DOMAIN_GPC2CLK:
-		session->gpc2clk_target_mhz = target_mhz;
+		session->gpc2clk_target_hz = target_hz;
 		return 0;
 
 	default:
@@ -312,61 +374,61 @@ int nvgpu_clk_arb_set_session_target_mhz(struct nvgpu_clk_session *session,
 	}
 }
 
-int nvgpu_clk_arb_get_session_target_mhz(struct nvgpu_clk_session *session,
-		u32 api_domain, u16 *target_mhz)
+int nvgpu_clk_arb_get_session_target_hz(struct nvgpu_clk_session *session,
+		u32 api_domain, u64 *freq_hz)
 {
 	switch (api_domain) {
 	case NVGPU_GPU_CLK_DOMAIN_MCLK:
-		*target_mhz = session->mclk_target_mhz;
+		*freq_hz = session->mclk_target_hz;
 		return 0;
 
 	case NVGPU_GPU_CLK_DOMAIN_GPC2CLK:
-		*target_mhz = session->gpc2clk_target_mhz;
+		*freq_hz = session->gpc2clk_target_hz;
 		return 0;
 
 	default:
-		*target_mhz = 0;
+		*freq_hz = 0;
 		return -EINVAL;
 	}
 }
 
-int nvgpu_clk_arb_get_arbiter_actual_mhz(struct gk20a *g,
-		u32 api_domain, u16 *actual_mhz)
+int nvgpu_clk_arb_get_arbiter_actual_hz(struct gk20a *g,
+		u32 api_domain, u64 *freq_hz)
 {
 	struct nvgpu_clk_arb *arb = g->clk_arb;
 	int err = 0;
 
-	mutex_lock(&arb->wlock);
+	mutex_lock(&arb->req_lock);
 	switch (api_domain) {
 	case NVGPU_GPU_CLK_DOMAIN_MCLK:
-		*actual_mhz = arb->mclk_current_mhz;
+		*freq_hz = arb->mclk_current_hz;
 		break;
 
 	case NVGPU_GPU_CLK_DOMAIN_GPC2CLK:
-		*actual_mhz = arb->gpc2clk_current_mhz;
+		*freq_hz = arb->gpc2clk_current_hz;
 		break;
 
 	default:
-		*actual_mhz = 0;
+		*freq_hz = 0;
 		err = -EINVAL;
 	}
-	mutex_unlock(&arb->wlock);
+	mutex_unlock(&arb->req_lock);
 
 	return err;
 }
 
-u32 nvgpu_clk_arb_get_arbiter_req_nr(struct gk20a *g)
+int nvgpu_clk_arb_get_arbiter_effective_hz(struct gk20a *g,
+		u32 api_domain, u64 *freq_hz)
 {
-	struct nvgpu_clk_arb *arb = g->clk_arb;
-
-	return atomic_read(&arb->last_req_nr);
+	/* TODO: measure clocks from counters */
+	return nvgpu_clk_arb_get_arbiter_actual_hz(g, api_domain, freq_hz);
 }
 
 int nvgpu_clk_arb_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
-		u16 *min_mhz, u16 *max_mhz)
+		u64 *min_hz, u64 *max_hz)
 {
 	return g->ops.clk_arb.get_arbiter_clk_range(g, api_domain,
-			min_mhz, max_mhz);
+			min_hz, max_hz);
 }
 
 u32 nvgpu_clk_arb_get_arbiter_clk_domains(struct gk20a *g)
@@ -374,12 +436,6 @@ u32 nvgpu_clk_arb_get_arbiter_clk_domains(struct gk20a *g)
 	return g->ops.clk_arb.get_arbiter_clk_domains(g);
 }
 
-u32 nvgpu_clk_arb_get_session_req_nr(struct gk20a *g,
-		struct nvgpu_clk_session *session)
-{
-	return atomic_read(&session->req_nr);
-}
-
 int nvgpu_clk_arb_get_arbiter_clk_f_points(struct gk20a *g,
 	u32 api_domain, u32 *max_points, u16 *fpoints)
 {
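
For reference, a minimal userspace-style sketch of how the two new fd types might be consumed with poll(). This is not part of the patch: the fds are assumed to have already been obtained through the driver's ioctl path (which wraps nvgpu_clk_arb_install_event_fd() and nvgpu_clk_arb_apply_session_constraints() but is not shown here), and wait_for_clk_arb() is a hypothetical helper name.

#include <poll.h>
#include <stdio.h>

/*
 * Wait for a clock arbiter SET request to complete and/or for a
 * frequency-change event. Both fds are assumed to come from the
 * driver (hypothetical; not defined in this patch).
 */
int wait_for_clk_arb(int completion_fd, int event_fd, int timeout_ms)
{
	struct pollfd pfd[2] = {
		{ .fd = completion_fd, .events = POLLIN | POLLRDNORM },
		{ .fd = event_fd,      .events = POLLIN | POLLRDNORM },
	};
	int ret = poll(pfd, 2, timeout_ms);

	if (ret <= 0)
		return ret;	/* 0 on timeout, -1 on error */

	if (pfd[0].revents & POLLIN)
		printf("SET request arbitrated\n");	/* signals once per request */
	if (pfd[1].revents & POLLIN)
		printf("arbiter ran, clocks may have changed\n");	/* fires on every arbiter pass */

	return ret;
}

This mirrors the split the patch introduces: a completion fd is installed per SET request, removed from arb->requests once the arbiter has run, and can then be closed, while the event fd stays on arb->users and is woken on every arbiter pass.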