author    Thomas Fleury <tfleury@nvidia.com>    2016-11-08 16:38:08 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-01-06 12:14:04 -0500
commit    25969934122dab95b35fc84d194e423801d51861 (patch)
tree      3a5a701f42c29c195de7ef19455a796c4e42faac /drivers/gpu/nvgpu/clk/clk_arb.c
parent    5d4ba0a6d8cf3d45c948b620e691a80921ad61a5 (diff)

gpu: nvgpu: add read and ioctl for events

Add support for poll/read/ioctl for events in clock arbiter.

Jira DNVGPU-186

Change-Id: Id95264fffae2b83a388ff8f186ebe7d723029b7f
Reviewed-on: http://git-master/r/1253659
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
(cherry picked from commit 8d652a7313722aab331f82f0841b490ca25cb51d)
Reviewed-on: http://git-master/r/1280885
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
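For reference, below is a minimal userspace sketch of how a client might consume these events, assuming the event fd has already been obtained from a clock arbiter session (the fd acquisition path is outside this change) and that the nvgpu uapi header exports struct nvgpu_gpu_event_info, NVGPU_GPU_EVENT_VF_UPDATE, NVGPU_EVENT_IOCTL_SET_FILTER and struct nvgpu_gpu_set_event_filter_args. The <linux/nvgpu.h> include path and the exact uapi field types are assumptions, not part of this patch.

/*
 * Sketch only: filters for VF-update events on an already-open clock
 * arbiter event fd, then blocks until one arrives and reads it.
 * Header path, fd acquisition and exact uapi field types are assumed.
 */
#include <linux/nvgpu.h>	/* assumed uapi header location */
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int watch_vf_updates(int event_fd)
{
	struct nvgpu_gpu_set_event_filter_args filter = { 0 };
	struct nvgpu_gpu_event_info info;
	struct pollfd pfd = { .fd = event_fd, .events = POLLIN };
	uint32_t mask = 1u << NVGPU_GPU_EVENT_VF_UPDATE;

	/* One 32-bit mask word; flags must stay zero (see the ioctl handler). */
	filter.size = 1;
	filter.buffer = (uint64_t)(uintptr_t)&mask;
	if (ioctl(event_fd, NVGPU_EVENT_IOCTL_SET_FILTER, &filter) < 0)
		return -1;

	for (;;) {
		/* Wait until the arbiter flags a pending event... */
		if (poll(&pfd, 1, -1) < 0)
			return -1;

		/* ...then read exactly one event record per read(). */
		if (read(event_fd, &info, sizeof(info)) != sizeof(info))
			return -1;

		printf("event %u at %llu\n", info.event_id,
		       (unsigned long long)info.timestamp);
	}
}

The read() path hands back one struct nvgpu_gpu_event_info per call and returns 0 for undersized buffers, which is why the buffer above is sized to exactly one record.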
Diffstat (limited to 'drivers/gpu/nvgpu/clk/clk_arb.c')
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_arb.c  159
1 file changed, 151 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c
index 0d8cd398..673f3721 100644
--- a/drivers/gpu/nvgpu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/clk/clk_arb.c
@@ -34,7 +34,12 @@ static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
 		struct file *filp);
 static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
 		struct file *filp);
-static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait);
+static unsigned int nvgpu_clk_arb_poll_completion_dev(struct file *filp, poll_table *wait);
+static unsigned int nvgpu_clk_arb_poll_event_dev(struct file *filp, poll_table *wait);
+static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp,
+		char __user *buf, size_t size, loff_t *off);
+long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd,
+		unsigned long arg);
 
 static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work);
 static void nvgpu_clk_arb_run_vf_table_cb(struct work_struct *work);
@@ -147,6 +152,9 @@ struct nvgpu_clk_dev {
 	atomic_t poll_mask;
 	u16 gpc2clk_target_mhz;
 	u16 mclk_target_mhz;
+	spinlock_t event_lock;
+	u32 event_status;
+	u32 event_mask;
 	struct kref refcount;
 };
 
@@ -164,13 +172,18 @@ struct nvgpu_clk_session {
 static const struct file_operations completion_dev_ops = {
 	.owner = THIS_MODULE,
 	.release = nvgpu_clk_arb_release_completion_dev,
-	.poll = nvgpu_clk_arb_poll_dev,
+	.poll = nvgpu_clk_arb_poll_completion_dev,
 };
 
 static const struct file_operations event_dev_ops = {
 	.owner = THIS_MODULE,
 	.release = nvgpu_clk_arb_release_event_dev,
-	.poll = nvgpu_clk_arb_poll_dev,
+	.poll = nvgpu_clk_arb_poll_event_dev,
+	.read = nvgpu_clk_arb_read_event_dev,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = nvgpu_clk_arb_ioctl_event_dev,
+#endif
+	.unlocked_ioctl = nvgpu_clk_arb_ioctl_event_dev,
 };
 
 int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
@@ -350,7 +363,10 @@ static int nvgpu_clk_arb_install_fd(struct gk20a *g,
 	fd_install(fd, file);
 
 	init_waitqueue_head(&dev->readout_wq);
-	atomic_set(&dev->poll_mask, 0);
+
+	spin_lock_init(&dev->event_lock);
+	dev->event_status = 0;
+	dev->event_mask = ~0;
 
 	dev->session = session;
 	kref_init(&dev->refcount);
@@ -1080,7 +1096,9 @@ exit_arb:
 	/* notify event for all users */
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &arb->users, link) {
-		atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
+		spin_lock(&dev->event_lock);
+		dev->event_status |= (1UL << NVGPU_GPU_EVENT_VF_UPDATE);
+		spin_unlock(&dev->event_lock);
 		wake_up_interruptible(&dev->readout_wq);
 	}
 	rcu_read_unlock();
@@ -1117,7 +1135,28 @@ fdput_fd:
 	return err;
 }
 
-static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
+static inline int __pending_event(struct nvgpu_clk_dev *dev,
+		struct nvgpu_gpu_event_info *info)
+{
+	struct gk20a *g = dev->session->g;
+	u32 status;
+
+	spin_lock(&dev->event_lock);
+	status = dev->event_status & dev->event_mask;
+	if (status && info)
+	{
+		/* TODO: retrieve oldest event_id based on timestamp */
+		info->event_id = ffs(status) - 1;
+		g->ops.read_ptimer(g, &info->timestamp);
+
+		dev->event_status &= ~(1UL << info->event_id);
+	}
+	spin_unlock(&dev->event_lock);
+	return status;
+}
+
+static unsigned int nvgpu_clk_arb_poll_completion_dev(struct file *filp,
+		poll_table *wait)
 {
 	struct nvgpu_clk_dev *dev = filp->private_data;
 
@@ -1127,6 +1166,111 @@ static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
 	return atomic_xchg(&dev->poll_mask, 0);
 }
 
+static unsigned int nvgpu_clk_arb_poll_event_dev(struct file *filp,
+		poll_table *wait)
+{
+	struct nvgpu_clk_dev *dev = filp->private_data;
+
+	gk20a_dbg_fn("");
+
+	poll_wait(filp, &dev->readout_wq, wait);
+	return __pending_event(dev, NULL);
+}
+
+static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp,
+		char __user *buf, size_t size, loff_t *off)
+{
+	struct nvgpu_clk_dev *dev = filp->private_data;
+	struct nvgpu_gpu_event_info info;
+	int err;
+
+	gk20a_dbg(gpu_dbg_fn, "filp=%p buf=%p size=%zu", filp, buf, size);
+
+	if (size < sizeof(info))
+		return 0;
+
+	memset(&info, 0, sizeof(info));
+	while (!__pending_event(dev, &info)) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		err = wait_event_interruptible(dev->readout_wq,
+				__pending_event(dev, &info));
+		if (err)
+			return err;
+	}
+
+	if (copy_to_user(buf, &info, sizeof(info)))
+		return -EFAULT;
+
+	*off += sizeof(info);
+
+	return sizeof(info);
+}
+
+static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev,
+		struct nvgpu_gpu_set_event_filter_args *args)
+{
+	u32 mask;
+
+	gk20a_dbg(gpu_dbg_fn, "");
+
+	if (args->flags)
+		return -EINVAL;
+
+	if (args->size != 1)
+		return -EINVAL;
+
+	if (copy_from_user(&mask, (void __user *) args->buffer,
+			args->size * sizeof(u32)))
+		return -EFAULT;
+
+	spin_lock(&dev->event_lock);
+	/* update event mask */
+	dev->event_mask = mask;
+	spin_unlock(&dev->event_lock);
+
+	return 0;
+}
+
+long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct nvgpu_clk_dev *dev = filp->private_data;
+	struct gk20a *g = dev->session->g;
+	u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE];
+	int err = 0;
+
+	gk20a_dbg(gpu_dbg_fn, "nr=%d", _IOC_NR(cmd));
+
+	if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0)
+		|| (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST))
+		return -EINVAL;
+
+	BUG_ON(_IOC_SIZE(cmd) > NVGPU_EVENT_IOCTL_MAX_ARG_SIZE);
+
+	memset(buf, 0, sizeof(buf));
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *) arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case NVGPU_EVENT_IOCTL_SET_FILTER:
+		err = nvgpu_clk_arb_set_event_filter(dev,
+				(struct nvgpu_gpu_set_event_filter_args *)buf);
+		break;
+	default:
+		dev_dbg(dev_from_gk20a(g),
+			"unrecognized event ioctl cmd: 0x%x", cmd);
+		err = -ENOTTY;
+	}
+
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+		err = copy_to_user((void __user *) arg, buf, _IOC_SIZE(cmd));
+
+	return err;
+}
+
 static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
 		struct file *filp)
 {
@@ -1159,8 +1303,8 @@ static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
 	list_del_rcu(&dev->link);
 	spin_unlock(&arb->users_lock);
 
-	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
 	synchronize_rcu();
+	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
 	kfree(dev);
 
 	return 0;
@@ -1529,7 +1673,6 @@ void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock)
 		mutex_lock(&arb->pstate_lock);
 	else
 		mutex_unlock(&arb->pstate_lock);
-
 }
 
 #ifdef CONFIG_DEBUG_FS