path: root/mm/rmap.c
Commit message  |  Author  |  Date
* [PATCH] mm: more commenting on lock ordering  |  Nick Piggin  |  2006-10-20
* [PATCH] move rmap BUG_ON outside DEBUG_VM  |  Dave Jones  |  2006-10-11
* [PATCH] mm: tracking shared dirty pages  |  Peter Zijlstra  |  2006-09-26
* [PATCH] zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED  |  Christoph Lameter  |  2006-06-30
* [PATCH] zoned vm counters: convert nr_mapped to per zone counter  |  Christoph Lameter  |  2006-06-30
* [PATCH] Allow migration of mlocked pages  |  Christoph Lameter  |  2006-06-25
* [PATCH] More page migration: use migration entries for file pages  |  Christoph Lameter  |  2006-06-23
* [PATCH] More page migration: do not inc/dec rss counters  |  Christoph Lameter  |  2006-06-23
* [PATCH] Swapless page migration: rip out swap based logic  |  Christoph Lameter  |  2006-06-23
* [PATCH] Swapless page migration: add R/W migration entries  |  Christoph Lameter  |  2006-06-23
* [PATCH] page migration cleanup: rename "ignrefs" to "migration"  |  Christoph Lameter  |  2006-06-23
* [PATCH] mm: more CONFIG_DEBUG_VM  |  Nick Piggin  |  2006-03-22
* [PATCH] mm: kill kmem_cache_t usage  |  Pekka Enberg  |  2006-03-22
* [PATCH] page_add_file_rmap(): remove BUG_ON()s  |  Hugh Dickins  |  2006-03-09
* [PATCH] remove_from_swap: fix locking  |  Christoph Lameter  |  2006-02-28
* [PATCH] Direct Migration V9: Avoid writeback / page_migrate() method  |  Christoph Lameter  |  2006-02-01
* [PATCH] Direct Migration V9: remove_from_swap() to remove swap ptes  |  Christoph Lameter  |  2006-02-01
* [PATCH] Direct Migration V9: migrate_pages() extension  |  Christoph Lameter  |  2006-02-01
* [PATCH] mm: migration page refcounting fix  |  Nick Piggin  |  2006-01-18
* [PATCH] mutex subsystem, semaphore to mutex: VFS, ->i_sem  |  Jes Sorensen  |  2006-01-09
* [PATCH] rmap: additional diagnostics in page_remove_rmap()  |  Dave Jones  |  2006-01-08
* [PATCH] mm: page_state opt  |  Nick Piggin  |  2006-01-06
* [PATCH] mm: rmap optimisation  |  Nick Piggin  |  2006-01-06
* [PATCH] Fix missing pfn variables caused by vm changes  |  Ben Collins  |  2005-11-29
* [PATCH] temporarily disable swap token on memory pressure  |  Rik van Riel  |  2005-11-28
* mm: re-architect the VM_UNPAGED logic  |  Linus Torvalds  |  2005-11-28
* [PATCH] unpaged: anon in VM_UNPAGED  |  Hugh Dickins  |  2005-11-22
* [PATCH] unpaged: VM_NONLINEAR VM_RESERVED  |  Hugh Dickins  |  2005-11-22
* [PATCH] mm: update comments to pte lock  |  Hugh Dickins  |  2005-10-30
* [PATCH] mm: fix rss and mmlist locking  |  Hugh Dickins  |  2005-10-30
* [PATCH] mm: split page table lock  |  Hugh Dickins  |  2005-10-30
* [PATCH] mm: rmap with inner ptlock  |  Hugh Dickins  |  2005-10-30
* [PATCH] mm: update_hiwaters just in time  |  Hugh Dickins  |  2005-10-30
* [PATCH] core remove PageReserved  |  Nick Piggin  |  2005-10-30
* [PATCH] mm: rss = file_rss + anon_rss  |  Hugh Dickins  |  2005-10-30
* [PATCH] swaptoken tuning  |  Rik Van Riel  |  2005-10-30
* [PATCH] mm: cleanup rmap  |  Nick Piggin  |  2005-09-05
* [PATCH] mm: micro-optimise rmap  |  Nick Piggin  |  2005-09-05
* [PATCH] mm: comment rmap  |  Nick Piggin  |  2005-09-05
* [PATCH] rmap: don't test rss  |  Hugh Dickins  |  2005-09-05
* [PATCH] swap: swap_lock replace list+device  |  Hugh Dickins  |  2005-09-05
* [PATCH] xip: fs/mm: execute in place  |  Carsten Otte  |  2005-06-24
* [PATCH] can_share_swap_page: use page_mapcount  |  Hugh Dickins  |  2005-06-21
* [PATCH] try_to_unmap_cluster() passes out-of-bounds pte to pte_unmap()  |  William Lee Irwin III  |  2005-05-24
* [PATCH] mm: fix rss counter being incremented when unmapping  |  Bjorn Steinbrink  |  2005-05-17
* [PATCH] mm: rmap.c cleanup  |  Nikita Danilov  |  2005-05-01
* Linux-2.6.12-rc2 (tag: v2.6.12-rc2)  |  Linus Torvalds  |  2005-04-16
int gk20a_disable_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	nvgpu_list_for_each_entry(ch, &tsg->ch_list,
			channel_gk20a, ch_entry) {
		g->ops.fifo.disable_channel(ch);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);

	return 0;
}

static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
{
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	unsigned int i;

	for (i = 0; i < f->max_runlists; ++i) {
		runlist = &f->runlist_info[i];
		if (test_bit(ch->chid, runlist->active_channels))
			return true;
	}

	return false;
}

/*
 * API to mark channel as part of TSG
 *
 * Note that channel is not runnable when we bind it to TSG
 */
int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;

	nvgpu_log_fn(g, " ");

	/* check if channel is already bound to some TSG */
	if (gk20a_is_channel_marked_as_tsg(ch)) {
		return -EINVAL;
	}

	/* channel cannot be bound to TSG if it is already active */
	if (gk20a_is_channel_active(tsg->g, ch)) {
		return -EINVAL;
	}

	ch->tsgid = tsg->tsgid;

	/* all the channel part of TSG should need to be same runlist_id */
	if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
		tsg->runlist_id = ch->runlist_id;
	else if (tsg->runlist_id != ch->runlist_id) {
		nvgpu_err(tsg->g,
			"Error: TSG channel should be share same runlist ch[%d] tsg[%d]",
			ch->runlist_id, tsg->runlist_id);
		return -EINVAL;
	}

	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
	nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
	nvgpu_rwsem_up_write(&tsg->ch_list_lock);

	nvgpu_ref_get(&tsg->refcount);

	nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
			tsg->tsgid, ch->chid);

	nvgpu_log_fn(g, "done");
	return 0;
}

int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
	int err;

	err = g->ops.fifo.tsg_unbind_channel(ch);
	if (err) {
		nvgpu_err(g, "Channel %d unbind failed, tearing down TSG %d",
			ch->chid, tsg->tsgid);

		gk20a_fifo_abort_tsg(ch->g, ch->tsgid, true);

		/* If channel unbind fails, channel is still part of runlist */
		channel_gk20a_update_runlist(ch, false);

		nvgpu_rwsem_down_write(&tsg->ch_list_lock);
		nvgpu_list_del(&ch->ch_entry);
		nvgpu_rwsem_up_write(&tsg->ch_list_lock);
	}

	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
	ch->tsgid = NVGPU_INVALID_TSG_ID;

	nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
			tsg->tsgid, ch->chid);

	return 0;
}

int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
{
	struct tsg_gk20a *tsg = NULL;
	int err;

	if (tsgid >= g->fifo.num_channels)
		return -EINVAL;

	tsg = &g->fifo.tsg[tsgid];

	tsg->in_use = false;
	tsg->tsgid = tsgid;

	nvgpu_init_list_node(&tsg->ch_list);
	nvgpu_rwsem_init(&tsg->ch_list_lock);

	nvgpu_init_list_node(&tsg->event_id_list);
	err = nvgpu_mutex_init(&tsg->event_id_list_lock);
	if (err) {
		tsg->in_use = true; /* make this TSG unusable */
		return err;
	}

	return 0;
}

int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
{
	struct gk20a *g = tsg->g;
	int ret;

	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);

	switch (level) {
	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
		ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
							0, level);
		if (!ret)
			tsg->interleave_level = level;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id,
						      ~0, true, true);
}

int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
{
	struct gk20a *g = tsg->g;

	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us",
			tsg->tsgid, timeslice);

	return g->ops.fifo.tsg_set_timeslice(tsg, timeslice);
}

u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;

	if (!tsg->timeslice_us)
		return g->ops.fifo.default_timeslice_us(g);

	return tsg->timeslice_us;
}

static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
{
	nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
	f->tsg[tsg->tsgid].in_use = false;
	nvgpu_mutex_release(&f->tsg_inuse_mutex);
}

static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f)
{
	struct tsg_gk20a *tsg = NULL;
	unsigned int tsgid;

	nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (!f->tsg[tsgid].in_use) {
			f->tsg[tsgid].in_use = true;
			tsg = &f->tsg[tsgid];
			break;
		}
	}
	nvgpu_mutex_release(&f->tsg_inuse_mutex);

	return tsg;
}

struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
{
	struct tsg_gk20a *tsg;
	int err;

	tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo);
	if (!tsg)
		return NULL;

	tsg->g = g;
	tsg->num_active_channels = 0;
	nvgpu_ref_init(&tsg->refcount);

	tsg->vm = NULL;
	tsg->interleave_level = NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW;
	tsg->timeslice_us = 0;
	tsg->timeslice_timeout = 0;
	tsg->timeslice_scale = 0;
	tsg->runlist_id = ~0;
	tsg->tgid = pid;

	if (g->ops.fifo.init_eng_method_buffers)
		g->ops.fifo.init_eng_method_buffers(g, tsg);

	if (g->ops.fifo.tsg_open) {
		err = g->ops.fifo.tsg_open(tsg);
		if (err) {
			nvgpu_err(g, "tsg %d fifo open failed %d",
				tsg->tsgid, err);
			goto clean_up;
		}
	}

	nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);

	return tsg;

clean_up:
	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
	return NULL;
}

void gk20a_tsg_release(struct nvgpu_ref *ref)
{
	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
	struct gk20a *g = tsg->g;
	struct gk20a_event_id_data *event_id_data, *event_id_data_temp;

	if (g->ops.fifo.tsg_release)
		g->ops.fifo.tsg_release(tsg);

	if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem))
		gr_gk20a_free_tsg_gr_ctx(tsg);

	if (g->ops.fifo.deinit_eng_method_buffers)
		g->ops.fifo.deinit_eng_method_buffers(g, tsg);

	if (tsg->vm) {
		nvgpu_vm_put(tsg->vm);
		tsg->vm = NULL;
	}

	/* unhook all events created on this TSG */
	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp,
			&tsg->event_id_list, gk20a_event_id_data, event_id_node) {
		nvgpu_list_del(&event_id_data->event_id_node);
	}
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	release_used_tsg(&g->fifo, tsg);

	tsg->runlist_id = ~0;

	nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
}

struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
{
	struct tsg_gk20a *tsg = NULL;

	if (gk20a_is_channel_marked_as_tsg(ch)) {
		struct gk20a *g = ch->g;
		struct fifo_gk20a *f = &g->fifo;

		tsg = &f->tsg[ch->tsgid];
	}

	return tsg;
}
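For context, the TSG API above is typically exercised as open, bind, optional scheduling setup, unbind, and a final reference drop. The block below is a minimal sketch, not code from the driver tree: it assumes the nvgpu headers that declare the functions above, an already-probed struct gk20a *g, and an idle channel ch that is not yet bound to any TSG. The helper name example_tsg_lifecycle, the timeslice value, and the -EBUSY return code are illustrative only.

/*
 * Hypothetical usage sketch of the TSG lifecycle, built only from the
 * calls shown above. Names and error codes marked as illustrative are
 * not taken from the nvgpu source.
 */
static int example_tsg_lifecycle(struct gk20a *g, struct channel_gk20a *ch,
				 pid_t pid)
{
	struct tsg_gk20a *tsg;
	int err;

	/* reserve a free TSG slot; returns NULL when none is available */
	tsg = gk20a_tsg_open(g, pid);
	if (!tsg)
		return -EBUSY; /* illustrative error code */

	/* the channel must be inactive and not already marked as TSG */
	err = gk20a_tsg_bind_channel(tsg, ch);
	if (err)
		goto put_tsg;

	/* optional scheduling tweaks, delegated to the per-chip fifo ops */
	err = gk20a_tsg_set_timeslice(tsg, 2000 /* us, arbitrary for the sketch */);
	if (!err)
		err = gk20a_tsg_set_runlist_interleave(tsg,
				NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH);

	/* ... submit work on ch while it is part of the TSG ... */

	/* unbinding drops the reference taken by gk20a_tsg_bind_channel() */
	gk20a_tsg_unbind_channel(ch);
put_tsg:
	/* drop the reference from gk20a_tsg_open(); the last put releases the TSG */
	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
	return err;
}

The reference counting in the sketch mirrors the source above: gk20a_tsg_open() initializes the refcount, gk20a_tsg_bind_channel() takes an extra reference, gk20a_tsg_unbind_channel() drops it, and the final nvgpu_ref_put() is what triggers gk20a_tsg_release().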