path: root/scripts/conmakehash.c
author     Martin K. Petersen <martin.petersen@oracle.com>    2009-07-31 11:49:11 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                 2009-08-01 04:24:35 -0400
commit     7c958e32649e0c35801762878fb0b6da8c55a515 (patch)
tree       610b1e685b5f634ca591865cc9f5ce22f1e3e6c1 /scripts/conmakehash.c
parent     fef246672b009cf3f7a74e2fc9a76932ef2eeed2 (diff)
block: Add a wrapper for setting minimum request size without a queue
Introduce blk_limits_io_min() and make blk_queue_io_min() call it.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'scripts/conmakehash.c')
0 files changed, 0 insertions, 0 deletions
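The relationship described in the commit message is easiest to see as code. The sketch below is an illustration, not the verbatim patch: blk_limits_io_min() operates on a struct queue_limits directly, so stacking drivers that have no request_queue of their own can set the minimum I/O size, and blk_queue_io_min() simply forwards to it. The clamping against the logical and physical block sizes is an assumption based on how the block layer treats io_min.

/*
 * Sketch (not the verbatim patch): set the minimum request size on a bare
 * set of queue limits; assumed to clamp against the logical and physical
 * block sizes, as the queue-based helper does.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}

/* The existing queue-based helper becomes a thin wrapper. */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}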
static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
{
	BUG_ON(!is_lock(entry));
	return (struct litmus_lock*) entry->obj->obj;
}

static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
{
	struct litmus_lock* lock;
	int err;

	err = litmus->allocate_lock(&lock, type, arg);
	if (err == 0)
		*obj_ref = lock;
	return err;
}

static int open_generic_lock(struct od_table_entry* entry, void* __user arg)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->open)
		return lock->ops->open(lock, arg);
	else
		return 0; /* default: any task can open it */
}

static int close_generic_lock(struct od_table_entry* entry)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->close)
		return lock->ops->close(lock);
	else
		return 0; /* default: closing succeeds */
}

static void destroy_generic_lock(obj_type_t type, void* obj)
{
	struct litmus_lock* lock = (struct litmus_lock*) obj;
	lock->ops->deallocate(lock);
}

asmlinkage long sys_litmus_lock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_SYSCALL_IN_START;

	TS_SYSCALL_IN_END;

	TS_LOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to lock 0x%p\n", l);
		err = l->ops->lock(l);
	}

	/* Note: the task may have been suspended or preempted in between!
	 * Take this into account when computing overheads. */
	TS_LOCK_END;

	TS_SYSCALL_OUT_START;

	return err;
}

asmlinkage long sys_litmus_unlock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_SYSCALL_IN_START;

	TS_SYSCALL_IN_END;

	TS_UNLOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to unlock 0x%p\n", l);
		err = l->ops->unlock(l);
	}

	/* Note: the task may have been preempted in between!  Take this into
	 * account when computing overheads. */
	TS_UNLOCK_END;

	TS_SYSCALL_OUT_START;

	return err;
}

struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
{
	wait_queue_entry_t* q;
	struct task_struct* t = NULL;

	if (waitqueue_active(wq)) {
		q = list_entry(wq->head.next, wait_queue_entry_t, entry);
		t = (struct task_struct*) q->private;
		__remove_wait_queue(wq, q);
	}
	return(t);
}

unsigned int __add_wait_queue_prio_exclusive(
	wait_queue_head_t* head,
	prio_wait_queue_t *new)
{
	struct list_head *pos;
	unsigned int passed = 0;

	new->wq.flags |= WQ_FLAG_EXCLUSIVE;

	/* find a spot where the new entry is less than the next */
	list_for_each(pos, &head->head) {
		prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
						       wq.entry);

		if (unlikely(lt_before(new->priority, queued->priority) ||
			     (new->priority == queued->priority &&
			      new->tie_breaker < queued->tie_breaker))) {
			/* pos is not less than new, thus insert here */
			__list_add(&new->wq.entry, pos->prev, pos);
			goto out;
		}
		passed++;
	}

	/* If we get to this point, either the list is empty or every queued
	 * element is less than new.
	 * Let's add new to the end. */
	list_add_tail(&new->wq.entry, &head->head);
out:
	return passed;
}

#else

struct fdso_ops generic_lock_ops = {};

asmlinkage long sys_litmus_lock(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_litmus_unlock(int sem_od)
{
	return -ENOSYS;
}

#endif
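The priority-ordered insertion above keeps the wait queue sorted so that the entry with the earliest priority value sits at the head (ties broken by tie_breaker), which lets __waitqueue_remove_first() pull off the best-priority waiter. The fragment below is a hypothetical usage sketch: only __add_wait_queue_prio_exclusive(), __waitqueue_remove_first(), and the prio_wait_queue_t fields (wq, priority, tie_breaker) come from the code above; example_protocol_wait(), its parameters, and the lt_t types for them are illustrative assumptions.

/*
 * Hypothetical usage sketch: block the current task on a protocol-private
 * wait queue in priority order.  This helper and its parameters are
 * illustrative, not an existing LITMUS^RT function.
 */
static long example_protocol_wait(wait_queue_head_t *wait, lt_t prio, lt_t tie)
{
	prio_wait_queue_t entry;
	unsigned long flags;

	init_waitqueue_entry(&entry.wq, current);
	entry.priority    = prio;
	entry.tie_breaker = tie;

	spin_lock_irqsave(&wait->lock, flags);
	set_current_state(TASK_UNINTERRUPTIBLE);
	__add_wait_queue_prio_exclusive(wait, &entry);
	spin_unlock_irqrestore(&wait->lock, flags);

	/* The lock holder later picks the head of the queue with
	 * __waitqueue_remove_first() and wakes it with wake_up_process(). */
	schedule();

	return 0;
}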