path: root/fs/jfs
Mode        Name              Size (bytes)
-rw-r--r--  Kconfig             1597
-rw-r--r--  Makefile             440
-rw-r--r--  acl.c               4665
-rw-r--r--  endian24.h          1516
-rw-r--r--  file.c              3983
-rw-r--r--  inode.c             9694
-rw-r--r--  ioctl.c             3260
-rw-r--r--  jfs_acl.h           1232
-rw-r--r--  jfs_btree.h         4099
-rw-r--r--  jfs_debug.c         2823
-rw-r--r--  jfs_debug.h         3433
-rw-r--r--  jfs_dinode.h        5724
-rw-r--r--  jfs_dmap.c        111577
-rw-r--r--  jfs_dmap.h         11418
-rw-r--r--  jfs_dtree.c       102632
-rw-r--r--  jfs_dtree.h         6754
-rw-r--r--  jfs_extent.c       17706
-rw-r--r--  jfs_extent.h        1317
-rw-r--r--  jfs_filsys.h        9039
-rw-r--r--  jfs_imap.c         86352
-rw-r--r--  jfs_imap.h          6598
-rw-r--r--  jfs_incore.h        7442
-rw-r--r--  jfs_inode.c         4289
-rw-r--r--  jfs_inode.h         2598
-rw-r--r--  jfs_lock.h          1574
-rw-r--r--  jfs_logmgr.c       60798
-rw-r--r--  jfs_logmgr.h       15076
-rw-r--r--  jfs_metapage.c     20091
-rw-r--r--  jfs_metapage.h      4320
-rw-r--r--  jfs_mount.c        12996
-rw-r--r--  jfs_superblock.h    4211
-rw-r--r--  jfs_txnmgr.c       77319
-rw-r--r--  jfs_txnmgr.h        8428
-rw-r--r--  jfs_types.h         4092
-rw-r--r--  jfs_umount.c        4074
-rw-r--r--  jfs_unicode.c       3348
-rw-r--r--  jfs_unicode.h       3824
-rw-r--r--  jfs_uniupr.c        7707
-rw-r--r--  jfs_xattr.h         2656
-rw-r--r--  jfs_xtree.c        94348
-rw-r--r--  jfs_xtree.h         4083
-rw-r--r--  namei.c            38047
-rw-r--r--  resize.c           15088
-rw-r--r--  super.c            21339
-rw-r--r--  symlink.c           1657
-rw-r--r--  xattr.c            28002
opt">; int cpu; pending = local_softirq_pending(); account_system_vtime(current); __local_bh_disable((unsigned long)__builtin_return_address(0)); lockdep_softirq_enter(); cpu = smp_processor_id(); restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); local_irq_enable(); h = softirq_vec; do { if (pending & 1) { int prev_count = preempt_count(); trace_softirq_entry(h, softirq_vec); h->action(h); trace_softirq_exit(h, softirq_vec); if (unlikely(prev_count != preempt_count())) { printk(KERN_ERR "huh, entered softirq %td %s %p" "with preempt_count %08x," " exited with %08x?\n", h - softirq_vec, softirq_to_name[h - softirq_vec], h->action, prev_count, preempt_count()); preempt_count() = prev_count; } rcu_bh_qsctr_inc(cpu); } h++; pending >>= 1; } while (pending); local_irq_disable(); pending = local_softirq_pending(); if (pending && --max_restart) goto restart; if (pending) wakeup_softirqd(); lockdep_softirq_exit(); account_system_vtime(current); _local_bh_enable(); } #ifndef __ARCH_HAS_DO_SOFTIRQ asmlinkage void do_softirq(void) { __u32 pending; unsigned long flags; if (in_interrupt()) return; local_irq_save(flags); pending = local_softirq_pending(); if (pending) __do_softirq(); local_irq_restore(flags); } #endif /* * Enter an interrupt context. */ void irq_enter(void) { int cpu = smp_processor_id(); rcu_irq_enter(); if (idle_cpu(cpu) && !in_interrupt()) { __irq_enter(); tick_check_idle(cpu); } else __irq_enter(); } #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED # define invoke_softirq() __do_softirq() #else # define invoke_softirq() do_softirq() #endif /* * Exit an interrupt context. Process softirqs if needed and possible: */ void irq_exit(void) { account_system_vtime(current); trace_hardirq_exit(); sub_preempt_count(IRQ_EXIT_OFFSET); if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); #ifdef CONFIG_NO_HZ /* Make sure that timer wheel updates are propagated */ rcu_irq_exit(); if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) tick_nohz_stop_sched_tick(0); #endif preempt_enable_no_resched(); } /* * This function must run with irqs disabled! */ inline void raise_softirq_irqoff(unsigned int nr) { __raise_softirq_irqoff(nr); /* * If we're in an interrupt or softirq, we're done * (this also catches softirq-disabled code). We will * actually run the softirq once we return from * the irq or softirq. * * Otherwise we wake up ksoftirqd to make sure we * schedule the softirq soon. 
*/ if (!in_interrupt()) wakeup_softirqd(); } void raise_softirq(unsigned int nr) { unsigned long flags; local_irq_save(flags); raise_softirq_irqoff(nr); local_irq_restore(flags); } void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; } /* Tasklets */ struct tasklet_head { struct tasklet_struct *head; struct tasklet_struct **tail; }; static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); void __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); t->next = NULL; *__get_cpu_var(tasklet_vec).tail = t; __get_cpu_var(tasklet_vec).tail = &(t->next); raise_softirq_irqoff(TASKLET_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__tasklet_schedule); void __tasklet_hi_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); t->next = NULL; *__get_cpu_var(tasklet_hi_vec).tail = t; __get_cpu_var(tasklet_hi_vec).tail = &(t->next); raise_softirq_irqoff(HI_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__tasklet_hi_schedule); static void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; local_irq_disable(); list = __get_cpu_var(tasklet_vec).head; __get_cpu_var(tasklet_vec).head = NULL; __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; local_irq_enable(); while (list) { struct tasklet_struct *t = list; list = list->next; if (tasklet_trylock(t)) { if (!atomic_read(&t->count)) { if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) BUG(); t->func(t->data); tasklet_unlock(t); continue; } tasklet_unlock(t); } local_irq_disable(); t->next = NULL; *__get_cpu_var(tasklet_vec).tail = t; __get_cpu_var(tasklet_vec).tail = &(t->next); __raise_softirq_irqoff(TASKLET_SOFTIRQ); local_irq_enable(); } } static void tasklet_hi_action(struct softirq_action *a) { struct tasklet_struct *list; local_irq_disable(); list = __get_cpu_var(tasklet_hi_vec).head; __get_cpu_var(tasklet_hi_vec).head = NULL; __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head; local_irq_enable(); while (list) { struct tasklet_struct *t = list; list = list->next; if (tasklet_trylock(t)) { if (!atomic_read(&t->count)) { if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) BUG(); t->func(t->data); tasklet_unlock(t); continue; } tasklet_unlock(t); } local_irq_disable(); t->next = NULL; *__get_cpu_var(tasklet_hi_vec).tail = t; __get_cpu_var(tasklet_hi_vec).tail = &(t->next); __raise_softirq_irqoff(HI_SOFTIRQ); local_irq_enable(); } } void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data) { t->next = NULL; t->state = 0; atomic_set(&t->count, 0); t->func = func; t->data = data; } EXPORT_SYMBOL(tasklet_init); void tasklet_kill(struct tasklet_struct *t) { if (in_interrupt()) printk("Attempt to kill tasklet from interrupt\n"); while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do yield(); while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); clear_bit(TASKLET_STATE_SCHED, &t->state); } EXPORT_SYMBOL(tasklet_kill); DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); EXPORT_PER_CPU_SYMBOL(softirq_work_list); static void __local_trigger(struct call_single_data *cp, int softirq) { struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]); list_add_tail(&cp->list, head); /* Trigger the softirq only if the list was previously empty. 
*/ if (head->next == &cp->list) raise_softirq_irqoff(softirq); } #ifdef CONFIG_USE_GENERIC_SMP_HELPERS static void remote_softirq_receive(void *data) { struct call_single_data *cp = data; unsigned long flags; int softirq; softirq = cp->priv; local_irq_save(flags); __local_trigger(cp, softirq); local_irq_restore(flags); } static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq) { if (cpu_online(cpu)) { cp->func = remote_softirq_receive; cp->info = cp; cp->flags = 0; cp->priv = softirq; __smp_call_function_single(cpu, cp, 0); return 0; } return 1; } #else /* CONFIG_USE_GENERIC_SMP_HELPERS */ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq) { return 1; } #endif /** * __send_remote_softirq - try to schedule softirq work on a remote cpu * @cp: private SMP call function data area * @cpu: the remote cpu * @this_cpu: the currently executing cpu * @softirq: the softirq for the work * * Attempt to schedule softirq work on a remote cpu. If this cannot be * done, the work is instead queued up on the local cpu. * * Interrupts must be disabled. */ void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq) { if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq)) __local_trigger(cp, softirq); } EXPORT_SYMBOL(__send_remote_softirq); /** * send_remote_softirq - try to schedule softirq work on a remote cpu * @cp: private SMP call function data area * @cpu: the remote cpu * @softirq: the softirq for the work * * Like __send_remote_softirq except that disabling interrupts and * computing the current cpu is done for the caller. */ void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq) { unsigned long flags; int this_cpu; local_irq_save(flags); this_cpu = smp_processor_id(); __send_remote_softirq(cp, cpu, this_cpu, softirq); local_irq_restore(flags); } EXPORT_SYMBOL(send_remote_softirq); static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { /* * If a CPU goes away, splice its entries to the current CPU * and trigger a run of the softirq */ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { int cpu = (unsigned long) hcpu; int i; local_irq_disable(); for (i = 0; i < NR_SOFTIRQS; i++) { struct list_head *head = &per_cpu(softirq_work_list[i], cpu); struct list_head *local_head; if (list_empty(head)) continue; local_head = &__get_cpu_var(softirq_work_list[i]); list_splice_init(head, local_head); raise_softirq_irqoff(i); } local_irq_enable(); } return NOTIFY_OK; } static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = { .notifier_call = remote_softirq_cpu_notify, }; void __init softirq_init(void) { int cpu; for_each_possible_cpu(cpu) { int i; per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; for (i = 0; i < NR_SOFTIRQS; i++) INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu)); } register_hotcpu_notifier(&remote_softirq_cpu_notifier); open_softirq(TASKLET_SOFTIRQ, tasklet_action); open_softirq(HI_SOFTIRQ, tasklet_hi_action); } static int ksoftirqd(void * __bind_cpu) { set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { preempt_disable(); if (!local_softirq_pending()) { preempt_enable_no_resched(); schedule(); preempt_disable(); } __set_current_state(TASK_RUNNING); while (local_softirq_pending()) { /* Preempt disable stops cpu going offline. 
If already offline, we'll be on wrong CPU: don't process */ if (cpu_is_offline((long)__bind_cpu)) goto wait_to_die; do_softirq(); preempt_enable_no_resched(); cond_resched(); preempt_disable(); rcu_qsctr_inc((long)__bind_cpu); } preempt_enable(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return 0; wait_to_die: preempt_enable(); /* Wait for kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return 0; } #ifdef CONFIG_HOTPLUG_CPU /* * tasklet_kill_immediate is called to remove a tasklet which can already be * scheduled for execution on @cpu. * * Unlike tasklet_kill, this function removes the tasklet * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state. * * When this function is called, @cpu must be in the CPU_DEAD state. */
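As a reference point, here is a minimal usage sketch of the tasklet API whose implementation appears above: DECLARE_TASKLET()/tasklet_init() set up the struct, tasklet_schedule() is the inline wrapper that feeds __tasklet_schedule(), and tasklet_kill() tears the tasklet down. Everything named demo_* below is hypothetical example code, not part of the kernel source above; a real driver would normally call tasklet_schedule() from its interrupt handler rather than from module init.

/*
 * Hypothetical example module (a sketch, not from the source above): defer
 * work into a tasklet, which tasklet_action() above then runs in
 * TASKLET_SOFTIRQ context.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>

static void demo_tasklet_fn(unsigned long data)
{
	/* Runs in softirq context, so it must not sleep or block. */
	printk(KERN_INFO "demo tasklet ran, data=%lu\n", data);
}

/* Static setup; tasklet_init(&demo_tasklet, demo_tasklet_fn, 42) is the runtime equivalent. */
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 42);

static int __init demo_init(void)
{
	/*
	 * Queues the tasklet on this CPU's tasklet_vec list and raises
	 * TASKLET_SOFTIRQ (see __tasklet_schedule() above). In a driver this
	 * call would typically sit in the interrupt handler.
	 */
	tasklet_schedule(&demo_tasklet);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for any scheduled or running instance to finish before unload. */
	tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that tasklet_schedule() only queues the tasklet if TASKLET_STATE_SCHED was not already set, which is why tasklet_action() above calls BUG() if it finds that bit unexpectedly clear when dequeuing.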