Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c             |  2
-rw-r--r--  kernel/perf_event.c        | 12
-rw-r--r--  kernel/pid.c               |  5
-rw-r--r--  kernel/power/Kconfig       |  6
-rw-r--r--  kernel/sched.c             | 20
-rw-r--r--  kernel/sched_fair.c        | 14
-rw-r--r--  kernel/signal.c            |  4
-rw-r--r--  kernel/time/posix-clock.c  | 24
-rw-r--r--  kernel/trace/blktrace.c    | 33
9 files changed, 60 insertions, 60 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
 		restart->futex.val = val;
 		restart->futex.time = abs_time->tv64;
 		restart->futex.bitset = bitset;
-		restart->futex.flags = flags;
+		restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
 		ret = -ERESTART_RESTARTBLOCK;
 
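The flag matters on the restart path: when a timed FUTEX_WAIT is interrupted by a signal, futex_wait_restart() must rebuild the absolute timeout rather than wait forever. A sketch of the consuming side as it looks in futex.c of this era (shown for context, not part of this diff):

	static long futex_wait_restart(struct restart_block *restart)
	{
		u32 __user *uaddr = restart->futex.uaddr;
		ktime_t t, *tp = NULL;

		/* Only reconstruct a deadline if one was recorded above. */
		if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
			t.tv64 = restart->futex.time;
			tp = &t;
		}
		restart->fn = do_no_restart_syscall;

		return (long)futex_wait(uaddr, restart->futex.flags,
					restart->futex.val, tp,
					restart->futex.bitset);
	}

Without FLAGS_HAS_TIMEOUT set at suspend time, the restarted wait passes tp == NULL and blocks indefinitely.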
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27960f114efd..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 		}
 
 		if (mode & PERF_CGROUP_SWIN) {
+			WARN_ON_ONCE(cpuctx->cgrp);
 			/* set cgrp before ctxsw in to
 			 * allow event_filter_match() to not
 			 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
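The point of the two additions is pairing: perf_event_context_sched_in() also schedules cgroup events back in, so they must be scheduled out first or the sched-in path finds events that are already active (the case the new WARN_ON_ONCE would catch). A simplified sketch of the ordering the function now follows (locking and the enable logic elided):

	local_irq_save(flags);

	perf_cgroup_sched_out(current);		/* cgroup events out first */
	task_ctx_sched_out(ctx, EVENT_ALL);	/* then the task context   */

	/* ... flip events to enabled under ctx->lock ... */

	perf_event_context_sched_in(ctx, ctx->task);	/* everything back in,
							   cgroup events included */
	local_irq_restore(flags);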
diff --git a/kernel/pid.c b/kernel/pid.c
index 02f221274265..57a8346a270e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 	return -1;
 }
 
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
 {
 	int offset;
 	struct pidmap *map, *end;
 
+	if (last >= PID_MAX_LIMIT)
+		return -1;
+
 	offset = (last + 1) & BITS_PER_PAGE_MASK;
 	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
 	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
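The "last" argument ultimately comes from userspace (it is derived from the f_pos of a /proc directory), so it can be any 32-bit value. With the old signed parameter, a huge offset could drive the pidmap index far past the end of the array. An illustration of the case the new guard rejects (values are illustrative):

	/* e.g. userspace did lseek(proc_dir_fd, 0x7fffffff, SEEK_SET)
	 * before getdents(); that offset flows into "last". */
	unsigned int last = 0x7fffffff;

	/* Unchecked, (last + 1) / BITS_PER_PAGE would index tens of
	 * thousands of entries past pid_ns->pidmap[PIDMAP_ENTRIES],
	 * an out-of-bounds read. The added test stops it cold: */
	if (last >= PID_MAX_LIMIT)
		return -1;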
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4603f08dc47b..6de9a8fc3417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HIBERNATE_CALLBACKS
+	bool
+
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+	select HIBERNATE_CALLBACKS
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
 
 config PM_SLEEP
 	def_bool y
-	depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+	depends on SUSPEND || HIBERNATE_CALLBACKS
 
 config PM_SLEEP_SMP
 	def_bool y
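The new symbol separates "this code needs the hibernation callbacks" from "full hibernation support is configured", so a platform that only does save/restore (Xen being the case previously special-cased in PM_SLEEP's depends line) can select HIBERNATE_CALLBACKS on its own. A hypothetical consumer keyed on the resulting CONFIG symbol might look like this (the foo_* names are made up):

	#ifdef CONFIG_HIBERNATE_CALLBACKS
	/* These dev_pm_ops members are only invoked when hibernation
	 * callbacks are configured in. */
	static const struct dev_pm_ops foo_pm_ops = {
		.freeze		= foo_freeze,
		.thaw		= foo_thaw,
		.restore	= foo_restore,
	};
	#endif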
diff --git a/kernel/sched.c b/kernel/sched.c
index 48013633d792..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4111,20 +4111,20 @@ need_resched:
 					try_to_wake_up_local(to_wakeup);
 			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+			/*
+			 * If we are going to sleep and we have plugged IO queued, make
+			 * sure to submit it to avoid deadlocks.
+			 */
+			if (blk_needs_flush_plug(prev)) {
+				raw_spin_unlock(&rq->lock);
+				blk_schedule_flush_plug(prev);
+				raw_spin_lock(&rq->lock);
+			}
 		}
 		switch_count = &prev->nvcsw;
 	}
 
-	/*
-	 * If we are going to sleep and we have plugged IO queued, make
-	 * sure to submit it to avoid deadlocks.
-	 */
-	if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
-		raw_spin_unlock(&rq->lock);
-		blk_flush_plug(prev);
-		raw_spin_lock(&rq->lock);
-	}
-
 	pre_schedule(rq, prev);
 
 	if (unlikely(!rq->nr_running))
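Two things change here: the flush now happens only in the branch where the task is actually being deactivated, and blk_schedule_flush_plug() (which may punt the work to kblockd) replaces the direct blk_flush_plug() call made under rq->lock. The pattern being protected is the on-stack plug, roughly as below (submit_pending_bios() is a made-up placeholder):

	struct blk_plug plug;

	blk_start_plug(&plug);		/* bios are queued, not dispatched */
	submit_pending_bios();		/* hypothetical I/O submission     */

	/*
	 * If the task blocks anywhere in here, schedule() flushes the
	 * plug, so nobody deadlocks waiting for I/O that was queued
	 * but never sent to the device.
	 */

	blk_finish_plug(&plug);		/* flushed here at the latest */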
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
 
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
@@ -3340,6 +3337,7 @@ redo:
 	 * still unbalanced. ld_moved simply stays zero, so it is
 	 * correctly treated as an imbalance.
 	 */
+	all_pinned = 1;
 	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
 	ld_moved = move_tasks(this_rq, this_cpu, busiest,
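Taken together, the balance_tasks() hunks and the last hunk restore the intended all_pinned protocol: load_balance() assumes everything is pinned up front, and can_migrate_task() clears the flag the moment it sees one task that may move. The contract in miniature, as load_balance() uses it after this change:

	int all_pinned = 1;	/* assume the worst before moving tasks */

	ld_moved = move_tasks(this_rq, this_cpu, busiest, imbalance,
			      sd, idle, &all_pinned);

	if (unlikely(all_pinned)) {
		/* No candidate could ever run here; stop retrying
		 * this CPU as a source. */
		cpumask_clear_cpu(cpu_of(busiest), cpus);
	}

Previously balance_tasks() recomputed a local "pinned" on every per-cfs_rq pass and could overwrite the caller's answer, wrongly blaming CPU affinity for a failed balance.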
diff --git a/kernel/signal.c b/kernel/signal.c
index 29e233fd7a0f..7165af5f1b11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2711,8 +2711,8 @@ out:
 /**
  *  sys_rt_sigaction - alter an action taken by a process
  *  @sig: signal to be sent
- *  @act: the thread group ID of the thread
- *  @oact: the PID of the thread
+ *  @act: new sigaction
+ *  @oact: used to save the previous sigaction
  *  @sigsetsize: size of sigset_t type
  */
 SYSCALL_DEFINE4(rt_sigaction, int, sig,
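A documentation-only fix: the old @act/@oact descriptions look copy-pasted from a tgkill-style syscall. From userspace (glibc's sigaction() wraps rt_sigaction), the corrected semantics read directly:

	#include <signal.h>
	#include <stdio.h>

	static void on_int(int sig) { (void)sig; }

	int main(void)
	{
		struct sigaction act = { .sa_handler = on_int };
		struct sigaction oact;

		sigemptyset(&act.sa_mask);
		/* act carries the new disposition in; oact carries the
		 * previous one back out. */
		if (sigaction(SIGINT, &act, &oact) == 0)
			printf("old handler: %p\n", (void *)oact.sa_handler);
		return 0;
	}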
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 25028dd4fa18..c340ca658f37 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -19,7 +19,6 @@
  */
 #include <linux/device.h>
 #include <linux/file.h>
-#include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
 {
 	struct posix_clock *clk = fp->private_data;
 
-	mutex_lock(&clk->mutex);
+	down_read(&clk->rwsem);
 
 	if (!clk->zombie)
 		return clk;
 
-	mutex_unlock(&clk->mutex);
+	up_read(&clk->rwsem);
 
 	return NULL;
 }
 
 static void put_posix_clock(struct posix_clock *clk)
 {
-	mutex_unlock(&clk->mutex);
+	up_read(&clk->rwsem);
 }
 
 static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
 	struct posix_clock *clk =
 		container_of(inode->i_cdev, struct posix_clock, cdev);
 
-	mutex_lock(&clk->mutex);
+	down_read(&clk->rwsem);
 
 	if (clk->zombie) {
 		err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
 		fp->private_data = clk;
 	}
 out:
-	mutex_unlock(&clk->mutex);
+	up_read(&clk->rwsem);
 	return err;
 }
 
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
 	int err;
 
 	kref_init(&clk->kref);
-	mutex_init(&clk->mutex);
+	init_rwsem(&clk->rwsem);
 
 	cdev_init(&clk->cdev, &posix_clock_file_operations);
 	clk->cdev.owner = clk->ops.owner;
 	err = cdev_add(&clk->cdev, devid, 1);
-	if (err)
-		goto no_cdev;
 
 	return err;
-no_cdev:
-	mutex_destroy(&clk->mutex);
-	return err;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
 static void delete_clock(struct kref *kref)
 {
 	struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-	mutex_destroy(&clk->mutex);
+
 	if (clk->release)
 		clk->release(clk);
 }
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
 {
 	cdev_del(&clk->cdev);
 
-	mutex_lock(&clk->mutex);
+	down_write(&clk->rwsem);
 	clk->zombie = true;
-	mutex_unlock(&clk->mutex);
+	up_write(&clk->rwsem);
 
 	kref_put(&clk->kref, delete_clock);
 }
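The rwsem keeps the zombie handshake but stops operations on the same clock from serializing against each other: any number of file operations can hold the read side concurrently, while unregistration takes the write side and therefore waits for all readers to drain before marking the clock dead. The pattern in miniature (names from the diff, error handling elided):

	/* Reader side: every file operation brackets its work. */
	struct posix_clock *clk = get_posix_clock(fp);	/* down_read(); NULL if zombie */
	if (clk) {
		/* ... dispatch to clk->ops ... */
		put_posix_clock(clk);			/* up_read() */
	}

	/* Writer side: posix_clock_unregister() excludes all readers,
	 * then flips the flag that makes new readers bail out. */
	down_write(&clk->rwsem);
	clk->zombie = true;
	up_write(&clk->rwsem);

Dropping the mutex also removes the need for mutex_destroy() on the error and teardown paths, which is why the no_cdev unwind label above could go away.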
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7aa40f8e182d..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+				 unsigned int depth, bool explicit)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		__be64 rpdu = cpu_to_be64(depth);
+		u32 what;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-				sizeof(rpdu), &rpdu);
-	}
-}
-
-static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
-{
-	struct blk_trace *bt = q->blk_trace;
-
-	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		if (explicit)
+			what = BLK_TA_UNPLUG_IO;
+		else
+			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
-				sizeof(rpdu), &rpdu);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
 }
 
@@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
-	WARN_ON(ret);
-	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
@@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
-	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
-	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
+	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
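The block layer now fires a single block_unplug tracepoint carrying the queue depth and an "explicit" flag, and the handler maps that flag back onto the two legacy BLK_TA_UNPLUG_* actions, so the binary record format seen by blktrace userspace tools is unchanged. Any other subscriber attaches the same way; a hypothetical second probe:

	static void my_unplug_probe(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
	{
		pr_debug("unplug: depth=%u (%s)\n", depth,
			 explicit ? "explicit" : "timer");
	}

	/* Registered and unregistered exactly like blktrace's handler: */
	ret = register_trace_block_unplug(my_unplug_probe, NULL);
	WARN_ON(ret);
	/* ... later ... */
	unregister_trace_block_unplug(my_unplug_probe, NULL);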
