-rw-r--r--  arch/i386/mm/pageattr.c   8
-rw-r--r--  block/ll_rw_blk.c         5
-rw-r--r--  fs/aio.c                  4
-rw-r--r--  kernel/timer.c            8
-rw-r--r--  kernel/workqueue.c        4
-rw-r--r--  net/core/dev.c            6
-rw-r--r--  net/core/link_watch.c     5
7 files changed, 19 insertions, 21 deletions
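Below is a minimal, self-contained userspace sketch of the pattern the hunks that follow convert to: a shared list is drained by handing its entries over to a private head with list_replace_init(), which, unlike list_splice_init(), does not require the destination head to be initialised first — which is why the declarations in the diff change from LIST_HEAD(x)/LIST_HEAD_INIT(x) to a plain struct list_head. The helpers here are simplified re-implementations of the kernel's <linux/list.h> API, and struct item and its fields are invented purely for the demo; this is an illustration of the idea, not kernel code.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/*
 * Make @new take over @old's entries and leave @old empty again.
 * Unlike list_splice_init(old, new), @new need not be initialised
 * beforehand: it is simply overwritten with @old's links, so callers
 * can declare a bare "struct list_head" on the stack (as the diff
 * below does) instead of LIST_HEAD()/LIST_HEAD_INIT().
 */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

struct item {
	struct list_head node;	/* first member, so the cast below is valid */
	int id;
};

int main(void)
{
	struct list_head pending, local;	/* "local" is left uninitialised on purpose */
	struct item items[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
	int i;

	INIT_LIST_HEAD(&pending);
	for (i = 0; i < 3; i++)
		list_add_tail(&items[i].node, &pending);

	/* In the kernel this swap happens under the producer's lock;
	 * afterwards "local" can be drained with no lock held. */
	list_replace_init(&pending, &local);

	while (!list_empty(&local)) {
		struct item *it = (struct item *)local.next;

		list_del(&it->node);
		printf("draining item %d\n", it->id);
	}
	return 0;
}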
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 92c3d9f0e731..0887b34bc59b 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -209,19 +209,19 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 }
 
 void global_flush_tlb(void)
 {
-	LIST_HEAD(l);
+	struct list_head l;
 	struct page *pg, *next;
 
 	BUG_ON(irqs_disabled());
 
 	spin_lock_irq(&cpa_lock);
-	list_splice_init(&df_list, &l);
+	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
 	flush_map();
 	list_for_each_entry_safe(pg, next, &l, lru)
 		__free_page(pg);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b7..465b54312c59 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3359,12 +3359,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
diff --git a/fs/aio.c b/fs/aio.c
index e41e932ba489..8c34a62df7d7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -777,11 +777,11 @@ out:
 static int __aio_run_iocbs(struct kioctx *ctx)
 {
 	struct kiocb *iocb;
-	LIST_HEAD(run_list);
+	struct list_head run_list;
 
 	assert_spin_locked(&ctx->ctx_lock);
 
-	list_splice_init(&ctx->run_list, &run_list);
+	list_replace_init(&ctx->run_list, &run_list);
 	while (!list_empty(&run_list)) {
 		iocb = list_entry(run_list.next, struct kiocb,
 			ki_run_list);
diff --git a/kernel/timer.c b/kernel/timer.c
index 9e49deed468c..3bf0e9ed2dbe 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -419,10 +419,10 @@ static inline void __run_timers(tvec_base_t *base)
 
 	spin_lock_irq(&base->lock);
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
-		struct list_head work_list = LIST_HEAD_INIT(work_list);
+		struct list_head work_list;
 		struct list_head *head = &work_list;
 		int index = base->timer_jiffies & TVR_MASK;
 
 		/*
 		 * Cascade timers:
 		 */
@@ -431,8 +431,8 @@ static inline void __run_timers(tvec_base_t *base)
 				(!cascade(base, &base->tv3, INDEX(1))) &&
 					!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
 		++base->timer_jiffies;
-		list_splice_init(base->tv1.vec + index, &work_list);
+		list_replace_init(base->tv1.vec + index, &work_list);
 		while (!list_empty(head)) {
 			void (*fn)(unsigned long);
 			unsigned long data;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 880fb415a8f6..740c5abceb07 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -531,11 +531,11 @@ int current_is_keventd(void)
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	LIST_HEAD(list);
+	struct list_head list;
 	struct work_struct *work;
 
 	spin_lock_irq(&cwq->lock);
-	list_splice_init(&cwq->worklist, &list);
+	list_replace_init(&cwq->worklist, &list);
 
 	while (!list_empty(&list)) {
 		printk("Taking work for %s\n", wq->name);
diff --git a/net/core/dev.c b/net/core/dev.c
index ab39fe17cb58..195a5e96b2d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2980,7 +2980,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
-	struct list_head list = LIST_HEAD_INIT(list);
+	struct list_head list;
 
 	/* Need to guard against multiple cpu's getting out of order. */
 	mutex_lock(&net_todo_run_mutex);
@@ -2995,9 +2995,9 @@ void netdev_run_todo(void)
 
 	/* Snapshot list, allow later requests */
 	spin_lock(&net_todo_list_lock);
-	list_splice_init(&net_todo_list, &list);
+	list_replace_init(&net_todo_list, &list);
 	spin_unlock(&net_todo_list_lock);
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_entry(list.next, struct net_device, todo_list);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 646937cc2d84..0f37266411b5 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -91,11 +91,10 @@ static void rfc2863_policy(struct net_device *dev)
 /* Must be called with the rtnl semaphore held */
 void linkwatch_run_queue(void)
 {
-	LIST_HEAD(head);
-	struct list_head *n, *next;
+	struct list_head head, *n, *next;
 
 	spin_lock_irq(&lweventlist_lock);
-	list_splice_init(&lweventlist, &head);
+	list_replace_init(&lweventlist, &head);
 	spin_unlock_irq(&lweventlist_lock);
 
 	list_for_each_safe(n, next, &head) {