 arch/s390/Kconfig                  |  3
 arch/s390/appldata/appldata_base.c |  8
 arch/s390/defconfig                | 11
 arch/s390/kernel/dis.c             |  2
 arch/s390/kernel/smp.c             |  2
 arch/s390/mm/init.c                | 49
 arch/s390/mm/vmem.c                | 18
 drivers/s390/block/dasd.c          | 28
 drivers/s390/char/raw3270.c        |  9
 drivers/s390/char/sclp_config.c    |  2
 drivers/s390/char/sclp_vt220.c     | 27
 drivers/s390/char/tape.h           |  3
 drivers/s390/char/tape_block.c     |  4
 drivers/s390/char/tape_core.c      | 16
 include/asm-s390/types.h           |  6
 15 files changed, 101 insertions(+), 87 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1d035082e78e..93acb3c1859d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -308,6 +308,9 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 655d52543e2d..ad40729bec3d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,6 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
 
 	P_DEBUG(" -= Work Queue =-\n");
 	i = 0;
+	get_online_cpus();
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
@@ -140,6 +141,7 @@ static void appldata_work_fn(struct work_struct *work)
 		}
 	}
 	spin_unlock(&appldata_ops_lock);
+	put_online_cpus();
 }
 
 /*
@@ -266,12 +268,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -314,10 +318,12 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
 		return -EINVAL;
 	}
 
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 
 	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
 	       interval);
@@ -556,8 +562,10 @@ static int __init appldata_init(void)
 		return -ENOMEM;
 	}
 
+	get_online_cpus();
 	for_each_online_cpu(i)
 		appldata_online_cpu(i);
+	put_online_cpus();
 
 	/* Register cpu hotplug notifier */
 	register_hotcpu_notifier(&appldata_nb);
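
Note on the appldata hunks: every walk of the online-CPU set and every virtual-timer setup is now bracketed by get_online_cpus()/put_online_cpus(), so a concurrent CPU hotplug operation cannot change the set of online CPUs underneath it. A minimal sketch of that locking pattern follows; setup_cpu() is a placeholder helper, not part of this patch.

#include <linux/cpu.h>

static void example_setup_all_cpus(void)
{
	int cpu;

	get_online_cpus();		/* block CPU hotplug while iterating */
	for_each_online_cpu(cpu)
		setup_cpu(cpu);		/* placeholder per-CPU work */
	put_online_cpus();		/* allow hotplug again */
}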
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index aa341d0ea1e6..c5cdb975d590 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Wed Apr 30 11:07:45 2008
+# Linux kernel version: 2.6.26-rc4
+# Fri May 30 09:49:33 2008
 #
 CONFIG_SCHED_MC=y
 CONFIG_MMU=y
@@ -103,6 +103,7 @@ CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 CONFIG_MODVERSIONS=y
@@ -173,6 +174,7 @@ CONFIG_PREEMPT=y
 # CONFIG_PREEMPT_RCU is not set
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -210,6 +212,7 @@ CONFIG_FORCE_MAX_ZONEORDER=9
 CONFIG_PFAULT=y
 # CONFIG_SHARED_KERNEL is not set
 # CONFIG_CMM is not set
+# CONFIG_PAGE_STATES is not set
 CONFIG_VIRT_TIMER=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
@@ -620,6 +623,7 @@ CONFIG_S390_VMUR=m
 #
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+CONFIG_ACCESSIBILITY=y
 
 #
 # File systems
@@ -754,11 +758,12 @@ CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
+# CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
 # CONFIG_DEBUG_RT_MUTEXES is not set
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c14a336f6300..d2f270c995d9 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -208,7 +208,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */
 	[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
 	[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
-	[INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */
+	[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */
 	[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */
 	[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */
 	[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f4228948dc4..42b1d12ebb10 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1089,7 +1089,7 @@ out:
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
 {
 	cpumask_t newcpus;
 	int cpu;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b9..05598649b326 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-		global_page_state(NR_SLAB_RECLAIMABLE) +
-		global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
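
Note on the show_mem() rewrite: instead of scanning pfn 0..max_mapnr directly, the new code iterates the online node descriptors and holds each node's resize lock while walking its spanned pages, which keeps the counters consistent against memory hotplug. In outline (count_page() is a stand-in for the classification done above, not a real function):

#include <linux/mm.h>
#include <linux/memory_hotplug.h>

static void example_walk_node_pages(void)
{
	unsigned long i, flags;
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);	/* pin node_start_pfn/node_spanned_pages */
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			count_page(pfn_to_page(pgdat->node_start_pfn + i));	/* stand-in */
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
}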
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f39..f591188fa2c0 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
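
Note on vmem_alloc_pages(): the helper added above picks its allocator based on how far boot has progressed, using bootmem before the slab/page allocators are up and __get_free_pages() afterwards; the __ref annotation tells modpost that the reference into __init bootmem code is intentional. A minimal standalone sketch of the same idea (early_pages_alloc() is an illustrative name, not from this patch):

#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Allocate 2^order contiguous pages, both before and after mm init. */
static void __ref *early_pages_alloc(unsigned int order)
{
	if (slab_is_available())
		return (void *) __get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages(PAGE_SIZE << order);
}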
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8ba3f135da22..1a4025683362 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -63,6 +63,7 @@ static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
  */
 static wait_queue_head_t dasd_init_waitq;
 static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
 
 /*
  * Allocate memory for a new device structure.
@@ -1151,11 +1152,15 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
 	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
 	struct dasd_block *block;
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;
 
 	list_for_each_safe(l, n, final_queue) {
 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
 		list_del_init(&cqr->devlist);
 		block = cqr->block;
+		callback = cqr->callback;
+		callback_data = cqr->callback_data;
 		if (block)
 			spin_lock_bh(&block->queue_lock);
 		switch (cqr->status) {
@@ -1176,7 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
 			BUG();
 		}
 		if (cqr->callback != NULL)
-			(cqr->callback)(cqr, cqr->callback_data);
+			(callback)(cqr, callback_data);
 		if (block)
 			spin_unlock_bh(&block->queue_lock);
 	}
@@ -1406,17 +1411,15 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
  */
 int dasd_sleep_on(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->startdev;
 
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	dasd_add_request_tail(cqr);
-	wait_event(wait_q, _wait_for_wakeup(cqr));
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -1429,20 +1432,18 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
  */
 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->startdev;
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	dasd_add_request_tail(cqr);
-	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
 	if (rc == -ERESTARTSYS) {
 		dasd_cancel_req(cqr);
 		/* wait (non-interruptible) for final status */
-		wait_event(wait_q, _wait_for_wakeup(cqr));
+		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 	}
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 	return rc;
@@ -1466,7 +1467,6 @@ static inline int _dasd_term_running_cqr(struct dasd_device *device)
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
@@ -1478,9 +1478,8 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 		return rc;
 	}
 
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	cqr->status = DASD_CQR_QUEUED;
 	list_add(&cqr->devlist, &device->ccw_queue);
 
@@ -1489,7 +1488,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 
-	wait_event(wait_q, _wait_for_wakeup(cqr));
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -2430,6 +2429,7 @@ static int __init dasd_init(void)
 
 	init_waitqueue_head(&dasd_init_waitq);
 	init_waitqueue_head(&dasd_flush_wq);
+	init_waitqueue_head(&generic_waitq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
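
Note on the dasd_sleep_on*() changes: the per-call, on-stack wait_queue_head_t variables are replaced by one static generic_waitq that dasd_init() initialises once, so the interrupt-time callback never takes the address of a wait queue living in a caller's stack frame; the waiters still re-check their own completion condition, so a single shared queue is sufficient. A reduced sketch of the pattern (struct example_req and its fields are invented for illustration):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);	/* shared, lives for the module lifetime */

struct example_req {
	int done;
	int rc;
};

/* Called from interrupt context when the request finishes. */
static void example_complete(struct example_req *req)
{
	req->done = 1;
	wake_up(&example_waitq);	/* queue address is always valid */
}

/* Called from process context to wait for completion. */
static int example_wait(struct example_req *req)
{
	wait_event(example_waitq, req->done);
	return req->rc;
}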
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 0d98f1ff2edd..848ef7e8523f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -549,7 +549,6 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
 		   struct raw3270_request *rq)
 {
 	unsigned long flags;
-	wait_queue_head_t wq;
 	int rc;
 
 #ifdef CONFIG_TN3270_CONSOLE
@@ -566,20 +565,20 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
 		return rq->rc;
 	}
 #endif
-	init_waitqueue_head(&wq);
 	rq->callback = raw3270_wake_init;
-	rq->callback_data = &wq;
+	rq->callback_data = &raw3270_wait_queue;
 	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
 	rc = __raw3270_start(rp, view, rq);
 	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
 	if (rc)
 		return rc;
 	/* Now wait for the completion. */
-	rc = wait_event_interruptible(wq, raw3270_request_final(rq));
+	rc = wait_event_interruptible(raw3270_wait_queue,
+				      raw3270_request_final(rq));
 	if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
 		raw3270_halt_io(view->dev, rq);
 		/* No wait for the halt to complete. */
-		wait_event(wq, raw3270_request_final(rq));
+		wait_event(raw3270_wait_queue, raw3270_request_final(rq));
 		return -ERESTARTSYS;
 	}
 	return rq->rc;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 9e784d5f7f57..ad05a87bc480 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 	put_online_cpus();
 }
 
-static void sclp_cpu_change_notify(struct work_struct *work)
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
 	smp_rescan_cpus();
 }
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 35707c04e613..62576af36f47 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -71,9 +71,6 @@ static struct list_head sclp_vt220_outqueue;
 /* Number of requests in outqueue */
 static int sclp_vt220_outqueue_count;
 
-/* Wait queue used to delay write requests while we've run out of buffers */
-static wait_queue_head_t sclp_vt220_waitq;
-
 /* Timer used for delaying write requests to merge subsequent messages into
  * a single buffer */
 static struct timer_list sclp_vt220_timer;
@@ -133,7 +130,6 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
 	} while (request && __sclp_vt220_emit(request));
 	if (request == NULL && sclp_vt220_flush_later)
 		sclp_vt220_emit_current();
-	wake_up(&sclp_vt220_waitq);
 	/* Check if the tty needs a wake up call */
 	if (sclp_vt220_tty != NULL) {
 		tty_wakeup(sclp_vt220_tty);
@@ -383,7 +379,7 @@ sclp_vt220_timeout(unsigned long data)
  */
 static int
 __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
-		   int convertlf, int may_schedule)
+		   int convertlf, int may_fail)
 {
 	unsigned long flags;
 	void *page;
@@ -395,15 +391,14 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 	overall_written = 0;
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
 	do {
-		/* Create a sclp output buffer if none exists yet */
+		/* Create an sclp output buffer if none exists yet */
 		if (sclp_vt220_current_request == NULL) {
 			while (list_empty(&sclp_vt220_empty)) {
 				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-				if (in_interrupt() || !may_schedule)
-					sclp_sync_wait();
+				if (may_fail)
+					goto out;
 				else
-					wait_event(sclp_vt220_waitq,
-						!list_empty(&sclp_vt220_empty));
+					sclp_sync_wait();
 				spin_lock_irqsave(&sclp_vt220_lock, flags);
 			}
 			page = (void *) sclp_vt220_empty.next;
@@ -437,6 +432,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 		add_timer(&sclp_vt220_timer);
 	}
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+out:
 	return overall_written;
 }
 
@@ -520,19 +516,11 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
  * character to the tty device. If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
- *
- * NOTE: include/linux/tty_driver.h specifies that a character should be
- * ignored if there is no room in the queue. This driver implements a different
- * semantic in that it will block when there is no more room left.
- *
- * FIXME: putchar can currently be called from BH and other non blocking
- * handlers so this semantic isn't a good idea.
  */
 static int
 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 {
-	__sclp_vt220_write(&ch, 1, 0, 0, 1);
-	return 1;
+	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
 }
 
 /*
@@ -653,7 +641,6 @@ static int __init __sclp_vt220_init(void)
 	spin_lock_init(&sclp_vt220_lock);
 	INIT_LIST_HEAD(&sclp_vt220_empty);
 	INIT_LIST_HEAD(&sclp_vt220_outqueue);
-	init_waitqueue_head(&sclp_vt220_waitq);
 	init_timer(&sclp_vt220_timer);
 	sclp_vt220_current_request = NULL;
 	sclp_vt220_buffered_chars = 0;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index dddf8d62c153..d0d565a05dfe 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -231,6 +231,9 @@ struct tape_device {
 	/* Request queue. */
 	struct list_head req_queue;
 
+	/* Request wait queue. */
+	wait_queue_head_t wait_queue;
+
 	/* Each tape device has (currently) two minor numbers. */
 	int first_minor;
 
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ddc4a114e7f4..95da72bc17e8 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -179,11 +179,11 @@ tapeblock_requeue(struct work_struct *work) {
 			tapeblock_end_request(req, -EIO);
 			continue;
 		}
+		blkdev_dequeue_request(req);
+		nr_queued++;
 		spin_unlock_irq(&device->blk_data.request_queue_lock);
 		rc = tapeblock_start_request(device, req);
 		spin_lock_irq(&device->blk_data.request_queue_lock);
-		blkdev_dequeue_request(req);
-		nr_queued++;
 	}
 	spin_unlock_irq(&device->blk_data.request_queue_lock);
 	atomic_set(&device->blk_data.requeue_scheduled, 0);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 76e44eb7c47f..c20e3c548343 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -449,6 +449,7 @@ tape_alloc_device(void)
 	INIT_LIST_HEAD(&device->req_queue);
 	INIT_LIST_HEAD(&device->node);
 	init_waitqueue_head(&device->state_change_wq);
+	init_waitqueue_head(&device->wait_queue);
 	device->tape_state = TS_INIT;
 	device->medium_state = MS_UNKNOWN;
 	*device->modeset_byte = 0;
@@ -954,21 +955,19 @@ __tape_wake_up(struct tape_request *request, void *data)
 int
 tape_do_io(struct tape_device *device, struct tape_request *request)
 {
-	wait_queue_head_t wq;
 	int rc;
 
-	init_waitqueue_head(&wq);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Setup callback */
 	request->callback = __tape_wake_up;
-	request->callback_data = &wq;
+	request->callback_data = &device->wait_queue;
 	/* Add request to request queue and try to start it. */
 	rc = __tape_start_request(device, request);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	if (rc)
 		return rc;
 	/* Request added to the queue. Wait for its completion. */
-	wait_event(wq, (request->callback == NULL));
+	wait_event(device->wait_queue, (request->callback == NULL));
 	/* Get rc from request */
 	return request->rc;
 }
@@ -989,20 +988,19 @@ int
 tape_do_io_interruptible(struct tape_device *device,
 	struct tape_request *request)
 {
-	wait_queue_head_t wq;
 	int rc;
 
-	init_waitqueue_head(&wq);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Setup callback */
 	request->callback = __tape_wake_up_interruptible;
-	request->callback_data = &wq;
+	request->callback_data = &device->wait_queue;
 	rc = __tape_start_request(device, request);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	if (rc)
 		return rc;
 	/* Request added to the queue. Wait for its completion. */
-	rc = wait_event_interruptible(wq, (request->callback == NULL));
+	rc = wait_event_interruptible(device->wait_queue,
+				      (request->callback == NULL));
 	if (rc != -ERESTARTSYS)
 		/* Request finished normally. */
 		return request->rc;
@@ -1015,7 +1013,7 @@ tape_do_io_interruptible(struct tape_device *device,
 	/* Wait for the interrupt that acknowledges the halt. */
 	do {
 		rc = wait_event_interruptible(
-			wq,
+			device->wait_queue,
 			(request->callback == NULL)
 			);
 	} while (rc == -ERESTARTSYS);
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h
index 0e959e20e9a3..41c547656130 100644
--- a/include/asm-s390/types.h
+++ b/include/asm-s390/types.h
@@ -40,7 +40,13 @@ typedef __signed__ long saddr_t;
 
 #ifndef __ASSEMBLY__
 
+typedef u64 dma64_addr_t;
+#ifdef __s390x__
+/* DMA addresses come in 32-bit and 64-bit flavours. */
+typedef u64 dma_addr_t;
+#else
 typedef u32 dma_addr_t;
+#endif
 
 #ifndef __s390x__
 typedef union {
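
Note on the types.h hunk: after this change dma_addr_t follows the kernel's address width (u64 on 64-bit s390, u32 on 31-bit), while dma64_addr_t is unconditionally 64 bits wide. A compile-time check along these lines would confirm the layout; the function itself is illustrative and not part of the patch:

#include <linux/kernel.h>
#include <linux/types.h>

static inline void check_s390_dma_types(void)
{
	BUILD_BUG_ON(sizeof(dma64_addr_t) != 8);	/* always 64 bit */
#ifdef __s390x__
	BUILD_BUG_ON(sizeof(dma_addr_t) != 8);		/* 64-bit kernel */
#else
	BUILD_BUG_ON(sizeof(dma_addr_t) != 4);		/* 31-bit kernel */
#endif
}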
