author    | Paul Mackerras <paulus@samba.org> | 2008-03-24 22:31:46 -0400
committer | Paul Mackerras <paulus@samba.org> | 2008-03-24 22:31:46 -0400
commit    | 16fddf5457d2a7eb5e96ceb016a8f722eca97af6 (patch)
tree      | ae3083a50c55f1e1a2c83f475d0e8bb2da8d7196
parent    | 5492a7e4cba8e38419d489f0865de0a67c737e8a (diff)
parent    | cc7feea39bed2951cc29af3ad642f39a99dfe8d3 (diff)
Merge branch 'linux-2.6' into merge
32 files changed, 171 insertions, 245 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 590f1f67c874..a83dfa3cf40c 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -351,9 +351,14 @@ static void __init htab_init_page_sizes(void) | |||
351 | mmu_vmalloc_psize = MMU_PAGE_64K; | 351 | mmu_vmalloc_psize = MMU_PAGE_64K; |
352 | if (mmu_linear_psize == MMU_PAGE_4K) | 352 | if (mmu_linear_psize == MMU_PAGE_4K) |
353 | mmu_linear_psize = MMU_PAGE_64K; | 353 | mmu_linear_psize = MMU_PAGE_64K; |
354 | if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) | 354 | if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { |
355 | mmu_io_psize = MMU_PAGE_64K; | 355 | /* |
356 | else | 356 | * Don't use 64k pages for ioremap on pSeries, since |
357 | * that would stop us accessing the HEA ethernet. | ||
358 | */ | ||
359 | if (!machine_is(pseries)) | ||
360 | mmu_io_psize = MMU_PAGE_64K; | ||
361 | } else | ||
357 | mmu_ci_restrictions = 1; | 362 | mmu_ci_restrictions = 1; |
358 | } | 363 | } |
359 | #endif /* CONFIG_PPC_64K_PAGES */ | 364 | #endif /* CONFIG_PPC_64K_PAGES */ |
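For readability, the effect of this hunk on the CONFIG_PPC_64K_PAGES branch is roughly the following (a linearized sketch reconstructed from the hunk, not the full htab_init_page_sizes()):

    if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
            /*
             * Don't use 64k pages for ioremap on pSeries, since
             * that would stop us accessing the HEA ethernet.
             */
            if (!machine_is(pseries))
                    mmu_io_psize = MMU_PAGE_64K;
    } else
            mmu_ci_restrictions = 1;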
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c index f589999361e0..64ec7d629363 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c | |||
@@ -52,6 +52,10 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) | |||
52 | int i, tasknum = -1; | 52 | int i, tasknum = -1; |
53 | struct bcom_task *tsk; | 53 | struct bcom_task *tsk; |
54 | 54 | ||
55 | /* Don't try to do anything if bestcomm init failed */ | ||
56 | if (!bcom_eng) | ||
57 | return NULL; | ||
58 | |||
55 | /* Get and reserve a task num */ | 59 | /* Get and reserve a task num */ |
56 | spin_lock(&bcom_eng->lock); | 60 | spin_lock(&bcom_eng->lock); |
57 | 61 | ||
@@ -484,8 +488,8 @@ mpc52xx_bcom_remove(struct of_device *op) | |||
484 | } | 488 | } |
485 | 489 | ||
486 | static struct of_device_id mpc52xx_bcom_of_match[] = { | 490 | static struct of_device_id mpc52xx_bcom_of_match[] = { |
487 | { .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", }, | 491 | { .compatible = "fsl,mpc5200-bestcomm", }, |
488 | { .type = "dma-controller", .compatible = "mpc5200-bestcomm", }, | 492 | { .compatible = "mpc5200-bestcomm", }, |
489 | {}, | 493 | {}, |
490 | }; | 494 | }; |
491 | 495 | ||
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index ae0dbf4c1d66..0f2dfb0aaa6a 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -906,7 +906,7 @@ static int __init init_ipic_sysfs(void) | |||
906 | { | 906 | { |
907 | int rc; | 907 | int rc; |
908 | 908 | ||
909 | if (!primary_ipic->regs) | 909 | if (!primary_ipic || !primary_ipic->regs) |
910 | return -ENODEV; | 910 | return -ENODEV; |
911 | printk(KERN_DEBUG "Registering ipic with sysfs...\n"); | 911 | printk(KERN_DEBUG "Registering ipic with sysfs...\n"); |
912 | 912 | ||
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c index bd76482077be..edb74f5a1186 100644 --- a/arch/sparc64/kernel/ds.c +++ b/arch/sparc64/kernel/ds.c | |||
@@ -972,8 +972,7 @@ static void process_ds_work(void) | |||
972 | LIST_HEAD(todo); | 972 | LIST_HEAD(todo); |
973 | 973 | ||
974 | spin_lock_irqsave(&ds_lock, flags); | 974 | spin_lock_irqsave(&ds_lock, flags); |
975 | list_splice(&ds_work_list, &todo); | 975 | list_splice_init(&ds_work_list, &todo); |
976 | INIT_LIST_HEAD(&ds_work_list); | ||
977 | spin_unlock_irqrestore(&ds_lock, flags); | 976 | spin_unlock_irqrestore(&ds_lock, flags); |
978 | 977 | ||
979 | list_for_each_entry_safe(qp, tmp, &todo, list) { | 978 | list_for_each_entry_safe(qp, tmp, &todo, list) { |
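The splice-then-reinit pair on the old side is exactly what list_splice_init() does in one call, so behaviour is unchanged; a minimal before/after sketch:

    /* before: splice, then manually reset the now-stale source head */
    list_splice(&ds_work_list, &todo);
    INIT_LIST_HEAD(&ds_work_list);

    /* after: one helper that splices and leaves the source list empty */
    list_splice_init(&ds_work_list, &todo);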
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 44b105c04dd3..34f8ff57c56b 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S | |||
@@ -288,8 +288,12 @@ sun4v_chip_type: | |||
288 | /* Leave arg2 as-is, prom_mmu_ihandle_cache */ | 288 | /* Leave arg2 as-is, prom_mmu_ihandle_cache */ |
289 | mov -1, %l3 | 289 | mov -1, %l3 |
290 | stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) | 290 | stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) |
291 | sethi %hi(8 * 1024 * 1024), %l3 | 291 | /* 4MB align the kernel image size. */ |
292 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB) | 292 | set (_end - KERNBASE), %l3 |
293 | set ((4 * 1024 * 1024) - 1), %l4 | ||
294 | add %l3, %l4, %l3 | ||
295 | andn %l3, %l4, %l3 | ||
296 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB) | ||
293 | sethi %hi(KERNBASE), %l3 | 297 | sethi %hi(KERNBASE), %l3 |
294 | stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) | 298 | stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) |
295 | stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty | 299 | stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty |
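The set/add/andn sequence is the standard align-up idiom; in C terms the assembly computes roughly the following (sketch, with 4MB written as 1UL << 22):

    unsigned long ksize = (unsigned long)_end - KERNBASE;
    unsigned long arg4  = (ksize + (1UL << 22) - 1) & ~((1UL << 22) - 1);
    /* arg4 = roundup(ksize, 4MB); replaces the old hard-coded 8MB */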
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index e116e38b160e..acf8c5250aa9 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs) | |||
731 | current_thread_info()->xfsr[0] = 0; | 731 | current_thread_info()->xfsr[0] = 0; |
732 | current_thread_info()->fpsaved[0] = 0; | 732 | current_thread_info()->fpsaved[0] = 0; |
733 | regs->tstate &= ~TSTATE_PEF; | 733 | regs->tstate &= ~TSTATE_PEF; |
734 | task_lock(current); | ||
735 | current->ptrace &= ~PT_DTRACE; | ||
736 | task_unlock(current); | ||
737 | } | 734 | } |
738 | out: | 735 | out: |
739 | return error; | 736 | return error; |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index cc454731d879..5a1126b363a4 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -284,14 +284,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) | |||
284 | { | 284 | { |
285 | extern unsigned long sparc64_ttable_tl0; | 285 | extern unsigned long sparc64_ttable_tl0; |
286 | extern unsigned long kern_locked_tte_data; | 286 | extern unsigned long kern_locked_tte_data; |
287 | extern int bigkernel; | ||
288 | struct hvtramp_descr *hdesc; | 287 | struct hvtramp_descr *hdesc; |
289 | unsigned long trampoline_ra; | 288 | unsigned long trampoline_ra; |
290 | struct trap_per_cpu *tb; | 289 | struct trap_per_cpu *tb; |
291 | u64 tte_vaddr, tte_data; | 290 | u64 tte_vaddr, tte_data; |
292 | unsigned long hv_err; | 291 | unsigned long hv_err; |
292 | int i; | ||
293 | 293 | ||
294 | hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL); | 294 | hdesc = kzalloc(sizeof(*hdesc) + |
295 | (sizeof(struct hvtramp_mapping) * | ||
296 | num_kernel_image_mappings - 1), | ||
297 | GFP_KERNEL); | ||
295 | if (!hdesc) { | 298 | if (!hdesc) { |
296 | printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " | 299 | printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " |
297 | "hvtramp_descr.\n"); | 300 | "hvtramp_descr.\n"); |
@@ -299,7 +302,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) | |||
299 | } | 302 | } |
300 | 303 | ||
301 | hdesc->cpu = cpu; | 304 | hdesc->cpu = cpu; |
302 | hdesc->num_mappings = (bigkernel ? 2 : 1); | 305 | hdesc->num_mappings = num_kernel_image_mappings; |
303 | 306 | ||
304 | tb = &trap_block[cpu]; | 307 | tb = &trap_block[cpu]; |
305 | tb->hdesc = hdesc; | 308 | tb->hdesc = hdesc; |
@@ -312,13 +315,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) | |||
312 | tte_vaddr = (unsigned long) KERNBASE; | 315 | tte_vaddr = (unsigned long) KERNBASE; |
313 | tte_data = kern_locked_tte_data; | 316 | tte_data = kern_locked_tte_data; |
314 | 317 | ||
315 | hdesc->maps[0].vaddr = tte_vaddr; | 318 | for (i = 0; i < hdesc->num_mappings; i++) { |
316 | hdesc->maps[0].tte = tte_data; | 319 | hdesc->maps[i].vaddr = tte_vaddr; |
317 | if (bigkernel) { | 320 | hdesc->maps[i].tte = tte_data; |
318 | tte_vaddr += 0x400000; | 321 | tte_vaddr += 0x400000; |
319 | tte_data += 0x400000; | 322 | tte_data += 0x400000; |
320 | hdesc->maps[1].vaddr = tte_vaddr; | ||
321 | hdesc->maps[1].tte = tte_data; | ||
322 | } | 323 | } |
323 | 324 | ||
324 | trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); | 325 | trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); |
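The descriptor now ends in a one-element maps[] array (see the include/asm-sparc64/hvtramp.h hunk further down), so the allocation appends room for the remaining mappings at run time; the sizing intent is, in effect:

    /* maps[1] already accounts for one hvtramp_mapping inside *hdesc, so
     * roughly (num_kernel_image_mappings - 1) extra entries are appended.
     * (Sketch of the intent; the hunk above is the actual expression.) */
    size_t sz = sizeof(*hdesc) +
                (num_kernel_image_mappings - 1) * sizeof(struct hvtramp_mapping);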
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index deaba2bd0535..2455fa498876 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c | |||
@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs) | |||
678 | current_thread_info()->xfsr[0] = 0; | 678 | current_thread_info()->xfsr[0] = 0; |
679 | current_thread_info()->fpsaved[0] = 0; | 679 | current_thread_info()->fpsaved[0] = 0; |
680 | regs->tstate &= ~TSTATE_PEF; | 680 | regs->tstate &= ~TSTATE_PEF; |
681 | task_lock(current); | ||
682 | current->ptrace &= ~PT_DTRACE; | ||
683 | task_unlock(current); | ||
684 | } | 681 | } |
685 | out: | 682 | out: |
686 | return error; | 683 | return error; |
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 4ae2e525d68b..56ff55211341 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -105,7 +105,7 @@ startup_continue: | |||
105 | wr %g2, 0, %tick_cmpr | 105 | wr %g2, 0, %tick_cmpr |
106 | 106 | ||
107 | /* Call OBP by hand to lock KERNBASE into i/d tlbs. | 107 | /* Call OBP by hand to lock KERNBASE into i/d tlbs. |
108 | * We lock 2 consequetive entries if we are 'bigkernel'. | 108 | * We lock 'num_kernel_image_mappings' consequetive entries. |
109 | */ | 109 | */ |
110 | sethi %hi(prom_entry_lock), %g2 | 110 | sethi %hi(prom_entry_lock), %g2 |
111 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 | 111 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 |
@@ -119,6 +119,29 @@ startup_continue: | |||
119 | add %l2, -(192 + 128), %sp | 119 | add %l2, -(192 + 128), %sp |
120 | flushw | 120 | flushw |
121 | 121 | ||
122 | /* Setup the loop variables: | ||
123 | * %l3: VADDR base | ||
124 | * %l4: TTE base | ||
125 | * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings' | ||
126 | * %l6: Number of TTE entries to map | ||
127 | * %l7: Highest TTE entry number, we count down | ||
128 | */ | ||
129 | sethi %hi(KERNBASE), %l3 | ||
130 | sethi %hi(kern_locked_tte_data), %l4 | ||
131 | ldx [%l4 + %lo(kern_locked_tte_data)], %l4 | ||
132 | clr %l5 | ||
133 | sethi %hi(num_kernel_image_mappings), %l6 | ||
134 | lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 | ||
135 | add %l6, 1, %l6 | ||
136 | |||
137 | mov 15, %l7 | ||
138 | BRANCH_IF_ANY_CHEETAH(g1,g5,2f) | ||
139 | |||
140 | mov 63, %l7 | ||
141 | 2: | ||
142 | |||
143 | 3: | ||
144 | /* Lock into I-MMU */ | ||
122 | sethi %hi(call_method), %g2 | 145 | sethi %hi(call_method), %g2 |
123 | or %g2, %lo(call_method), %g2 | 146 | or %g2, %lo(call_method), %g2 |
124 | stx %g2, [%sp + 2047 + 128 + 0x00] | 147 | stx %g2, [%sp + 2047 + 128 + 0x00] |
@@ -132,63 +155,26 @@ startup_continue: | |||
132 | sethi %hi(prom_mmu_ihandle_cache), %g2 | 155 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
133 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 | 156 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
134 | stx %g2, [%sp + 2047 + 128 + 0x20] | 157 | stx %g2, [%sp + 2047 + 128 + 0x20] |
135 | sethi %hi(KERNBASE), %g2 | ||
136 | stx %g2, [%sp + 2047 + 128 + 0x28] | ||
137 | sethi %hi(kern_locked_tte_data), %g2 | ||
138 | ldx [%g2 + %lo(kern_locked_tte_data)], %g2 | ||
139 | stx %g2, [%sp + 2047 + 128 + 0x30] | ||
140 | |||
141 | mov 15, %g2 | ||
142 | BRANCH_IF_ANY_CHEETAH(g1,g5,1f) | ||
143 | 158 | ||
144 | mov 63, %g2 | 159 | /* Each TTE maps 4MB, convert index to offset. */ |
145 | 1: | 160 | sllx %l5, 22, %g1 |
146 | stx %g2, [%sp + 2047 + 128 + 0x38] | ||
147 | sethi %hi(p1275buf), %g2 | ||
148 | or %g2, %lo(p1275buf), %g2 | ||
149 | ldx [%g2 + 0x08], %o1 | ||
150 | call %o1 | ||
151 | add %sp, (2047 + 128), %o0 | ||
152 | 161 | ||
153 | sethi %hi(bigkernel), %g2 | 162 | add %l3, %g1, %g2 |
154 | lduw [%g2 + %lo(bigkernel)], %g2 | 163 | stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR |
155 | brz,pt %g2, do_dtlb | 164 | add %l4, %g1, %g2 |
156 | nop | 165 | stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE |
157 | 166 | ||
158 | sethi %hi(call_method), %g2 | 167 | /* TTE index is highest minus loop index. */ |
159 | or %g2, %lo(call_method), %g2 | 168 | sub %l7, %l5, %g2 |
160 | stx %g2, [%sp + 2047 + 128 + 0x00] | ||
161 | mov 5, %g2 | ||
162 | stx %g2, [%sp + 2047 + 128 + 0x08] | ||
163 | mov 1, %g2 | ||
164 | stx %g2, [%sp + 2047 + 128 + 0x10] | ||
165 | sethi %hi(itlb_load), %g2 | ||
166 | or %g2, %lo(itlb_load), %g2 | ||
167 | stx %g2, [%sp + 2047 + 128 + 0x18] | ||
168 | sethi %hi(prom_mmu_ihandle_cache), %g2 | ||
169 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 | ||
170 | stx %g2, [%sp + 2047 + 128 + 0x20] | ||
171 | sethi %hi(KERNBASE + 0x400000), %g2 | ||
172 | stx %g2, [%sp + 2047 + 128 + 0x28] | ||
173 | sethi %hi(kern_locked_tte_data), %g2 | ||
174 | ldx [%g2 + %lo(kern_locked_tte_data)], %g2 | ||
175 | sethi %hi(0x400000), %g1 | ||
176 | add %g2, %g1, %g2 | ||
177 | stx %g2, [%sp + 2047 + 128 + 0x30] | ||
178 | |||
179 | mov 14, %g2 | ||
180 | BRANCH_IF_ANY_CHEETAH(g1,g5,1f) | ||
181 | |||
182 | mov 62, %g2 | ||
183 | 1: | ||
184 | stx %g2, [%sp + 2047 + 128 + 0x38] | 169 | stx %g2, [%sp + 2047 + 128 + 0x38] |
170 | |||
185 | sethi %hi(p1275buf), %g2 | 171 | sethi %hi(p1275buf), %g2 |
186 | or %g2, %lo(p1275buf), %g2 | 172 | or %g2, %lo(p1275buf), %g2 |
187 | ldx [%g2 + 0x08], %o1 | 173 | ldx [%g2 + 0x08], %o1 |
188 | call %o1 | 174 | call %o1 |
189 | add %sp, (2047 + 128), %o0 | 175 | add %sp, (2047 + 128), %o0 |
190 | 176 | ||
191 | do_dtlb: | 177 | /* Lock into D-MMU */ |
192 | sethi %hi(call_method), %g2 | 178 | sethi %hi(call_method), %g2 |
193 | or %g2, %lo(call_method), %g2 | 179 | or %g2, %lo(call_method), %g2 |
194 | stx %g2, [%sp + 2047 + 128 + 0x00] | 180 | stx %g2, [%sp + 2047 + 128 + 0x00] |
@@ -202,65 +188,30 @@ do_dtlb: | |||
202 | sethi %hi(prom_mmu_ihandle_cache), %g2 | 188 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
203 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 | 189 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
204 | stx %g2, [%sp + 2047 + 128 + 0x20] | 190 | stx %g2, [%sp + 2047 + 128 + 0x20] |
205 | sethi %hi(KERNBASE), %g2 | ||
206 | stx %g2, [%sp + 2047 + 128 + 0x28] | ||
207 | sethi %hi(kern_locked_tte_data), %g2 | ||
208 | ldx [%g2 + %lo(kern_locked_tte_data)], %g2 | ||
209 | stx %g2, [%sp + 2047 + 128 + 0x30] | ||
210 | 191 | ||
211 | mov 15, %g2 | 192 | /* Each TTE maps 4MB, convert index to offset. */ |
212 | BRANCH_IF_ANY_CHEETAH(g1,g5,1f) | 193 | sllx %l5, 22, %g1 |
213 | 194 | ||
214 | mov 63, %g2 | 195 | add %l3, %g1, %g2 |
215 | 1: | 196 | stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR |
197 | add %l4, %g1, %g2 | ||
198 | stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE | ||
216 | 199 | ||
200 | /* TTE index is highest minus loop index. */ | ||
201 | sub %l7, %l5, %g2 | ||
217 | stx %g2, [%sp + 2047 + 128 + 0x38] | 202 | stx %g2, [%sp + 2047 + 128 + 0x38] |
203 | |||
218 | sethi %hi(p1275buf), %g2 | 204 | sethi %hi(p1275buf), %g2 |
219 | or %g2, %lo(p1275buf), %g2 | 205 | or %g2, %lo(p1275buf), %g2 |
220 | ldx [%g2 + 0x08], %o1 | 206 | ldx [%g2 + 0x08], %o1 |
221 | call %o1 | 207 | call %o1 |
222 | add %sp, (2047 + 128), %o0 | 208 | add %sp, (2047 + 128), %o0 |
223 | 209 | ||
224 | sethi %hi(bigkernel), %g2 | 210 | add %l5, 1, %l5 |
225 | lduw [%g2 + %lo(bigkernel)], %g2 | 211 | cmp %l5, %l6 |
226 | brz,pt %g2, do_unlock | 212 | bne,pt %xcc, 3b |
227 | nop | 213 | nop |
228 | 214 | ||
229 | sethi %hi(call_method), %g2 | ||
230 | or %g2, %lo(call_method), %g2 | ||
231 | stx %g2, [%sp + 2047 + 128 + 0x00] | ||
232 | mov 5, %g2 | ||
233 | stx %g2, [%sp + 2047 + 128 + 0x08] | ||
234 | mov 1, %g2 | ||
235 | stx %g2, [%sp + 2047 + 128 + 0x10] | ||
236 | sethi %hi(dtlb_load), %g2 | ||
237 | or %g2, %lo(dtlb_load), %g2 | ||
238 | stx %g2, [%sp + 2047 + 128 + 0x18] | ||
239 | sethi %hi(prom_mmu_ihandle_cache), %g2 | ||
240 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 | ||
241 | stx %g2, [%sp + 2047 + 128 + 0x20] | ||
242 | sethi %hi(KERNBASE + 0x400000), %g2 | ||
243 | stx %g2, [%sp + 2047 + 128 + 0x28] | ||
244 | sethi %hi(kern_locked_tte_data), %g2 | ||
245 | ldx [%g2 + %lo(kern_locked_tte_data)], %g2 | ||
246 | sethi %hi(0x400000), %g1 | ||
247 | add %g2, %g1, %g2 | ||
248 | stx %g2, [%sp + 2047 + 128 + 0x30] | ||
249 | |||
250 | mov 14, %g2 | ||
251 | BRANCH_IF_ANY_CHEETAH(g1,g5,1f) | ||
252 | |||
253 | mov 62, %g2 | ||
254 | 1: | ||
255 | |||
256 | stx %g2, [%sp + 2047 + 128 + 0x38] | ||
257 | sethi %hi(p1275buf), %g2 | ||
258 | or %g2, %lo(p1275buf), %g2 | ||
259 | ldx [%g2 + 0x08], %o1 | ||
260 | call %o1 | ||
261 | add %sp, (2047 + 128), %o0 | ||
262 | |||
263 | do_unlock: | ||
264 | sethi %hi(prom_entry_lock), %g2 | 215 | sethi %hi(prom_entry_lock), %g2 |
265 | stb %g0, [%g2 + %lo(prom_entry_lock)] | 216 | stb %g0, [%g2 + %lo(prom_entry_lock)] |
266 | membar #StoreStore | #StoreLoad | 217 | membar #StoreStore | #StoreLoad |
@@ -269,47 +220,36 @@ do_unlock: | |||
269 | nop | 220 | nop |
270 | 221 | ||
271 | niagara_lock_tlb: | 222 | niagara_lock_tlb: |
223 | sethi %hi(KERNBASE), %l3 | ||
224 | sethi %hi(kern_locked_tte_data), %l4 | ||
225 | ldx [%l4 + %lo(kern_locked_tte_data)], %l4 | ||
226 | clr %l5 | ||
227 | sethi %hi(num_kernel_image_mappings), %l6 | ||
228 | lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 | ||
229 | add %l6, 1, %l6 | ||
230 | |||
231 | 1: | ||
272 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | 232 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 |
273 | sethi %hi(KERNBASE), %o0 | 233 | sllx %l5, 22, %g2 |
234 | add %l3, %g2, %o0 | ||
274 | clr %o1 | 235 | clr %o1 |
275 | sethi %hi(kern_locked_tte_data), %o2 | 236 | add %l4, %g2, %o2 |
276 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
277 | mov HV_MMU_IMMU, %o3 | 237 | mov HV_MMU_IMMU, %o3 |
278 | ta HV_FAST_TRAP | 238 | ta HV_FAST_TRAP |
279 | 239 | ||
280 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | 240 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 |
281 | sethi %hi(KERNBASE), %o0 | 241 | sllx %l5, 22, %g2 |
242 | add %l3, %g2, %o0 | ||
282 | clr %o1 | 243 | clr %o1 |
283 | sethi %hi(kern_locked_tte_data), %o2 | 244 | add %l4, %g2, %o2 |
284 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
285 | mov HV_MMU_DMMU, %o3 | 245 | mov HV_MMU_DMMU, %o3 |
286 | ta HV_FAST_TRAP | 246 | ta HV_FAST_TRAP |
287 | 247 | ||
288 | sethi %hi(bigkernel), %g2 | 248 | add %l5, 1, %l5 |
289 | lduw [%g2 + %lo(bigkernel)], %g2 | 249 | cmp %l5, %l6 |
290 | brz,pt %g2, after_lock_tlb | 250 | bne,pt %xcc, 1b |
291 | nop | 251 | nop |
292 | 252 | ||
293 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
294 | sethi %hi(KERNBASE + 0x400000), %o0 | ||
295 | clr %o1 | ||
296 | sethi %hi(kern_locked_tte_data), %o2 | ||
297 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
298 | sethi %hi(0x400000), %o3 | ||
299 | add %o2, %o3, %o2 | ||
300 | mov HV_MMU_IMMU, %o3 | ||
301 | ta HV_FAST_TRAP | ||
302 | |||
303 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
304 | sethi %hi(KERNBASE + 0x400000), %o0 | ||
305 | clr %o1 | ||
306 | sethi %hi(kern_locked_tte_data), %o2 | ||
307 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
308 | sethi %hi(0x400000), %o3 | ||
309 | add %o2, %o3, %o2 | ||
310 | mov HV_MMU_DMMU, %o3 | ||
311 | ta HV_FAST_TRAP | ||
312 | |||
313 | after_lock_tlb: | 253 | after_lock_tlb: |
314 | wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate | 254 | wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate |
315 | wr %g0, 0, %fprs | 255 | wr %g0, 0, %fprs |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b5c30416fdac..466fd6cffac9 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -166,7 +166,7 @@ unsigned long sparc64_kern_pri_context __read_mostly; | |||
166 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; | 166 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; |
167 | unsigned long sparc64_kern_sec_context __read_mostly; | 167 | unsigned long sparc64_kern_sec_context __read_mostly; |
168 | 168 | ||
169 | int bigkernel = 0; | 169 | int num_kernel_image_mappings; |
170 | 170 | ||
171 | #ifdef CONFIG_DEBUG_DCFLUSH | 171 | #ifdef CONFIG_DEBUG_DCFLUSH |
172 | atomic_t dcpage_flushes = ATOMIC_INIT(0); | 172 | atomic_t dcpage_flushes = ATOMIC_INIT(0); |
@@ -572,7 +572,7 @@ static unsigned long kern_large_tte(unsigned long paddr); | |||
572 | static void __init remap_kernel(void) | 572 | static void __init remap_kernel(void) |
573 | { | 573 | { |
574 | unsigned long phys_page, tte_vaddr, tte_data; | 574 | unsigned long phys_page, tte_vaddr, tte_data; |
575 | int tlb_ent = sparc64_highest_locked_tlbent(); | 575 | int i, tlb_ent = sparc64_highest_locked_tlbent(); |
576 | 576 | ||
577 | tte_vaddr = (unsigned long) KERNBASE; | 577 | tte_vaddr = (unsigned long) KERNBASE; |
578 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | 578 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
@@ -582,27 +582,20 @@ static void __init remap_kernel(void) | |||
582 | 582 | ||
583 | /* Now lock us into the TLBs via Hypervisor or OBP. */ | 583 | /* Now lock us into the TLBs via Hypervisor or OBP. */ |
584 | if (tlb_type == hypervisor) { | 584 | if (tlb_type == hypervisor) { |
585 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); | 585 | for (i = 0; i < num_kernel_image_mappings; i++) { |
586 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); | ||
587 | if (bigkernel) { | ||
588 | tte_vaddr += 0x400000; | ||
589 | tte_data += 0x400000; | ||
590 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); | 586 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); |
591 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); | 587 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); |
588 | tte_vaddr += 0x400000; | ||
589 | tte_data += 0x400000; | ||
592 | } | 590 | } |
593 | } else { | 591 | } else { |
594 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); | 592 | for (i = 0; i < num_kernel_image_mappings; i++) { |
595 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | 593 | prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); |
596 | if (bigkernel) { | 594 | prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); |
597 | tlb_ent -= 1; | 595 | tte_vaddr += 0x400000; |
598 | prom_dtlb_load(tlb_ent, | 596 | tte_data += 0x400000; |
599 | tte_data + 0x400000, | ||
600 | tte_vaddr + 0x400000); | ||
601 | prom_itlb_load(tlb_ent, | ||
602 | tte_data + 0x400000, | ||
603 | tte_vaddr + 0x400000); | ||
604 | } | 597 | } |
605 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | 598 | sparc64_highest_unlocked_tlb_ent = tlb_ent - i; |
606 | } | 599 | } |
607 | if (tlb_type == cheetah_plus) { | 600 | if (tlb_type == cheetah_plus) { |
608 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | 601 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | |
@@ -1352,12 +1345,9 @@ void __init paging_init(void) | |||
1352 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | 1345 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); |
1353 | 1346 | ||
1354 | real_end = (unsigned long)_end; | 1347 | real_end = (unsigned long)_end; |
1355 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) | 1348 | num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); |
1356 | bigkernel = 1; | 1349 | printk("Kernel: Using %d locked TLB entries for main kernel image.\n", |
1357 | if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { | 1350 | num_kernel_image_mappings); |
1358 | prom_printf("paging_init: Kernel > 8MB, too large.\n"); | ||
1359 | prom_halt(); | ||
1360 | } | ||
1361 | 1351 | ||
1362 | /* Set kernel pgd to upper alias so physical page computations | 1352 | /* Set kernel pgd to upper alias so physical page computations |
1363 | * work. | 1353 | * work. |
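DIV_ROUND_UP(real_end - KERNBASE, 1 << 22) is a ceiling division by the 4MB TLB entry size, so any image size now maps to the right entry count instead of the old one-or-two ("bigkernel") choice with its 8MB hard limit; a worked example with an assumed image size:

    /* include/linux/kernel.h */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* e.g. a hypothetical 9MB image: DIV_ROUND_UP(9MB, 4MB) == 3 locked
     * TLB entries, which the old code would have refused to boot. */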
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 8fe576baa148..4afaba0ed722 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -106,7 +106,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size, | |||
106 | * have to convert them into an offset in a page-aligned mapping, but the | 106 | * have to convert them into an offset in a page-aligned mapping, but the |
107 | * caller shouldn't need to know that small detail. | 107 | * caller shouldn't need to know that small detail. |
108 | */ | 108 | */ |
109 | static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, | 109 | static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, |
110 | enum ioremap_mode mode) | 110 | enum ioremap_mode mode) |
111 | { | 111 | { |
112 | unsigned long pfn, offset, last_addr, vaddr; | 112 | unsigned long pfn, offset, last_addr, vaddr; |
@@ -193,13 +193,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, | |||
193 | * | 193 | * |
194 | * Must be freed with iounmap. | 194 | * Must be freed with iounmap. |
195 | */ | 195 | */ |
196 | void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) | 196 | void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) |
197 | { | 197 | { |
198 | return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); | 198 | return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); |
199 | } | 199 | } |
200 | EXPORT_SYMBOL(ioremap_nocache); | 200 | EXPORT_SYMBOL(ioremap_nocache); |
201 | 201 | ||
202 | void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size) | 202 | void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) |
203 | { | 203 | { |
204 | return __ioremap(phys_addr, size, IOR_MODE_CACHED); | 204 | return __ioremap(phys_addr, size, IOR_MODE_CACHED); |
205 | } | 205 | } |
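At the time of this merge resource_size_t is u64 when CONFIG_RESOURCES_64BIT is set and u32 otherwise, so widening the first parameter lets 32-bit PAE kernels ioremap resources above 4GB without truncation; only the prototype changes for callers (copied from the hunk above, first parameter type only):

    /* before */ void __iomem *__ioremap(unsigned long   phys_addr, unsigned long size, enum ioremap_mode mode);
    /* after  */ void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, enum ioremap_mode mode);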
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index 5732ca3259f9..b6fe7e7a2c2f 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c | |||
@@ -146,7 +146,7 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls) | |||
146 | 146 | ||
147 | dev->nls = nls; | 147 | dev->nls = nls; |
148 | 148 | ||
149 | dev->cn_queue = create_workqueue(dev->name); | 149 | dev->cn_queue = create_singlethread_workqueue(dev->name); |
150 | if (!dev->cn_queue) { | 150 | if (!dev->cn_queue) { |
151 | kfree(dev); | 151 | kfree(dev); |
152 | return NULL; | 152 | return NULL; |
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c index 8af142ccf373..de32b3fba322 100644 --- a/drivers/net/bnx2x.c +++ b/drivers/net/bnx2x.c | |||
@@ -63,8 +63,8 @@ | |||
63 | #include "bnx2x.h" | 63 | #include "bnx2x.h" |
64 | #include "bnx2x_init.h" | 64 | #include "bnx2x_init.h" |
65 | 65 | ||
66 | #define DRV_MODULE_VERSION "1.40.22" | 66 | #define DRV_MODULE_VERSION "1.42.3" |
67 | #define DRV_MODULE_RELDATE "2007/11/27" | 67 | #define DRV_MODULE_RELDATE "2008/3/9" |
68 | #define BNX2X_BC_VER 0x040200 | 68 | #define BNX2X_BC_VER 0x040200 |
69 | 69 | ||
70 | /* Time in jiffies before concluding the transmitter is hung. */ | 70 | /* Time in jiffies before concluding the transmitter is hung. */ |
@@ -8008,38 +8008,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
8008 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, | 8008 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, |
8009 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | 8009 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); |
8010 | 8010 | ||
8011 | switch (cmd->port) { | ||
8012 | case PORT_TP: | ||
8013 | if (!(bp->supported & SUPPORTED_TP)) { | ||
8014 | DP(NETIF_MSG_LINK, "TP not supported\n"); | ||
8015 | return -EINVAL; | ||
8016 | } | ||
8017 | |||
8018 | if (bp->phy_flags & PHY_XGXS_FLAG) { | ||
8019 | bnx2x_link_reset(bp); | ||
8020 | bnx2x_link_settings_supported(bp, SWITCH_CFG_1G); | ||
8021 | bnx2x_phy_deassert(bp); | ||
8022 | } | ||
8023 | break; | ||
8024 | |||
8025 | case PORT_FIBRE: | ||
8026 | if (!(bp->supported & SUPPORTED_FIBRE)) { | ||
8027 | DP(NETIF_MSG_LINK, "FIBRE not supported\n"); | ||
8028 | return -EINVAL; | ||
8029 | } | ||
8030 | |||
8031 | if (!(bp->phy_flags & PHY_XGXS_FLAG)) { | ||
8032 | bnx2x_link_reset(bp); | ||
8033 | bnx2x_link_settings_supported(bp, SWITCH_CFG_10G); | ||
8034 | bnx2x_phy_deassert(bp); | ||
8035 | } | ||
8036 | break; | ||
8037 | |||
8038 | default: | ||
8039 | DP(NETIF_MSG_LINK, "Unknown port type\n"); | ||
8040 | return -EINVAL; | ||
8041 | } | ||
8042 | |||
8043 | if (cmd->autoneg == AUTONEG_ENABLE) { | 8011 | if (cmd->autoneg == AUTONEG_ENABLE) { |
8044 | if (!(bp->supported & SUPPORTED_Autoneg)) { | 8012 | if (!(bp->supported & SUPPORTED_Autoneg)) { |
8045 | DP(NETIF_MSG_LINK, "Aotoneg not supported\n"); | 8013 | DP(NETIF_MSG_LINK, "Aotoneg not supported\n"); |
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 1837584c4504..6a3ac4ea97e9 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c | |||
@@ -109,7 +109,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i | |||
109 | int irq = irq_of_parse_and_map(child, 0); | 109 | int irq = irq_of_parse_and_map(child, 0); |
110 | if (irq != NO_IRQ) { | 110 | if (irq != NO_IRQ) { |
111 | const u32 *id = of_get_property(child, "reg", NULL); | 111 | const u32 *id = of_get_property(child, "reg", NULL); |
112 | bus->irq[*id] = irq; | 112 | if (id) |
113 | bus->irq[*id] = irq; | ||
113 | } | 114 | } |
114 | } | 115 | } |
115 | 116 | ||
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 97212799c513..4291458955ef 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -912,7 +912,7 @@ static int gem_poll(struct napi_struct *napi, int budget) | |||
912 | * rx ring - must call napi_disable(), which | 912 | * rx ring - must call napi_disable(), which |
913 | * schedule_timeout()'s if polling is already disabled. | 913 | * schedule_timeout()'s if polling is already disabled. |
914 | */ | 914 | */ |
915 | work_done += gem_rx(gp, budget); | 915 | work_done += gem_rx(gp, budget - work_done); |
916 | 916 | ||
917 | if (work_done >= budget) | 917 | if (work_done >= budget) |
918 | return work_done; | 918 | return work_done; |
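gem_rx() had been handed the full budget even though earlier work in the same poll may already have consumed part of it, which could push work_done past budget; the general NAPI accounting shape is (generic sketch, stage names hypothetical):

    int work_done = 0;

    work_done += gem_tx_work(gp, budget - work_done);   /* hypothetical stage */
    work_done += gem_rx(gp, budget - work_done);        /* was gem_rx(gp, budget) */

    if (work_done >= budget)
            return work_done;   /* budget spent: stay on the poll list */
    /* ... otherwise finish polling and re-enable interrupts ... */
    return work_done;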
diff --git a/include/asm-sparc64/hvtramp.h b/include/asm-sparc64/hvtramp.h index c7dd6ad056df..b2b9b947b3a4 100644 --- a/include/asm-sparc64/hvtramp.h +++ b/include/asm-sparc64/hvtramp.h | |||
@@ -16,7 +16,7 @@ struct hvtramp_descr { | |||
16 | __u64 fault_info_va; | 16 | __u64 fault_info_va; |
17 | __u64 fault_info_pa; | 17 | __u64 fault_info_pa; |
18 | __u64 thread_reg; | 18 | __u64 thread_reg; |
19 | struct hvtramp_mapping maps[2]; | 19 | struct hvtramp_mapping maps[1]; |
20 | }; | 20 | }; |
21 | 21 | ||
22 | extern void hv_cpu_startup(unsigned long hvdescr_pa); | 22 | extern void hv_cpu_startup(unsigned long hvdescr_pa); |
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index 63b7040e8134..985ea7e31992 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h | |||
@@ -63,6 +63,8 @@ extern void cheetah_enable_pcache(void); | |||
63 | SPITFIRE_HIGHEST_LOCKED_TLBENT : \ | 63 | SPITFIRE_HIGHEST_LOCKED_TLBENT : \ |
64 | CHEETAH_HIGHEST_LOCKED_TLBENT) | 64 | CHEETAH_HIGHEST_LOCKED_TLBENT) |
65 | 65 | ||
66 | extern int num_kernel_image_mappings; | ||
67 | |||
66 | /* The data cache is write through, so this just invalidates the | 68 | /* The data cache is write through, so this just invalidates the |
67 | * specified line. | 69 | * specified line. |
68 | */ | 70 | */ |
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index 58d2c45cd0b1..d4d8fbd9378c 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h | |||
@@ -114,13 +114,13 @@ static inline void * phys_to_virt(unsigned long address) | |||
114 | * If the area you are trying to map is a PCI BAR you should have a | 114 | * If the area you are trying to map is a PCI BAR you should have a |
115 | * look at pci_iomap(). | 115 | * look at pci_iomap(). |
116 | */ | 116 | */ |
117 | extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); | 117 | extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); |
118 | extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); | 118 | extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * The default ioremap() behavior is non-cached: | 121 | * The default ioremap() behavior is non-cached: |
122 | */ | 122 | */ |
123 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | 123 | static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) |
124 | { | 124 | { |
125 | return ioremap_nocache(offset, size); | 125 | return ioremap_nocache(offset, size); |
126 | } | 126 | } |
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index f64a59cc396d..db0be2011a3c 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h | |||
@@ -158,13 +158,13 @@ extern void early_iounmap(void *addr, unsigned long size); | |||
158 | * it's useful if some control registers are in such an area and write combining | 158 | * it's useful if some control registers are in such an area and write combining |
159 | * or read caching is not desirable: | 159 | * or read caching is not desirable: |
160 | */ | 160 | */ |
161 | extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); | 161 | extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); |
162 | extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); | 162 | extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); |
163 | 163 | ||
164 | /* | 164 | /* |
165 | * The default ioremap() behavior is non-cached: | 165 | * The default ioremap() behavior is non-cached: |
166 | */ | 166 | */ |
167 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | 167 | static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) |
168 | { | 168 | { |
169 | return ioremap_nocache(offset, size); | 169 | return ioremap_nocache(offset, size); |
170 | } | 170 | } |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 57ed3e323d97..ea806732b084 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -389,7 +389,7 @@ void sctp_v6_del_protocol(void); | |||
389 | 389 | ||
390 | #else /* #ifdef defined(CONFIG_IPV6) */ | 390 | #else /* #ifdef defined(CONFIG_IPV6) */ |
391 | 391 | ||
392 | static inline void sctp_v6_pf_init(void) { return 0; } | 392 | static inline void sctp_v6_pf_init(void) { return; } |
393 | static inline void sctp_v6_pf_exit(void) { return; } | 393 | static inline void sctp_v6_pf_exit(void) { return; } |
394 | static inline int sctp_v6_protosw_init(void) { return 0; } | 394 | static inline int sctp_v6_protosw_init(void) { return 0; } |
395 | static inline void sctp_v6_protosw_exit(void) { return; } | 395 | static inline void sctp_v6_protosw_exit(void) { return; } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 671af612b768..a3fa587c350c 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -191,8 +191,12 @@ static void change_clocksource(void) | |||
191 | 191 | ||
192 | tick_clock_notify(); | 192 | tick_clock_notify(); |
193 | 193 | ||
194 | /* | ||
195 | * We're holding xtime lock and waking up klogd would deadlock | ||
196 | * us on enqueue. So no printing! | ||
194 | printk(KERN_INFO "Time: %s clocksource has been installed.\n", | 197 | printk(KERN_INFO "Time: %s clocksource has been installed.\n", |
195 | clock->name); | 198 | clock->name); |
199 | */ | ||
196 | } | 200 | } |
197 | #else | 201 | #else |
198 | static inline void change_clocksource(void) { } | 202 | static inline void change_clocksource(void) { } |
diff --git a/lib/iomap.c b/lib/iomap.c index db004a9ff509..dd6ca48fe6b0 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioport_unmap); | |||
256 | * */ | 256 | * */ |
257 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 257 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
258 | { | 258 | { |
259 | unsigned long start = pci_resource_start(dev, bar); | 259 | resource_size_t start = pci_resource_start(dev, bar); |
260 | unsigned long len = pci_resource_len(dev, bar); | 260 | unsigned long len = pci_resource_len(dev, bar); |
261 | unsigned long flags = pci_resource_flags(dev, bar); | 261 | unsigned long flags = pci_resource_flags(dev, bar); |
262 | 262 | ||
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 1aa9d5175398..4e8d4e724b96 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -861,7 +861,6 @@ static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) | |||
861 | 861 | ||
862 | static void p9_mux_flush_cb(struct p9_req *freq, void *a) | 862 | static void p9_mux_flush_cb(struct p9_req *freq, void *a) |
863 | { | 863 | { |
864 | p9_conn_req_callback cb; | ||
865 | int tag; | 864 | int tag; |
866 | struct p9_conn *m; | 865 | struct p9_conn *m; |
867 | struct p9_req *req, *rreq, *rptr; | 866 | struct p9_req *req, *rreq, *rptr; |
@@ -872,7 +871,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a) | |||
872 | freq->tcall->params.tflush.oldtag); | 871 | freq->tcall->params.tflush.oldtag); |
873 | 872 | ||
874 | spin_lock(&m->lock); | 873 | spin_lock(&m->lock); |
875 | cb = NULL; | ||
876 | tag = freq->tcall->params.tflush.oldtag; | 874 | tag = freq->tcall->params.tflush.oldtag; |
877 | req = NULL; | 875 | req = NULL; |
878 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | 876 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { |
diff --git a/net/atm/clip.c b/net/atm/clip.c index d30167c0b48e..2ab1e36098fd 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -947,6 +947,8 @@ static const struct file_operations arp_seq_fops = { | |||
947 | }; | 947 | }; |
948 | #endif | 948 | #endif |
949 | 949 | ||
950 | static void atm_clip_exit_noproc(void); | ||
951 | |||
950 | static int __init atm_clip_init(void) | 952 | static int __init atm_clip_init(void) |
951 | { | 953 | { |
952 | neigh_table_init_no_netlink(&clip_tbl); | 954 | neigh_table_init_no_netlink(&clip_tbl); |
@@ -963,18 +965,22 @@ static int __init atm_clip_init(void) | |||
963 | struct proc_dir_entry *p; | 965 | struct proc_dir_entry *p; |
964 | 966 | ||
965 | p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); | 967 | p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); |
968 | if (!p) { | ||
969 | printk(KERN_ERR "Unable to initialize " | ||
970 | "/proc/net/atm/arp\n"); | ||
971 | atm_clip_exit_noproc(); | ||
972 | return -ENOMEM; | ||
973 | } | ||
966 | } | 974 | } |
967 | #endif | 975 | #endif |
968 | 976 | ||
969 | return 0; | 977 | return 0; |
970 | } | 978 | } |
971 | 979 | ||
972 | static void __exit atm_clip_exit(void) | 980 | static void atm_clip_exit_noproc(void) |
973 | { | 981 | { |
974 | struct net_device *dev, *next; | 982 | struct net_device *dev, *next; |
975 | 983 | ||
976 | remove_proc_entry("arp", atm_proc_root); | ||
977 | |||
978 | unregister_inetaddr_notifier(&clip_inet_notifier); | 984 | unregister_inetaddr_notifier(&clip_inet_notifier); |
979 | unregister_netdevice_notifier(&clip_dev_notifier); | 985 | unregister_netdevice_notifier(&clip_dev_notifier); |
980 | 986 | ||
@@ -1005,6 +1011,13 @@ static void __exit atm_clip_exit(void) | |||
1005 | clip_tbl_hook = NULL; | 1011 | clip_tbl_hook = NULL; |
1006 | } | 1012 | } |
1007 | 1013 | ||
1014 | static void __exit atm_clip_exit(void) | ||
1015 | { | ||
1016 | remove_proc_entry("arp", atm_proc_root); | ||
1017 | |||
1018 | atm_clip_exit_noproc(); | ||
1019 | } | ||
1020 | |||
1008 | module_init(atm_clip_init); | 1021 | module_init(atm_clip_init); |
1009 | module_exit(atm_clip_exit); | 1022 | module_exit(atm_clip_exit); |
1010 | MODULE_AUTHOR("Werner Almesberger"); | 1023 | MODULE_AUTHOR("Werner Almesberger"); |
diff --git a/net/atm/lec.c b/net/atm/lec.c index 0e450d12f035..a2efa7ff41f1 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -1250,6 +1250,10 @@ static int __init lane_module_init(void) | |||
1250 | struct proc_dir_entry *p; | 1250 | struct proc_dir_entry *p; |
1251 | 1251 | ||
1252 | p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); | 1252 | p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); |
1253 | if (!p) { | ||
1254 | printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n"); | ||
1255 | return -ENOMEM; | ||
1256 | } | ||
1253 | #endif | 1257 | #endif |
1254 | 1258 | ||
1255 | register_atm_ioctl(&lane_ioctl_ops); | 1259 | register_atm_ioctl(&lane_ioctl_ops); |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1ff446d0fa8b..f6cdc012eec5 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -177,10 +177,13 @@ static inline struct tnode *node_parent_rcu(struct node *node) | |||
177 | return rcu_dereference(ret); | 177 | return rcu_dereference(ret); |
178 | } | 178 | } |
179 | 179 | ||
180 | /* Same as rcu_assign_pointer | ||
181 | * but that macro() assumes that value is a pointer. | ||
182 | */ | ||
180 | static inline void node_set_parent(struct node *node, struct tnode *ptr) | 183 | static inline void node_set_parent(struct node *node, struct tnode *ptr) |
181 | { | 184 | { |
182 | rcu_assign_pointer(node->parent, | 185 | smp_wmb(); |
183 | (unsigned long)ptr | NODE_TYPE(node)); | 186 | node->parent = (unsigned long)ptr | NODE_TYPE(node); |
184 | } | 187 | } |
185 | 188 | ||
186 | static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i) | 189 | static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i) |
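As the new comment says, rcu_assign_pointer() assumes its value argument is a pointer, while node->parent is an unsigned long with the node type packed into its low bits; at the time this macro expanded to roughly ({ smp_wmb(); (p) = (v); }), so the open-coded form preserves the same publish ordering, and readers still pair with it through rcu_dereference() in node_parent_rcu() above:

    /* make prior initialization of *ptr visible before publishing it */
    smp_wmb();
    node->parent = (unsigned long)ptr | NODE_TYPE(node);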
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a2e92f9709db..3b2e5adca838 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -568,7 +568,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
568 | 568 | ||
569 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); | 569 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); |
570 | 570 | ||
571 | net = skb->dev->nd_net; | 571 | net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net; |
572 | /* Start by cleaning up the memory. */ | 572 | /* Start by cleaning up the memory. */ |
573 | if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) | 573 | if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) |
574 | ip_evictor(net); | 574 | ip_evictor(net); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 071e83a894ad..39b629ac2404 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -735,7 +735,7 @@ new_segment: | |||
735 | if (!(psize -= copy)) | 735 | if (!(psize -= copy)) |
736 | goto out; | 736 | goto out; |
737 | 737 | ||
738 | if (skb->len < mss_now || (flags & MSG_OOB)) | 738 | if (skb->len < size_goal || (flags & MSG_OOB)) |
739 | continue; | 739 | continue; |
740 | 740 | ||
741 | if (forced_push(tp)) { | 741 | if (forced_push(tp)) { |
@@ -981,7 +981,7 @@ new_segment: | |||
981 | if ((seglen -= copy) == 0 && iovlen == 0) | 981 | if ((seglen -= copy) == 0 && iovlen == 0) |
982 | goto out; | 982 | goto out; |
983 | 983 | ||
984 | if (skb->len < mss_now || (flags & MSG_OOB)) | 984 | if (skb->len < size_goal || (flags & MSG_OOB)) |
985 | continue; | 985 | continue; |
986 | 986 | ||
987 | if (forced_push(tp)) { | 987 | if (forced_push(tp)) { |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 0d33a7d32125..51557c27a0cd 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1420,7 +1420,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1420 | u8 *opt; | 1420 | u8 *opt; |
1421 | int rd_len; | 1421 | int rd_len; |
1422 | int err; | 1422 | int err; |
1423 | int hlen; | ||
1424 | u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; | 1423 | u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; |
1425 | 1424 | ||
1426 | dev = skb->dev; | 1425 | dev = skb->dev; |
@@ -1491,7 +1490,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1491 | return; | 1490 | return; |
1492 | } | 1491 | } |
1493 | 1492 | ||
1494 | hlen = 0; | ||
1495 | 1493 | ||
1496 | skb_reserve(buff, LL_RESERVED_SPACE(dev)); | 1494 | skb_reserve(buff, LL_RESERVED_SPACE(dev)); |
1497 | ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, | 1495 | ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 795c761ad99f..66148cc4759e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -711,9 +711,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
711 | */ | 711 | */ |
712 | static psched_time_t htb_do_events(struct htb_sched *q, int level) | 712 | static psched_time_t htb_do_events(struct htb_sched *q, int level) |
713 | { | 713 | { |
714 | int i; | 714 | /* don't run for longer than 2 jiffies; 2 is used instead of |
715 | 715 | 1 to simplify things when jiffy is going to be incremented | |
716 | for (i = 0; i < 500; i++) { | 716 | too soon */ |
717 | unsigned long stop_at = jiffies + 2; | ||
718 | while (time_before(jiffies, stop_at)) { | ||
717 | struct htb_class *cl; | 719 | struct htb_class *cl; |
718 | long diff; | 720 | long diff; |
719 | struct rb_node *p = rb_first(&q->wait_pq[level]); | 721 | struct rb_node *p = rb_first(&q->wait_pq[level]); |
@@ -731,9 +733,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level) | |||
731 | if (cl->cmode != HTB_CAN_SEND) | 733 | if (cl->cmode != HTB_CAN_SEND) |
732 | htb_add_to_wait_tree(q, cl, diff); | 734 | htb_add_to_wait_tree(q, cl, diff); |
733 | } | 735 | } |
734 | if (net_ratelimit()) | 736 | /* too much load - let's continue on next jiffie */ |
735 | printk(KERN_WARNING "htb: too many events !\n"); | 737 | return q->now + PSCHED_TICKS_PER_SEC / HZ; |
736 | return q->now + PSCHED_TICKS_PER_SEC / 10; | ||
737 | } | 738 | } |
738 | 739 | ||
739 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL | 740 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL |
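Bounding the loop with time_before(jiffies, stop_at) instead of a fixed 500 iterations makes the limit load-independent and wraparound-safe; the shape of the pattern:

    unsigned long stop_at = jiffies + 2;    /* 2 not 1: the current jiffy
                                             * may be about to tick over */

    while (time_before(jiffies, stop_at)) {
            /* ... handle one pending class event ... */
    }
    /* still work left: ask to be run again on the next jiffy */
    return q->now + PSCHED_TICKS_PER_SEC / HZ;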
diff --git a/net/socket.c b/net/socket.c index b6d35cd72a50..9d3fbfbc8535 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -909,11 +909,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
909 | if (!dlci_ioctl_hook) | 909 | if (!dlci_ioctl_hook) |
910 | request_module("dlci"); | 910 | request_module("dlci"); |
911 | 911 | ||
912 | if (dlci_ioctl_hook) { | 912 | mutex_lock(&dlci_ioctl_mutex); |
913 | mutex_lock(&dlci_ioctl_mutex); | 913 | if (dlci_ioctl_hook) |
914 | err = dlci_ioctl_hook(cmd, argp); | 914 | err = dlci_ioctl_hook(cmd, argp); |
915 | mutex_unlock(&dlci_ioctl_mutex); | 915 | mutex_unlock(&dlci_ioctl_mutex); |
916 | } | ||
917 | break; | 916 | break; |
918 | default: | 917 | default: |
919 | err = sock->ops->ioctl(sock, cmd, arg); | 918 | err = sock->ops->ioctl(sock, cmd, arg); |
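Taking dlci_ioctl_mutex before testing the hook closes the window in which the DLCI module could clear dlci_ioctl_hook between the check and the call; the hook is now re-tested under the lock:

    if (!dlci_ioctl_hook)
            request_module("dlci");

    mutex_lock(&dlci_ioctl_mutex);
    if (dlci_ioctl_hook)                    /* re-check under the lock */
            err = dlci_ioctl_hook(cmd, argp);
    mutex_unlock(&dlci_ioctl_mutex);
    break;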
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index ab54a736486e..971271602dd0 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -237,14 +237,12 @@ static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt, | |||
237 | 237 | ||
238 | static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) | 238 | static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) |
239 | { | 239 | { |
240 | #ifdef RDMA_TRANSPORT_IWARP | ||
241 | if ((RDMA_TRANSPORT_IWARP == | 240 | if ((RDMA_TRANSPORT_IWARP == |
242 | rdma_node_get_transport(xprt->sc_cm_id-> | 241 | rdma_node_get_transport(xprt->sc_cm_id-> |
243 | device->node_type)) | 242 | device->node_type)) |
244 | && sge_count > 1) | 243 | && sge_count > 1) |
245 | return 1; | 244 | return 1; |
246 | else | 245 | else |
247 | #endif | ||
248 | return min_t(int, sge_count, xprt->sc_max_sge); | 246 | return min_t(int, sge_count, xprt->sc_max_sge); |
249 | } | 247 | } |
250 | 248 | ||