Diffstat (limited to 'arch')

 arch/i386/kernel/cpu/cpufreq/Kconfig        |   3
 arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c |   3
 arch/i386/kernel/cpu/cpufreq/longhaul.c     | 221
 arch/ia64/hp/sim/simscsi.c                  |   4
 arch/ia64/kernel/efi.c                      |   6
 arch/ia64/kernel/head.S                     |   2
 arch/ia64/kernel/ia64_ksyms.c               |   2
 arch/ia64/kernel/pal.S                      |  18
 arch/ia64/kernel/palinfo.c                  |  34
 arch/ia64/lib/Makefile                      |   2
 arch/ia64/mm/contig.c                       |  16
 arch/ia64/mm/discontig.c                    |  68
 arch/ia64/mm/init.c                         |  55
 arch/ia64/mm/ioremap.c                      |   6
 arch/ia64/sn/kernel/xpc_main.c              |   2
 arch/ia64/sn/pci/tioce_provider.c           |   6
 16 files changed, 239 insertions(+), 209 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e44a4c6a4fe5..ccc1edff5c97 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+	depends on PCI
 	help
 	 This add the CPUFreq driver for NatSemi Geode processors which
 	 support suspend modulation.
@@ -202,7 +203,7 @@ config X86_LONGRUN
 config X86_LONGHAUL
 	tristate "VIA Cyrix III Longhaul"
 	select CPU_FREQ_TABLE
-	depends on BROKEN
+	depends on ACPI_PROCESSOR
 	help
 	  This adds the CPUFreq driver for VIA Samuel/CyrixIII,
 	  VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 567b39bea07e..efb41e81351c 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -384,8 +384,7 @@ static int acpi_cpufreq_early_init_acpi(void)
 	}
 
 	/* Do initialization in ACPI core */
-	acpi_processor_preregister_performance(acpi_perf_data);
-	return 0;
+	return acpi_processor_preregister_performance(acpi_perf_data);
 }
 
 static int
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 146f607e9c44..4f2c3aeef724 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,11 +29,13 @@
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/pci.h>
 
 #include <asm/msr.h>
 #include <asm/timex.h>
 #include <asm/io.h>
+#include <asm/acpi.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
 
 #include "longhaul.h"
 
@@ -56,6 +58,8 @@ static int minvid, maxvid;
 static unsigned int minmult, maxmult;
 static int can_scale_voltage;
 static int vrmrev;
+static struct acpi_processor *pr = NULL;
+static struct acpi_processor_cx *cx = NULL;
 
 /* Module parameters */
 static int dont_scale_voltage;
@@ -118,84 +122,65 @@ static int longhaul_get_cpu_mult(void)
 	return eblcr_table[invalue];
 }
 
+/* For processor with BCR2 MSR */
 
-static void do_powersaver(union msr_longhaul *longhaul,
-	unsigned int clock_ratio_index)
+static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
 {
-	struct pci_dev *dev;
-	unsigned long flags;
-	unsigned int tmp_mask;
-	int version;
-	int i;
-	u16 pci_cmd;
-	u16 cmd_state[64];
+	union msr_bcr2 bcr2;
+	u32 t;
 
-	switch (cpu_model) {
-	case CPU_EZRA_T:
-		version = 3;
-		break;
-	case CPU_NEHEMIAH:
-		version = 0xf;
-		break;
-	default:
-		return;
-	}
+	rdmsrl(MSR_VIA_BCR2, bcr2.val);
+	/* Enable software clock multiplier */
+	bcr2.bits.ESOFTBF = 1;
+	bcr2.bits.CLOCKMUL = clock_ratio_index;
 
-	rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
-	longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-	longhaul->bits.EnableSoftBusRatio = 1;
-	longhaul->bits.RevisionKey = 0;
+	/* Sync to timer tick */
+	safe_halt();
+	ACPI_FLUSH_CPU_CACHE();
+	/* Change frequency on next halt or sleep */
+	wrmsrl(MSR_VIA_BCR2, bcr2.val);
+	/* Invoke C3 */
+	inb(cx_address);
+	/* Dummy op - must do something useless after P_LVL3 read */
+	t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+	/* Disable software clock multiplier */
+	local_irq_disable();
+	rdmsrl(MSR_VIA_BCR2, bcr2.val);
+	bcr2.bits.ESOFTBF = 0;
+	wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
 
-	preempt_disable();
-	local_irq_save(flags);
+/* For processor with Longhaul MSR */
 
-	/*
-	 * get current pci bus master state for all devices
-	 * and clear bus master bit
-	 */
-	dev = NULL;
-	i = 0;
-	do {
-		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-		if (dev != NULL) {
-			pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
-			cmd_state[i++] = pci_cmd;
-			pci_cmd &= ~PCI_COMMAND_MASTER;
-			pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-		}
-	} while (dev != NULL);
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+{
+	union msr_longhaul longhaul;
+	u32 t;
 
-	tmp_mask=inb(0x21);	/* works on C3. save mask. */
-	outb(0xFE,0x21);	/* TMR0 only */
-	outb(0xFF,0x80);	/* delay */
+	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+	longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
+	longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
+	longhaul.bits.EnableSoftBusRatio = 1;
 
+	/* Sync to timer tick */
 	safe_halt();
-	wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	halt();
-
+	ACPI_FLUSH_CPU_CACHE();
+	/* Change frequency on next halt or sleep */
+	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+	/* Invoke C3 */
+	inb(cx_address);
+	/* Dummy op - must do something useless after P_LVL3 read */
+	t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+	/* Disable bus ratio bit */
 	local_irq_disable();
-
-	outb(tmp_mask,0x21);	/* restore mask */
-
-	/* restore pci bus master state for all devices */
-	dev = NULL;
-	i = 0;
-	do {
-		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-		if (dev != NULL) {
-			pci_cmd = cmd_state[i++];
-			pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
-		}
-	} while (dev != NULL);
-	local_irq_restore(flags);
-	preempt_enable();
-
-	/* disable bus ratio bit */
-	rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	longhaul->bits.EnableSoftBusRatio = 0;
-	longhaul->bits.RevisionKey = version;
-	wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
+	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+	longhaul.bits.EnableSoftBusRatio = 0;
+	longhaul.bits.EnableSoftBSEL = 0;
+	longhaul.bits.EnableSoftVID = 0;
+	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 }
 
 /**
@@ -209,9 +194,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 {
 	int speed, mult;
 	struct cpufreq_freqs freqs;
-	union msr_longhaul longhaul;
-	union msr_bcr2 bcr2;
 	static unsigned int old_ratio=-1;
+	unsigned long flags;
+	unsigned int pic1_mask, pic2_mask;
 
 	if (old_ratio == clock_ratio_index)
 		return;
@@ -234,6 +219,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
 			fsb, mult/10, mult%10, print_speed(speed/1000));
 
+	preempt_disable();
+	local_irq_save(flags);
+
+	pic2_mask = inb(0xA1);
+	pic1_mask = inb(0x21);	/* works on C3. save mask. */
+	outb(0xFF,0xA1);	/* Overkill */
+	outb(0xFE,0x21);	/* TMR0 only */
+
+	/* Disable bus master arbitration */
+	if (pr->flags.bm_check) {
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
+				  ACPI_MTX_DO_NOT_LOCK);
+	}
+
 	switch (longhaul_version) {
 
 	/*
@@ -245,20 +244,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	 */
 	case TYPE_LONGHAUL_V1:
 	case TYPE_LONGHAUL_V2:
-		rdmsrl (MSR_VIA_BCR2, bcr2.val);
-		/* Enable software clock multiplier */
-		bcr2.bits.ESOFTBF = 1;
-		bcr2.bits.CLOCKMUL = clock_ratio_index;
-		local_irq_disable();
-		wrmsrl (MSR_VIA_BCR2, bcr2.val);
-		safe_halt();
-
-		/* Disable software clock multiplier */
-		rdmsrl (MSR_VIA_BCR2, bcr2.val);
-		bcr2.bits.ESOFTBF = 0;
-		local_irq_disable();
-		wrmsrl (MSR_VIA_BCR2, bcr2.val);
-		local_irq_enable();
+		do_longhaul1(cx->address, clock_ratio_index);
 		break;
 
 	/*
@@ -273,10 +259,22 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	 * to work in practice.
 	 */
 	case TYPE_POWERSAVER:
-		do_powersaver(&longhaul, clock_ratio_index);
+		do_powersaver(cx->address, clock_ratio_index);
 		break;
 	}
 
+	/* Enable bus master arbitration */
+	if (pr->flags.bm_check) {
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
+				  ACPI_MTX_DO_NOT_LOCK);
+	}
+
+	outb(pic2_mask,0xA1);	/* restore mask */
+	outb(pic1_mask,0x21);
+
+	local_irq_restore(flags);
+	preempt_enable();
+
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 }
 
@@ -324,9 +322,11 @@ static int guess_fsb(void)
 static int __init longhaul_get_ranges(void)
 {
 	unsigned long invalue;
-	unsigned int multipliers[32]= {
-		50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
-		-1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
+	unsigned int ezra_t_multipliers[32]= {
+		90, 30, 40, 100, 55, 35, 45, 95,
+		50, 70, 80, 60, 120, 75, 85, 65,
+		-1, 110, 120, -1, 135, 115, 125, 105,
+		130, 150, 160, 140, -1, 155, -1, 145 };
 	unsigned int j, k = 0;
 	union msr_longhaul longhaul;
 	unsigned long lo, hi;
@@ -355,13 +355,13 @@ static int __init longhaul_get_ranges(void)
 		invalue = longhaul.bits.MaxMHzBR;
 		if (longhaul.bits.MaxMHzBR4)
 			invalue += 16;
-		maxmult=multipliers[invalue];
+		maxmult=ezra_t_multipliers[invalue];
 
 		invalue = longhaul.bits.MinMHzBR;
 		if (longhaul.bits.MinMHzBR4 == 1)
 			minmult = 30;
 		else
-			minmult = multipliers[invalue];
+			minmult = ezra_t_multipliers[invalue];
 		fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
 		break;
 	}
@@ -527,6 +527,18 @@ static unsigned int longhaul_get(unsigned int cpu)
 	return calc_speed(longhaul_get_cpu_mult());
 }
 
+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+					  u32 nesting_level,
+					  void *context, void **return_value)
+{
+	struct acpi_device *d;
+
+	if ( acpi_bus_get_device(obj_handle, &d) ) {
+		return 0;
+	}
+	*return_value = (void *)acpi_driver_data(d);
+	return 1;
+}
 
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
@@ -534,6 +546,15 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	char *cpuname=NULL;
 	int ret;
 
+	/* Check ACPI support for C3 state */
+	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+			    &longhaul_walk_callback, NULL, (void *)&pr);
+	if (pr == NULL) goto err_acpi;
+
+	cx = &pr->power.states[ACPI_STATE_C3];
+	if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
+
+	/* Now check what we have on this motherboard */
 	switch (c->x86_model) {
 	case 6:
 		cpu_model = CPU_SAMUEL;
@@ -634,6 +655,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
 
 	return 0;
+
+err_acpi:
+	printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
+	return -ENODEV;
 }
 
 static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -666,6 +691,18 @@ static int __init longhaul_init(void)
 	if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
 		return -ENODEV;
 
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {
+		return -ENODEV;
+		printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
+	}
+#endif
+#ifdef CONFIG_X86_IO_APIC
+	if (cpu_has_apic) {
+		printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
+		return -ENODEV;
+	}
+#endif
 	switch (c->x86_model) {
 	case 6 ... 9:
 		return cpufreq_register_driver(&longhaul_driver);
@@ -699,6 +736,6 @@ MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
 MODULE_LICENSE ("GPL");
 
-module_init(longhaul_init);
+late_initcall(longhaul_init);
 module_exit(longhaul_exit);
 
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe97531134..8a4f0d0d17a3 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@ static void
 simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
 {
 	int list_len = sc->use_sg;
-	struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+	struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
 	struct disk_stat stat;
 	struct disk_req req;
 
@@ -244,7 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
 
 	if (scatterlen == 0)
 		memcpy(sc->request_buffer, buf, len);
-	else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+	else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
 		unsigned thislen = min(len, slp->length);
 
 		memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9dafbce..bb8770a177b5 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
 		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
 			 return md;
 	}
-	return 0;
+	return NULL;
 }
 
 static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
 		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 			 return md;
 	}
-	return 0;
+	return NULL;
 }
 
 u32
@@ -923,7 +923,7 @@ find_memmap_space (void)
 void
 efi_memmap_init(unsigned long *s, unsigned long *e)
 {
-	struct kern_memdesc *k, *prev = 0;
+	struct kern_memdesc *k, *prev = NULL;
 	u64	contig_low=0, contig_high=0;
 	u64	as, ae, lim;
 	void *efi_map_start, *efi_map_end, *p, *q;
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc7..29236f0c62b5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -853,7 +853,6 @@ END(__ia64_init_fpu)
  */
 GLOBAL_ENTRY(ia64_switch_mode_phys)
  {
-	alloc r2=ar.pfs,0,0,0,0
 	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 	mov r15=ip
  }
@@ -902,7 +901,6 @@ END(ia64_switch_mode_phys)
  */
 GLOBAL_ENTRY(ia64_switch_mode_virt)
  {
-	alloc r2=ar.pfs,0,0,0,0
 	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 	mov r15=ip
  }
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb14..3ead20fb6f4b 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__moddi3);
 EXPORT_SYMBOL(__umoddi3);
 
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
 extern void xor_ia64_2(void);
 extern void xor_ia64_3(void);
 extern void xor_ia64_4(void);
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a8..ebaf1e685f5e 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	.body
 	;;
 	ld8 loc2 = [loc2]		// loc2 <- entry point
-	mov out0 = in0		// first argument
-	mov out1 = in1		// copy arg2
-	mov out2 = in2		// copy arg3
-	mov out3 = in3		// copy arg3
-	;;
-	mov loc3 = psr		// save psr
+	mov loc3 = psr		// save psr
 	;;
 	mov loc4=ar.rsc			// save RSE configuration
 	dep.z loc2=loc2,0,61		// convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	;;
 	andcm r16=loc3,r16		// removes bits to clear from psr
 	br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+	mov out0 = in0		// first argument
+	mov out1 = in1		// copy arg2
+	mov out2 = in2		// copy arg3
+	mov out3 = in3		// copy arg3
 	mov loc5 = r19
 	mov loc6 = r20
+
 	br.call.sptk.many rp=b7		// now make the call
-.ret7:
+
 	mov ar.rsc=0		// put RSE in enforced lazy, LE mode
 	mov r16=loc3		// r16= original psr
 	mov r19=loc5
 	mov r20=loc6
 	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
 
-.ret8:	mov psr.l = loc3		// restore init PSR
+	mov psr.l = loc3		// restore init PSR
 	mov ar.pfs = loc1
 	mov rp = loc0
 	;;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab5b52413e91..0b546e2b36ac 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@ version_info(char *page)
 	pal_version_u_t min_ver, cur_ver;
 	char *p = page;
 
-	/* The PAL_VERSION call is advertised as being able to support
-	 * both physical and virtual mode calls. This seems to be a documentation
-	 * bug rather than firmware bug. In fact, it does only support physical mode.
-	 * So now the code reflects this fact and the pal_version() has been updated
-	 * accordingly.
-	 */
-	if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+		return 0;
 
 	p += sprintf(p,
 		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
-		     "PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
-		     "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
-		     cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
-		     cur_ver.pal_version_s.pv_pal_a_model>>4,
-		     cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
-		     min_ver.pal_version_s.pv_pal_a_model>>4,
-		     min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
-		     cur_ver.pal_version_s.pv_pal_b_model>>4,
-		     cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
-		     min_ver.pal_version_s.pv_pal_b_model>>4,
-		     min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+		     "PAL_A : %02x.%02x (min=%02x.%02x)\n"
+		     "PAL_B : %02x.%02x (min=%02x.%02x)\n",
+		     cur_ver.pal_version_s.pv_pal_vendor,
+		     min_ver.pal_version_s.pv_pal_vendor,
+		     cur_ver.pal_version_s.pv_pal_a_model,
+		     cur_ver.pal_version_s.pv_pal_a_rev,
+		     min_ver.pal_version_s.pv_pal_a_model,
+		     min_ver.pal_version_s.pv_pal_a_rev,
+		     cur_ver.pal_version_s.pv_pal_b_model,
+		     cur_ver.pal_version_s.pv_pal_b_rev,
+		     min_ver.pal_version_s.pv_pal_b_model,
+		     min_ver.pal_version_s.pv_pal_b_rev);
 	return p - page;
 }
 
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a9..38fa6e49e791 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
-lib-$(CONFIG_MD_RAID5)	+= xor.o
+lib-$(CONFIG_MD_RAID456)	+= xor.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d924..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -45,9 +46,15 @@ show_mem (void)
 
 	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
+	for (i = 0; i < max_mapnr; i++) {
+		if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+			if (max_gap < LARGE_GAP)
+				continue;
+			i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
 			continue;
+		}
 		total++;
 		if (PageReserved(mem_map+i))
 			reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
 	unsigned long zones_size[MAX_NR_ZONES];
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long max_gap;
 #endif
 
 	/* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
 		}
 	}
 
-	max_gap = 0;
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@ paging_init (void)
 
 		/* allocate virtual_mem_map */
 
-		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+			sizeof(struct page));
 		vmalloc_end -= map_size;
 		vmem_map = (struct page *) vmalloc_end;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db96..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	unsigned long end_address, hole_next_pfn;
-	unsigned long stop_address;
-
-	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-	end_address = PAGE_ALIGN(end_address);
-
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-	do {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pgd = pgd_offset_k(end_address);
-		if (pgd_none(*pgd)) {
-			end_address += PGDIR_SIZE;
-			continue;
-		}
-
-		pud = pud_offset(pgd, end_address);
-		if (pud_none(*pud)) {
-			end_address += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, end_address);
-		if (pmd_none(*pmd)) {
-			end_address += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-		if (pte_none(*pte)) {
-			end_address += PAGE_SIZE;
-			pte++;
-			if ((end_address < stop_address) &&
-			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-				goto retry_pte;
-			continue;
-		}
-		/* Found next valid vmem_map page */
-		break;
-	} while (end_address < stop_address);
-
-	end_address = min(end_address, stop_address);
-	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-	hole_next_pfn = end_address / sizeof(struct page);
-	return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -625,7 +563,8 @@ void show_mem(void)
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
 			else {
-				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
 				continue;
 			}
 			if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+		sizeof(struct page));
 	vmem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
 
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
 	 */
 	attr = kern_mem_attribute(offset, size);
 	if (attr & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 	else if (attr & EFI_MEMORY_UC)
 		return __ioremap(offset, size);
 
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
 	gran_base = GRANULEROUNDDOWN(offset);
 	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 
 	return __ioremap(offset, size);
 }
@@ -53,7 +53,7 @@ void __iomem *
 ioremap_nocache (unsigned long offset, unsigned long size)
 {
 	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
-		return 0;
+		return NULL;
 
 	return __ioremap(offset, size);
 }
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421a..5e8e59efb347 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
 	partid_t partid = (u64) __partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 	int ret;
 
 
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd34284886..af7171adcd2c 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
 		else
 			mmr_war_offset = 0x158;
 
-		readq_relaxed((void *)(mmr_base + mmr_war_offset));
+		readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
 	}
 }
 
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
 
 	if (mmr_offset < 0x45000) {
 		if (mmr_offset == 0x100)
-			readq_relaxed((void *)(mmr_base + 0x38));
-		readq_relaxed((void *)(mmr_base + 0xb050));
+			readq_relaxed((void __iomem *)(mmr_base + 0x38));
+		readq_relaxed((void __iomem *)(mmr_base + 0xb050));
 	}
 }
 