 arch/arm/mach-ixp23xx/core.c          | 18
 arch/sparc64/kernel/head.S            | 30
 arch/sparc64/kernel/setup.c           | 23
 arch/sparc64/kernel/smp.c             | 16
 block/cfq-iosched.c                   | 52
 include/asm-arm/arch-ixp23xx/memory.h |  2
 include/asm-generic/pgtable.h         | 11
 include/asm-mips/pgtable.h            | 10
 include/asm-sparc64/pgtable.h         | 17
 mm/slab.c                             | 27
 10 files changed, 141 insertions(+), 65 deletions(-)
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index 092ee12ced42..affd1d5d7440 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -178,8 +178,12 @@ static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
 
 static void ixp23xx_irq_mask(unsigned int irq)
 {
-        volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+        volatile unsigned long *intr_reg;
 
+        if (irq >= 56)
+                irq += 8;
+
+        intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
         *intr_reg &= ~(1 << (irq % 32));
 }
 
@@ -199,17 +203,25 @@ static void ixp23xx_irq_ack(unsigned int irq)
  */
 static void ixp23xx_irq_level_unmask(unsigned int irq)
 {
-        volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+        volatile unsigned long *intr_reg;
 
         ixp23xx_irq_ack(irq);
 
+        if (irq >= 56)
+                irq += 8;
+
+        intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
         *intr_reg |= (1 << (irq % 32));
 }
 
 static void ixp23xx_irq_edge_unmask(unsigned int irq)
 {
-        volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+        volatile unsigned long *intr_reg;
+
+        if (irq >= 56)
+                irq += 8;
 
+        intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
         *intr_reg |= (1 << (irq % 32));
 }
 
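
All three mask/unmask paths now share the same quirk: software IRQ numbers 56 and up map to enable bits eight positions further along, so the register index and bit position must be derived only after the adjustment. A standalone sketch of just that arithmetic (the EN1/EN2 naming follows the code above; the sample IRQ list is illustrative):

#include <stdio.h>

/* Model of the enable-bit lookup: IRQs >= 56 sit past a reserved
 * eight-bit hole in the enable-register bank. */
static void irq_to_reg_bit(unsigned int irq, unsigned int *reg, unsigned int *bit)
{
        if (irq >= 56)
                irq += 8;        /* hop over the reserved bits */
        *reg = irq / 32;         /* 0 = EN1, 1 = EN2, ... */
        *bit = irq % 32;
}

int main(void)
{
        unsigned int samples[] = { 0, 31, 32, 55, 56, 63 };
        unsigned int reg, bit;

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                irq_to_reg_bit(samples[i], &reg, &bit);
                printf("irq %2u -> EN%u bit %2u\n", samples[i], reg + 1, bit);
        }
        return 0;
}
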
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 3eadac5e171e..31c5892f5acc 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -10,6 +10,7 @@
 #include <linux/config.h>
 #include <linux/version.h>
 #include <linux/errno.h>
+#include <linux/threads.h>
 #include <asm/thread_info.h>
 #include <asm/asi.h>
 #include <asm/pstate.h>
@@ -493,6 +494,35 @@ tlb_fixup_done:
         call    prom_init
         mov     %l7, %o0                ! OpenPROM cif handler
 
+        /* Initialize current_thread_info()->cpu as early as possible.
+         * In order to do that accurately we have to patch up the get_cpuid()
+         * assembler sequences. And that, in turn, requires that we know
+         * if we are on a Starfire box or not. While we're here, patch up
+         * the sun4v sequences as well.
+         */
+        call    check_if_starfire
+        nop
+        call    per_cpu_patch
+        nop
+        call    sun4v_patch
+        nop
+
+#ifdef CONFIG_SMP
+        call    hard_smp_processor_id
+        nop
+        cmp     %o0, NR_CPUS
+        blu,pt  %xcc, 1f
+        nop
+        call    boot_cpu_id_too_large
+        nop
+        /* Not reached... */
+
+1:
+#else
+        mov     0, %o0
+#endif
+        stb     %o0, [%g6 + TI_CPU]
+
         /* Off we go.... */
         call    start_kernel
         nop
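
In C terms, the added assembly performs roughly the following before start_kernel(): query the boot CPU id, halt with a diagnostic if it is out of range, and store the id into thread_info. A minimal user-space model of that sequence (the NR_CPUS value and the id returned by the firmware stand-in are made up):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 64              /* illustrative; really a config option */

/* Stand-in for the firmware query the real code performs. */
static int hard_smp_processor_id(void)
{
        return 3;
}

static void boot_cpu_id_too_large(int cpu)
{
        fprintf(stderr, "Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
                cpu, NR_CPUS);
        exit(1);                /* the kernel calls prom_halt() instead */
}

int main(void)
{
        int cpu = hard_smp_processor_id();

        if (cpu >= NR_CPUS)
                boot_cpu_id_too_large(cpu);

        /* stb %o0, [%g6 + TI_CPU]: record the id in thread_info */
        printf("current_thread_info()->cpu = %d\n", cpu);
        return 0;
}
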
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 005167f82419..9cf1c88cd774 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -220,7 +220,7 @@ char reboot_command[COMMAND_LINE_SIZE];
 
 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 
-static void __init per_cpu_patch(void)
+void __init per_cpu_patch(void)
 {
         struct cpuid_patch_entry *p;
         unsigned long ver;
@@ -280,7 +280,7 @@ static void __init per_cpu_patch(void)
         }
 }
 
-static void __init sun4v_patch(void)
+void __init sun4v_patch(void)
 {
         struct sun4v_1insn_patch_entry *p1;
         struct sun4v_2insn_patch_entry *p2;
@@ -315,6 +315,15 @@ static void __init sun4v_patch(void)
         }
 }
 
+#ifdef CONFIG_SMP
+void __init boot_cpu_id_too_large(int cpu)
+{
+        prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+                    cpu, NR_CPUS);
+        prom_halt();
+}
+#endif
+
 void __init setup_arch(char **cmdline_p)
 {
         /* Initialize PROM console and command line. */
@@ -332,16 +341,6 @@ void __init setup_arch(char **cmdline_p)
         conswitchp = &prom_con;
 #endif
 
-        /* Work out if we are starfire early on */
-        check_if_starfire();
-
-        /* Now we know enough to patch the get_cpuid sequences
-         * used by trap code.
-         */
-        per_cpu_patch();
-
-        sun4v_patch();
-
         boot_flags_init(*cmdline_p);
 
         idprom_init();
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 90eaca3ec9a6..4e8cd79156e0 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1264,7 +1264,6 @@ void __init smp_tick_init(void)
         boot_cpu_id = hard_smp_processor_id();
         current_tick_offset = timer_tick_offset;
 
-        cpu_set(boot_cpu_id, cpu_online_map);
         prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
@@ -1345,18 +1344,6 @@ void __init smp_setup_cpu_possible_map(void)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-        int cpu = hard_smp_processor_id();
-
-        if (cpu >= NR_CPUS) {
-                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
-                prom_halt();
-        }
-
-        current_thread_info()->cpu = cpu;
-        __local_per_cpu_offset = __per_cpu_offset(cpu);
-
-        cpu_set(smp_processor_id(), cpu_online_map);
-        cpu_set(smp_processor_id(), phys_cpu_present_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
@@ -1433,4 +1420,7 @@ void __init setup_per_cpu_areas(void)
 
         for (i = 0; i < NR_CPUS; i++, ptr += size)
                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+        /* Setup %g5 for the boot cpu. */
+        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 }
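
setup_per_cpu_areas() lays the .data.percpu template out once per possible CPU, and with this change the boot CPU also records its own offset here (kept in %g5 on sparc64, per the comment) instead of relying on smp_prepare_boot_cpu(). A toy model of the layout arithmetic, with illustrative sizes and names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4
#define AREA_SIZE 128   /* stands in for __per_cpu_end - __per_cpu_start */

static char template_area[AREA_SIZE];  /* the .data.percpu image */
static char *areas;                    /* one copy per possible cpu */

/* Like __per_cpu_offset(cpu): distance from the template to cpu's copy. */
static long per_cpu_offset(int cpu)
{
        return (areas + (long)cpu * AREA_SIZE) - template_area;
}

int main(void)
{
        areas = malloc((size_t)NR_CPUS * AREA_SIZE);
        for (int i = 0; i < NR_CPUS; i++)
                memcpy(areas + (long)i * AREA_SIZE, template_area, AREA_SIZE);

        /* The boot cpu now records its own offset at setup time. */
        long local_offset = per_cpu_offset(0);
        printf("boot cpu per-cpu offset: %ld\n", local_offset);

        free(areas);
        return 0;
}
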
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 11ce6aaf1bd0..8e9d84825e1c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -133,6 +133,7 @@ struct cfq_data {
         mempool_t *crq_pool;
 
         int rq_in_driver;
+        int hw_tag;
 
         /*
          * schedule slice state info
@@ -500,10 +501,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
         /*
          * if queue was preempted, just add to front to be fair. busy_rr
-         * isn't sorted.
+         * isn't sorted, but insert at the back for fairness.
          */
         if (preempted || list == &cfqd->busy_rr) {
-                list_add(&cfqq->cfq_list, list);
+                if (preempted)
+                        list = list->prev;
+
+                list_add_tail(&cfqq->cfq_list, list);
                 return;
         }
 
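
For reference, the two list primitives differ only in which side of the anchor node they splice into: list_add() inserts immediately after it (the front, when the anchor is the list head) and list_add_tail() immediately before it, so shifting the anchor with list->prev moves the insertion point. A freestanding reimplementation, purely to illustrate the semantics the hunk above relies on:

#include <stdio.h>

/* Minimal model of the kernel's intrusive circular lists. */
struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h->prev = h;
}

static void __list_add(struct list_head *n, struct list_head *prev,
                       struct list_head *next)
{
        next->prev = n;
        n->next = next;
        n->prev = prev;
        prev->next = n;
}

/* list_add(): insert right after the anchor -- front insertion. */
static void list_add(struct list_head *n, struct list_head *head)
{
        __list_add(n, head, head->next);
}

/* list_add_tail(): insert right before the anchor -- back insertion. */
static void list_add_tail(struct list_head *n, struct list_head *head)
{
        __list_add(n, head->prev, head);
}

struct node {
        struct list_head list;  /* must stay first for the cast below */
        int id;
};

int main(void)
{
        struct list_head head;
        struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        INIT_LIST_HEAD(&head);
        list_add_tail(&a.list, &head);  /* back:  1 */
        list_add_tail(&b.list, &head);  /* back:  1 2 */
        list_add(&c.list, &head);       /* front: 3 1 2 */

        for (struct list_head *p = head.next; p != &head; p = p->next)
                printf("%d ", ((struct node *)p)->id);
        printf("\n");
        return 0;
}
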
@@ -664,6 +668,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
         struct cfq_data *cfqd = q->elevator->elevator_data;
 
         cfqd->rq_in_driver++;
+
+        /*
+         * If the depth is larger than 1, it really could be queueing. But
+         * let's make the mark a little higher - idling could still be good
+         * for low queueing, and a low number could also just indicate a
+         * SCSI mid layer like behaviour where limit+1 is often seen.
+         */
+        if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+                cfqd->hw_tag = 1;
 }
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
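
The hw_tag flag latches once more than four requests are observed in flight at once, which is taken as evidence of genuine hardware command queueing rather than the limit+1 pattern the comment mentions. A self-contained sketch of the detector, with the request dispatch/completion plumbing faked out:

#include <stdio.h>

struct cfq_model {
        int rq_in_driver;       /* requests currently dispatched to the drive */
        int hw_tag;             /* latched once queueing is detected */
};

static void activate(struct cfq_model *m)
{
        m->rq_in_driver++;
        /* more than 4 in flight: assume the device really queues commands */
        if (!m->hw_tag && m->rq_in_driver > 4)
                m->hw_tag = 1;
}

static void complete(struct cfq_model *m)
{
        m->rq_in_driver--;
}

int main(void)
{
        struct cfq_model m = { 0, 0 };

        for (int depth = 1; depth <= 6; depth++) {
                activate(&m);
                printf("depth %d -> hw_tag=%d\n", depth, m.hw_tag);
        }
        while (m.rq_in_driver)
                complete(&m);
        return 0;
}
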
@@ -879,6 +892,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 
         /*
+         * If no new queues are available, check if the busy list has some
+         * before falling back to idle io.
+         */
+        if (!cfqq && !list_empty(&cfqd->busy_rr))
+                cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
+        /*
          * if we have idle queues and no rt or be queues had pending
          * requests, either allow immediate service if the grace period
          * has passed or arm the idle grace timer
@@ -1458,7 +1478,8 @@ retry:
                          * set ->slice_left to allow preemption for a new process
                          */
                         cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-                        cfq_mark_cfqq_idle_window(cfqq);
+                        if (!cfqd->hw_tag)
+                                cfq_mark_cfqq_idle_window(cfqq);
                         cfq_mark_cfqq_prio_changed(cfqq);
                         cfq_init_prio_data(cfqq);
                 }
@@ -1649,7 +1670,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
         int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-        if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+        if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
                 enable_idle = 0;
         else if (sample_valid(cic->ttime_samples)) {
                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1740,14 +1761,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
         cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
+        cic = crq->io_context;
+
         /*
          * we never wait for an async request and we don't allow preemption
          * of an async request. so just return early
          */
-        if (!cfq_crq_is_sync(crq))
+        if (!cfq_crq_is_sync(crq)) {
+                /*
+                 * sync process issued an async request; if it's waiting
+                 * then expire it and kick rq handling.
+                 */
+                if (cic == cfqd->active_cic &&
+                    del_timer(&cfqd->idle_slice_timer)) {
+                        cfq_slice_expired(cfqd, 0);
+                        cfq_start_queueing(cfqd, cfqq);
+                }
                 return;
-
-        cic = crq->io_context;
+        }
 
         cfq_update_io_thinktime(cfqd, cic);
         cfq_update_io_seektime(cfqd, cic, crq);
@@ -2165,10 +2196,9 @@ static void cfq_idle_class_timer(unsigned long data)
          * race with a non-idle queue, reset timer
          */
         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-        if (!time_after_eq(jiffies, end)) {
-                cfqd->idle_class_timer.expires = end;
-                add_timer(&cfqd->idle_class_timer);
-        } else
+        if (!time_after_eq(jiffies, end))
+                mod_timer(&cfqd->idle_class_timer, end);
+        else
                 cfq_schedule_dispatch(cfqd);
 
         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
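
The timer change is a behavior-preserving simplification: mod_timer() updates the expiry whether or not the timer is already pending, whereas setting ->expires and calling add_timer() by hand is only legal on an inactive timer. A toy model of the two calls' semantics (not the kernel implementation):

#include <stdio.h>

/* Toy model of the kernel timer API semantics involved in this hunk. */
struct timer {
        long expires;
        int pending;
};

/* add_timer() may only be called on a timer that is not pending. */
static void add_timer(struct timer *t)
{
        t->pending = 1;
}

/* mod_timer() re-arms safely whether or not the timer is pending,
 * which is why it is the shorter and safer form used by the patch. */
static void mod_timer(struct timer *t, long expires)
{
        t->expires = expires;
        t->pending = 1;
}

int main(void)
{
        struct timer t = { 0, 0 };

        mod_timer(&t, 100);     /* arms the timer */
        mod_timer(&t, 200);     /* re-arms; no separate add_timer() needed */
        printf("expires=%ld pending=%d\n", t.expires, t.pending);
        (void)add_timer;        /* kept only to contrast the two APIs */
        return 0;
}
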
diff --git a/include/asm-arm/arch-ixp23xx/memory.h b/include/asm-arm/arch-ixp23xx/memory.h
index 6e19f46d54d1..c85fc06a043c 100644
--- a/include/asm-arm/arch-ixp23xx/memory.h
+++ b/include/asm-arm/arch-ixp23xx/memory.h
@@ -49,7 +49,7 @@ static inline int __ixp23xx_arch_is_coherent(void)
 {
         extern unsigned int processor_id;
 
-        if (((processor_id & 15) >= 2) || machine_is_roadrunner())
+        if (((processor_id & 15) >= 4) || machine_is_roadrunner())
                 return 1;
 
         return 0;
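
The low nibble of the ARM processor_id encodes the silicon revision, so the test now requires a later stepping before reporting the cache as coherent. A quick illustration of the masking; the sample id values are invented:

#include <stdio.h>

/* Coherent only from revision 4 onward, or on Roadrunner boards. */
static int arch_is_coherent(unsigned int processor_id, int is_roadrunner)
{
        return ((processor_id & 15) >= 4) || is_roadrunner;
}

int main(void)
{
        unsigned int ids[] = { 0x69054412, 0x69054414 };        /* made up */

        for (int i = 0; i < 2; i++)
                printf("id %#x rev %u -> coherent=%d\n",
                       ids[i], ids[i] & 15, arch_is_coherent(ids[i], 0));
        return 0;
}
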
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 358e4d309ceb..c2059a3a0621 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -159,17 +159,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)       do { } while (0)
 #endif
 
-#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr) (pte)
-#else
-#define move_pte(pte, prot, old_addr, new_addr)                 \
-({                                                              \
-        pte_t newpte = (pte);                                   \
-        if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&      \
-                        pte_page(pte) == ZERO_PAGE(old_addr))   \
-                newpte = mk_pte(ZERO_PAGE(new_addr), (prot));   \
-        newpte;                                                 \
-})
 #endif
 
 /*
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 174a3cda8c26..f80fe75c7800 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -70,7 +70,15 @@ extern unsigned long zero_page_mask;
 #define ZERO_PAGE(vaddr) \
         (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
-#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)                 \
+({                                                              \
+        pte_t newpte = (pte);                                   \
+        if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&      \
+                        pte_page(pte) == ZERO_PAGE(old_addr))   \
+                newpte = mk_pte(ZERO_PAGE(new_addr), (prot));   \
+        newpte;                                                 \
+})
 
 extern void paging_init(void);
 
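
MIPS keeps several copies of the zero page at different cache colors, and ZERO_PAGE(vaddr) picks one by masking the virtual address with zero_page_mask. When mremap() moves a mapping, a zero-page PTE must therefore be rewritten against the new address, which is what the move_pte() above does. A sketch of the color selection, assuming four colors purely for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Illustrative: four cache colors' worth of zero pages, as a virtually
 * indexed cache larger than one page per way might require. */
#define ZERO_PAGE_MASK (3UL * PAGE_SIZE)

/* Which of the replicated zero pages serves this virtual address? */
static unsigned long zero_page_index(unsigned long vaddr)
{
        return (vaddr & ZERO_PAGE_MASK) / PAGE_SIZE;
}

int main(void)
{
        unsigned long old_addr = 0x10004000UL;
        unsigned long new_addr = 0x2000b000UL;

        /* Differing indices mean move_pte() must swap in the new page. */
        printf("old color %lu, new color %lu\n",
               zero_page_index(old_addr), zero_page_index(new_addr));
        return 0;
}
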
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index c44e7466534e..cd464f469a2c 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -689,6 +689,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pte_clear(mm,addr,ptep)         \
         set_pte_at((mm), (addr), (ptep), __pte(0UL))
 
+#ifdef DCACHE_ALIASING_POSSIBLE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)                         \
+({                                                                      \
+        pte_t newpte = (pte);                                           \
+        if (tlb_type != hypervisor && pte_present(pte)) {               \
+                unsigned long this_pfn = pte_pfn(pte);                  \
+                                                                        \
+                if (pfn_valid(this_pfn) &&                              \
+                    (((old_addr) ^ (new_addr)) & (1 << 13)))            \
+                        flush_dcache_page_all(current->mm,              \
+                                              pfn_to_page(this_pfn));   \
+        }                                                               \
+        newpte;                                                         \
+})
+#endif
+
 extern pgd_t swapper_pg_dir[2048];
 extern pmd_t swapper_low_pmd_dir[2048];
 
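
On sparc64 the D-cache is virtually indexed one bit beyond the 8 KB page size, so two mappings of the same physical page can alias unless bit 13 of their virtual addresses agrees; the move_pte() above flushes the page whenever a remap flips that bit. The predicate in isolation (the addresses are examples):

#include <stdio.h>

#define PAGE_SHIFT 13UL         /* sparc64: 8 KB pages */

/* A remap needs a D-cache flush when old and new virtual addresses
 * land in different cache colors, i.e. disagree in bit 13. */
static int needs_dcache_flush(unsigned long old_addr, unsigned long new_addr)
{
        return ((old_addr ^ new_addr) & (1UL << PAGE_SHIFT)) != 0;
}

int main(void)
{
        /* same cache color: no flush needed */
        printf("%d\n", needs_dcache_flush(0x40000000UL, 0x50000000UL));
        /* bit 13 flips: the page changes color, flush required */
        printf("%d\n", needs_dcache_flush(0x40000000UL, 0x50002000UL));
        return 0;
}
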
diff --git a/mm/slab.c b/mm/slab.c
index d31a06bfbea5..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE   (((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -1356,12 +1351,6 @@ void __init kmem_cache_init(void)
                                         NULL, NULL);
         }
 
-        /* Inc off-slab bufctl limit until the ceiling is hit. */
-        if (!(OFF_SLAB(sizes->cs_cachep))) {
-                offslab_limit = sizes->cs_size - sizeof(struct slab);
-                offslab_limit /= sizeof(kmem_bufctl_t);
-        }
-
         sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
                                         sizes->cs_size,
                                         ARCH_KMALLOC_MINALIGN,
@@ -1780,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
                         size_t size, size_t align, unsigned long flags)
 {
+        unsigned long offslab_limit;
         size_t left_over = 0;
         int gfporder;
 
@@ -1791,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
                 if (!num)
                         continue;
 
-                /* More than offslab_limit objects will cause problems */
-                if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-                        break;
+                if (flags & CFLGS_OFF_SLAB) {
+                        /*
+                         * Max number of objs-per-slab for caches which
+                         * use off-slab slabs. Needed to avoid a possible
+                         * looping condition in cache_grow().
+                         */
+                        offslab_limit = size - sizeof(struct slab);
+                        offslab_limit /= sizeof(kmem_bufctl_t);
+
+                        if (num > offslab_limit)
+                                break;
+                }
 
                 /* Found something acceptable - save it away */
                 cachep->num = num;
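
The bound now lives where it is used and is computed from the cache's own object size rather than a stale global: the off-slab descriptor must fit struct slab plus one kmem_bufctl_t per object within size bytes, or cache_grow() could loop as the comment warns. A standalone recomputation of the limit; the struct slab layout below is only a rough 2.6-era stand-in:

#include <stdio.h>

typedef unsigned int kmem_bufctl_t;

/* Rough stand-in for the 2.6 struct slab header. */
struct slab {
        void *list_next, *list_prev;
        unsigned long colouroff;
        void *s_mem;
        unsigned int inuse;
        kmem_bufctl_t free;
        unsigned short nodeid;
};

int main(void)
{
        unsigned long size = 512;       /* illustrative object size */
        unsigned long offslab_limit;

        /* Same arithmetic as the hunk above: header, then one bufctl
         * per object, all within size bytes. */
        offslab_limit = size - sizeof(struct slab);
        offslab_limit /= sizeof(kmem_bufctl_t);

        printf("a %lu-byte off-slab buffer manages at most %lu objects\n",
               size, offslab_limit);
        return 0;
}
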