-rw-r--r--  arch/ia64/Kconfig                    |   4
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c     |   2
-rw-r--r--  arch/ia64/kernel/entry.S             |   2
-rw-r--r--  arch/ia64/kernel/err_inject.c        |   2
-rw-r--r--  arch/ia64/kernel/irq.c               |  11
-rw-r--r--  arch/ia64/kernel/kprobes.c           |  12
-rw-r--r--  arch/ia64/kernel/machvec.c           |   2
-rw-r--r--  arch/ia64/kernel/mca.c               |   2
-rw-r--r--  arch/ia64/mm/contig.c                |   2
-rw-r--r--  arch/ia64/mm/discontig.c             |   2
-rw-r--r--  arch/ia64/mm/init.c                  |  51
-rw-r--r--  arch/ia64/sn/kernel/io_common.c      |   2
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  |   2
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c          |   4
-rw-r--r--  drivers/char/snsc_event.c            |   2
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c    |  36
-rw-r--r--  include/asm-ia64/irq.h               |   6
-rw-r--r--  include/asm-ia64/kprobes.h           |   4
-rw-r--r--  include/asm-ia64/pgalloc.h           |  82
-rw-r--r--  include/asm-ia64/unistd.h            |   4
-rw-r--r--  kernel/irq/proc.c                    |   7

21 files changed, 89 insertions, 152 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6e41471449c..de1bff65996 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -31,6 +31,10 @@ config ZONE_DMA
 	def_bool y
 	depends on !IA64_SGI_SN2
 
+config QUICKLIST
+	bool
+	default y
+
 config MMU
 	bool
 	default y
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2153bcacbe6..94e57109fad 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -63,7 +63,7 @@ use_swiotlb (struct device *dev)
 	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
 }
 
-void
+void __init
 hwsw_init (void)
 {
 	/* default to a smallish 2MB sw I/O TLB */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b50bf208678..144b056282a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1583,5 +1583,7 @@ sys_call_table:
 	data8 sys_vmsplice
 	data8 sys_ni_syscall			// reserved for move_pages
 	data8 sys_getcpu
+	data8 sys_epoll_pwait			// 1305
+	data8 sys_utimensat
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 6a49600cf33..b642648cc2a 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -291,5 +291,5 @@ module_init(err_inject_init);
 module_exit(err_inject_exit);
 
 MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>");
-MODULE_DESCRIPTION("MC error injection kenrel sysfs interface");
+MODULE_DESCRIPTION("MC error injection kernel sysfs interface");
 MODULE_LICENSE("GPL");
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ce49c85c928..b4c239685d2 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -104,6 +104,17 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
+
+bool is_affinity_mask_valid(cpumask_t cpumask)
+{
+	if (ia64_platform_is("sn2")) {
+		/* Only allow one CPU to be specified in the smp_affinity mask */
+		if (cpus_weight(cpumask) != 1)
+			return false;
+	}
+	return true;
+}
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 4f5fd0960ba..72e593e9405 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -370,14 +370,18 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
 
 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	kcb->prev_kprobe.kp = kprobe_running();
-	kcb->prev_kprobe.status = kcb->kprobe_status;
+	unsigned int i;
+	i = atomic_add_return(1, &kcb->prev_kprobe_index);
+	kcb->prev_kprobe[i-1].kp = kprobe_running();
+	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
 }
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
-	kcb->kprobe_status = kcb->prev_kprobe.status;
+	unsigned int i;
+	i = atomic_sub_return(1, &kcb->prev_kprobe_index);
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe[i].kp;
+	kcb->kprobe_status = kcb->prev_kprobe[i].status;
 }
 
 static void __kprobes set_current_kprobe(struct kprobe *p,
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 9620822270a..13df337508e 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -35,7 +35,7 @@ lookup_machvec (const char *name)
 	return 0;
 }
 
-void
+void __init
 machvec_init (const char *name)
 {
 	struct ia64_machine_vector *mv;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index f8ae709de0b..26814de6c29 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -118,7 +118,9 @@ static ia64_mc_info_t ia64_mc_info;
 #define CPE_HISTORY_LENGTH    5
 #define CMC_HISTORY_LENGTH    5
 
+#ifdef CONFIG_ACPI
 static struct timer_list cpe_poll_timer;
+#endif
 static struct timer_list cmc_poll_timer;
 /*
  * This variable tells whether we are currently in polling mode.
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 44ce5ed9444..7ac8592a35b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -88,7 +88,7 @@ void show_mem(void)
 	printk(KERN_INFO "%d pages shared\n", total_shared);
 	printk(KERN_INFO "%d pages swap cached\n", total_cached);
 	printk(KERN_INFO "Total of %ld pages in page table cache\n",
-	       pgtable_quicklist_total_size());
+	       quicklist_total_size());
 	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 94844442812..38085ac1833 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -561,7 +561,7 @@ void show_mem(void)
 	printk(KERN_INFO "%d pages shared\n", total_shared);
 	printk(KERN_INFO "%d pages swap cached\n", total_cached);
 	printk(KERN_INFO "Total of %ld pages in page table cache\n",
-	       pgtable_quicklist_total_size());
+	       quicklist_total_size());
 	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cffb1e8325e..c14abefabaf 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -39,9 +39,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
-DEFINE_PER_CPU(long, __pgtable_quicklist_size);
-
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -56,54 +53,6 @@ EXPORT_SYMBOL(vmem_map);
 struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
-#define MIN_PGT_PAGES			25UL
-#define MAX_PGT_FREES_PER_PASS		16L
-#define PGT_FRACTION_OF_NODE_MEM	16
-
-static inline long
-max_pgt_pages(void)
-{
-	u64 node_free_pages, max_pgt_pages;
-
-#ifndef CONFIG_NUMA
-	node_free_pages = nr_free_pages();
-#else
-	node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
-#endif
-	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
-	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
-	return max_pgt_pages;
-}
-
-static inline long
-min_pages_to_free(void)
-{
-	long pages_to_free;
-
-	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
-	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
-	return pages_to_free;
-}
-
-void
-check_pgt_cache(void)
-{
-	long pages_to_free;
-
-	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
-		return;
-
-	preempt_disable();
-	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
-		while (pages_to_free--) {
-			free_page((unsigned long)pgtable_quicklist_alloc());
-		}
-		preempt_enable();
-		preempt_disable();
-	}
-	preempt_enable();
-}
-
 void
 lazy_mmu_prot_update (pte_t pte)
 {
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index d48bcd83253..7ed72d3faf7 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -364,7 +364,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
 
 	element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
 	if (!element) {
-		dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
+		dev_dbg(&dev->dev, "%s: out of memory!\n", __FUNCTION__);
 		return;
 	}
 	element->sysdata = SN_PCIDEV_INFO(dev);
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 57c723f5cba..7ba403232cb 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -574,7 +574,7 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
 	u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
 	part->remote_rp_version = remote_rp_version;
-	dev_dbg(xpc_part, "  remote_rp_version = 0x%016lx\n",
+	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
 		part->remote_rp_version);
 
 	part->remote_rp_stamp = *remote_rp_stamp;
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index 88fad85ceef..da721353097 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -343,8 +343,8 @@ xpnet_dev_open(struct net_device *dev)
 	enum xpc_retval ret;
 
 
-	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, "
-		"%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
+	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
+		"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
 		XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
 		XPNET_MAX_IDLE_KTHREADS);
 
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index 2f56e8c5489..1b75b0b7d54 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -203,8 +203,6 @@ scdrv_dispatch_event(char *event, int len)
 	class = (code & EV_CLASS_MASK);
 
 	if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) {
-		struct task_struct *p;
-
 		if (snsc_shutting_down)
 			return;
 
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 78cf0711d1f..ef07c36bccf 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -249,19 +249,19 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
 
 
 	if (rc == PCI_SLOT_ALREADY_UP) {
-		dev_dbg(slot->pci_bus->self, "is already active\n");
+		dev_dbg(&slot->pci_bus->self->dev, "is already active\n");
 		return 1; /* return 1 to user */
 	}
 
 	if (rc == PCI_L1_ERR) {
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"L1 failure %d with message: %s",
 			resp.resp_sub_errno, resp.resp_l1_msg);
 		return -EPERM;
 	}
 
 	if (rc) {
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"insert failed with error %d sub-error %d\n",
 			rc, resp.resp_sub_errno);
 		return -EIO;
@@ -287,25 +287,25 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
 
 	if ((action == PCI_REQ_SLOT_ELIGIBLE) &&
 	    (rc == PCI_SLOT_ALREADY_DOWN)) {
-		dev_dbg(slot->pci_bus->self, "Slot %s already inactive\n");
+		dev_dbg(&slot->pci_bus->self->dev, "Slot %s already inactive\n", slot->physical_path);
 		return 1; /* return 1 to user */
 	}
 
 	if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"Cannot remove last 33MHz card\n");
 		return -EPERM;
 	}
 
 	if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"L1 failure %d with message \n%s\n",
 			resp.resp_sub_errno, resp.resp_l1_msg);
 		return -EPERM;
 	}
 
 	if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"remove failed with error %d sub-error %d\n",
 			rc, resp.resp_sub_errno);
 		return -EIO;
@@ -317,12 +317,12 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
 	if ((action == PCI_REQ_SLOT_DISABLE) && !rc) {
 		pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
 		pcibus_info->pbi_enabled_devices &= ~(1 << device_num);
-		dev_dbg(slot->pci_bus->self, "remove successful\n");
+		dev_dbg(&slot->pci_bus->self->dev, "remove successful\n");
 		return 0;
 	}
 
 	if ((action == PCI_REQ_SLOT_DISABLE) && rc) {
-		dev_dbg(slot->pci_bus->self,"remove failed rc = %d\n", rc);
+		dev_dbg(&slot->pci_bus->self->dev,"remove failed rc = %d\n", rc);
 	}
 
 	return rc;
@@ -375,7 +375,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
 	num_funcs = pci_scan_slot(slot->pci_bus,
 				  PCI_DEVFN(slot->device_num + 1, 0));
 	if (!num_funcs) {
-		dev_dbg(slot->pci_bus->self, "no device in slot\n");
+		dev_dbg(&slot->pci_bus->self->dev, "no device in slot\n");
 		mutex_unlock(&sn_hotplug_mutex);
 		return -ENODEV;
 	}
@@ -427,7 +427,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
 	phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
 
 	if (acpi_bus_get_device(phandle, &pdevice)) {
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"no parent device, assuming NULL\n");
 		pdevice = NULL;
 	}
@@ -479,10 +479,10 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
 	mutex_unlock(&sn_hotplug_mutex);
 
 	if (rc == 0)
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"insert operation successful\n");
 	else
-		dev_dbg(slot->pci_bus->self,
+		dev_dbg(&slot->pci_bus->self->dev,
 			"insert operation failed rc = %d\n", rc);
 
 	return rc;
@@ -659,16 +659,16 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
 		if (rc)
 			goto register_err;
 	}
-	dev_dbg(pci_bus->self, "Registered bus with hotplug\n");
+	dev_dbg(&pci_bus->self->dev, "Registered bus with hotplug\n");
 	return rc;
 
 register_err:
-	dev_dbg(pci_bus->self, "bus failed to register with err = %d\n",
+	dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
 		rc);
 
 alloc_err:
 	if (rc == -ENOMEM)
-		dev_dbg(pci_bus->self, "Memory allocation error\n");
+		dev_dbg(&pci_bus->self->dev, "Memory allocation error\n");
 
 	/* destroy THIS element */
 	if (bss_hotplug_slot)
@@ -701,10 +701,10 @@ static int sn_pci_hotplug_init(void)
 
 		rc = sn_pci_bus_valid(pci_bus);
 		if (rc != 1) {
-			dev_dbg(pci_bus->self, "not a valid hotplug bus\n");
+			dev_dbg(&pci_bus->self->dev, "not a valid hotplug bus\n");
 			continue;
 		}
-		dev_dbg(pci_bus->self, "valid hotplug bus\n");
+		dev_dbg(&pci_bus->self->dev, "valid hotplug bus\n");
 
 		rc = sn_hotplug_slot_register(pci_bus);
 		if (!rc) {
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index 79479e2c696..67221615e31 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -11,6 +11,9 @@
  * 02/29/00	D.Mosberger	moved most things into hw_irq.h
  */
 
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
 #define NR_IRQS		256
 #define NR_IRQ_VECTORS	NR_IRQS
 
@@ -29,5 +32,8 @@ extern void disable_irq (unsigned int);
 extern void disable_irq_nosync (unsigned int);
 extern void enable_irq (unsigned int);
 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+bool is_affinity_mask_valid(cpumask_t cpumask);
+
+#define is_affinity_mask_valid is_affinity_mask_valid
 
 #endif /* _ASM_IA64_IRQ_H */
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 828ae00e47c..2abc98b336f 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -71,13 +71,15 @@ struct prev_kprobe {
 
 #define MAX_PARAM_RSE_SIZE	(0x60+0x60/0x3f)
 /* per-cpu kprobe control block */
+#define ARCH_PREV_KPROBE_SZ 2
 struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	struct pt_regs jprobe_saved_regs;
 	unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
 	unsigned long *bsp;
 	unsigned long cfm;
-	struct prev_kprobe prev_kprobe;
+	atomic_t prev_kprobe_index;
+	struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
 };
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 560c287b123..67552cad517 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -18,71 +18,18 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/threads.h>
+#include <linux/quicklist.h>
 
 #include <asm/mmu_context.h>
 
-DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
-#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
-DECLARE_PER_CPU(long, __pgtable_quicklist_size);
-#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
-
-static inline long pgtable_quicklist_total_size(void)
-{
-	long ql_size = 0;
-	int cpuid;
-
-	for_each_online_cpu(cpuid) {
-		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
-	}
-	return ql_size;
-}
-
-static inline void *pgtable_quicklist_alloc(void)
-{
-	unsigned long *ret = NULL;
-
-	preempt_disable();
-
-	ret = pgtable_quicklist;
-	if (likely(ret != NULL)) {
-		pgtable_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		--pgtable_quicklist_size;
-		preempt_enable();
-	} else {
-		preempt_enable();
-		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	}
-
-	return ret;
-}
-
-static inline void pgtable_quicklist_free(void *pgtable_entry)
-{
-#ifdef CONFIG_NUMA
-	int nid = page_to_nid(virt_to_page(pgtable_entry));
-
-	if (unlikely(nid != numa_node_id())) {
-		free_page((unsigned long)pgtable_entry);
-		return;
-	}
-#endif
-
-	preempt_disable();
-	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
-	pgtable_quicklist = (unsigned long *)pgtable_entry;
-	++pgtable_quicklist_size;
-	preempt_enable();
-}
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pgd_free(pgd_t * pgd)
 {
-	pgtable_quicklist_free(pgd);
+	quicklist_free(0, NULL, pgd);
 }
 
 #ifdef CONFIG_PGTABLE_4
@@ -94,12 +41,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pud_free(pud_t * pud)
 {
-	pgtable_quicklist_free(pud);
+	quicklist_free(0, NULL, pud);
 }
 #define __pud_free_tlb(tlb, pud)	pud_free(pud)
 #endif /* CONFIG_PGTABLE_4 */
@@ -112,12 +59,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pmd_free(pmd_t * pmd)
 {
-	pgtable_quicklist_free(pmd);
+	quicklist_free(0, NULL, pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
@@ -137,28 +84,31 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long addr)
 {
-	void *pg = pgtable_quicklist_alloc();
+	void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
 	return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long addr)
 {
-	return pgtable_quicklist_alloc();
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	pgtable_quicklist_free(page_address(pte));
+	quicklist_free_page(0, NULL, pte);
 }
 
 static inline void pte_free_kernel(pte_t * pte)
 {
-	pgtable_quicklist_free(pte);
+	quicklist_free(0, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb, pte)	pte_free(pte)
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(0, NULL, 25, 16);
+}
 
-extern void check_pgt_cache(void);
+#define __pte_free_tlb(tlb, pte)	pte_free(pte)
 
 #endif /* _ASM_IA64_PGALLOC_H */
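
For reference, the pgalloc.h hunks above call into the generic quicklist layer (the new include/linux/quicklist.h selected by CONFIG_QUICKLIST). A rough sketch of that interface as consumed here — signatures paraphrased from the quicklist series, not part of this patch; list 0 is used throughout, and ctor/dtor are NULL because ia64 page tables need no per-page setup or teardown:

	/* sketch of <linux/quicklist.h>, as assumed by the conversion above */
	void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *));
	void quicklist_free(int nr, void (*dtor)(void *), void *pp);
	void quicklist_free_page(int nr, void (*dtor)(void *), struct page *page);
	void quicklist_trim(int nr, void (*dtor)(void *),
			    unsigned long min_pages, unsigned long max_free);
	unsigned long quicklist_total_size(void);	/* used by show_mem() above */

check_pgt_cache() keeps the old tuning: retain at least 25 cached page-table pages, free at most 16 per trim pass.
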
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 861c8ec87b0..f049bc40ca7 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -294,11 +294,13 @@
 #define __NR_vmsplice			1302
 /* 1303 reserved for move_pages */
 #define __NR_getcpu			1304
+#define __NR_epoll_pwait		1305
+#define __NR_utimensat			1306
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls			281 /* length of syscall table */
+#define NR_syscalls			283 /* length of syscall table */
 
 #define __ARCH_WANT_SYS_RT_SIGACTION
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
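
The two syscall numbers added above pair with the entry.S additions earlier in this diff. A minimal user-space sketch of exercising the new utimensat slot through the raw syscall(2) interface — "testfile" is a placeholder path, and __NR_utimensat resolving to 1306 assumes installed kernel headers that already carry this patch:

	#include <fcntl.h>		/* AT_FDCWD */
	#include <stdio.h>
	#include <sys/syscall.h>	/* __NR_utimensat */
	#include <unistd.h>

	int main(void)
	{
		/* NULL times == set both atime and mtime to the current time */
		long rc = syscall(__NR_utimensat, AT_FDCWD, "testfile", NULL, 0);

		if (rc < 0)
			perror("utimensat");
		return rc < 0 ? 1 : 0;
	}
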
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index ddde0ef9ccd..b4f1674fca7 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -27,6 +27,10 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
 	return len;
 }
 
+#ifndef is_affinity_mask_valid
+#define is_affinity_mask_valid(val) 1
+#endif
+
 int no_irq_affinity;
 static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 				   unsigned long count, void *data)
@@ -42,6 +46,9 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (err)
 		return err;
 
+	if (!is_affinity_mask_valid(new_value))
+		return -EINVAL;
+
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
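
Taken together with the ia64 is_affinity_mask_valid() definition earlier in the diff, the visible effect on sn2 is that /proc/irq/N/smp_affinity now rejects masks naming more than one CPU instead of accepting them. A small illustrative sketch — the IRQ number 29 and the mask value are placeholders, not taken from the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *mask = "3";	/* CPUs 0 and 1: more than one CPU set */
		int fd = open("/proc/irq/29/smp_affinity", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* On sn2 with this patch the write should fail with EINVAL;
		 * elsewhere the fallback macro accepts any mask. */
		if (write(fd, mask, strlen(mask)) < 0)
			perror("write");
		close(fd);
		return 0;
	}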