author    Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 12:44:55 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 12:44:55 -0400
commit    4786b4ee22de6304e841b12ee22b849230d7fba3 (patch)
tree      08793b8fbcd63204d5d3355ac755745adcfef170
parent    253ba4e79edc695b2925bd2ef34de06ff4d4070c (diff)
parent    71b264f85ff50c14fe945ffff06ae0d5e9a9124e (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
[IA64] kdump: Add crash_save_vmcoreinfo for INIT
[IA64] Fix NUMA configuration issue
[IA64] Itanium Spec updates
[IA64] Untangle sync_icache_dcache() page size determination
[IA64] arch/ia64/kernel/: use time_* macros
[IA64] remove redundant display of free swap space in show_mem()
[IA64] make IOMMU respect the segment boundary limits
[IA64] kprobes: kprobe-booster for ia64
[IA64] fix getpid and set_tid_address fast system calls for pid namespaces
[IA64] Replace explicit jiffies tests with time_* macros.
[IA64] use goto to jump out do/while_each_thread
[IA64] Fix unlock ordering in smp_callin
[IA64] pgd_offset() constfication.
[IA64] kdump: crash.c coding style fix
[IA64] kdump: add kdump_on_fatal_mca
[IA64] Minimize per_cpu reservations.
[IA64] Correct pernodesize calculation.
[IA64] Kernel parameter for max number of concurrent global TLB purges
[IA64] Multiple outstanding ptc.g instruction support
[IA64] Implement smp_call_function_mask for ia64
...
50 files changed, 2965 insertions, 544 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index cd6f464b919c..256a2162503c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1362,6 +1362,10 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	nowb		[ARM]
 
+	nptcg=		[IA64] Override max number of concurrent global TLB
+			purges which is reported from either PAL_VM_SUMMARY or
+			SAL PALO.
+
 	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
 			one of ['zone', 'node', 'default'] can be specified
 			This can be set from sysctl after boot.
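
A usage sketch for the new parameter: to cap concurrent global purges at 4 regardless of what firmware reports, append it to the kernel command line (the value 4 is illustrative only; sensible settings depend on what PAL_VM_SUMMARY or the SAL PALO table report for the machine):

    vmlinuz ... nptcg=4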
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8fa3faf5ef1b..ed21737a00c5 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -283,6 +283,17 @@ config FORCE_MAX_ZONEORDER
 	default "17" if HUGETLB_PAGE
 	default "11"
 
+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	default n
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.
+	  If in doubt, say N here.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	help
@@ -611,6 +622,9 @@ config IRQ_PER_CPU
 	bool
 	default y
 
+config IOMMU_HELPER
+	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+
 source "arch/ia64/hp/sim/Kconfig"
 
 source "arch/ia64/Kconfig.debug"
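
The VIRT_CPU_ACCOUNTING help text compresses the mechanism into two sentences. A minimal userspace sketch of the bookkeeping it describes, with a stub standing in for the ia64 interval time counter (ar.itc) and field names borrowed from the TI_AC_* thread_info offsets exported later in this merge; this is an illustration of the idea, not the patch's assembly implementation:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the cycle counter the help text refers to (ar.itc on ia64). */
    static uint64_t itc;
    static uint64_t read_itc(void) { return itc; }

    struct thread_info_model {
        uint64_t ac_stamp;      /* counter value at the last transition */
        uint64_t ac_utime;      /* accumulated user cycles */
        uint64_t ac_stime;      /* accumulated system cycles */
    };

    /* On kernel entry, everything since the last stamp was user time. */
    static void account_entry(struct thread_info_model *ti)
    {
        uint64_t now = read_itc();
        ti->ac_utime += now - ti->ac_stamp;
        ti->ac_stamp = now;
    }

    /* On kernel exit, everything since the last stamp was system time. */
    static void account_leave(struct thread_info_model *ti)
    {
        uint64_t now = read_itc();
        ti->ac_stime += now - ti->ac_stamp;
        ti->ac_stamp = now;
    }

    int main(void)
    {
        struct thread_info_model ti = { 0 };
        itc = 100; account_entry(&ti);  /* syscall entry at cycle 100 */
        itc = 160; account_leave(&ti);  /* back to user at cycle 160 */
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)ti.ac_utime,
               (unsigned long long)ti.ac_stime);  /* utime=100 stime=60 */
        return 0;
    }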
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 523eae6d3e49..9409de5c9441 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -35,6 +35,7 @@
 #include <linux/nodemask.h>
 #include <linux/bitops.h>	/* hweight64() */
 #include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -460,6 +461,13 @@ get_iovp_order (unsigned long size)
 	return order;
 }
 
+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+				 unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}
+
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
@@ -471,15 +479,25 @@ get_iovp_order (unsigned long size)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted, int use_hint)
 {
 	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long flags, pide = ~0UL;
+	unsigned long flags, pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+	BUG_ON(ioc->ibase & ~iovp_mask);
+	shift = ioc->ibase >> iovp_shift;
+
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
 	/* Allow caller to force a search through the entire resource space */
@@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		if (likely(*res_ptr != ~0UL)) {
 			bitshiftcnt = ffz(*res_ptr);
 			*res_ptr |= (1UL << bitshiftcnt);
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
-			pide += bitshiftcnt;
+			pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
 			ioc->res_bitshift = bitshiftcnt + bits_wanted;
 			goto found_it;
 		}
@@ -535,11 +551,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
 			ASSERT(0 != mask);
 			for (; mask ; mask <<= o, bitshiftcnt += o) {
-				if(0 == ((*res_ptr) & mask)) {
+				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+				ret = iommu_is_span_boundary(tpide, bits_wanted,
+							     shift,
+							     boundary_size);
+				if ((0 == ((*res_ptr) & mask)) && !ret) {
 					*res_ptr |= mask;     /* mark resources busy! */
-					pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-					pide <<= 3;	/* convert to bit address */
-					pide += bitshiftcnt;
+					pide = tpide;
 					ioc->res_bitshift = bitshiftcnt + bits_wanted;
 					goto found_it;
 				}
@@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		end = res_end - qwords;
 
 		for (; res_ptr < end; res_ptr++) {
+			tpide = ptr_to_pide(ioc, res_ptr, 0);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift, boundary_size);
+			if (ret)
+				goto next_ptr;
 			for (i = 0 ; i < qwords ; i++) {
 				if (res_ptr[i] != 0)
 					goto next_ptr;
@@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			res_ptr[i] = ~0UL;
 			res_ptr[i] |= RESMAP_MASK(bits);
 
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
+			pide = tpide;
 			res_ptr += qwords;
 			ioc->res_bitshift = bits;
 			goto found_it;
@@ -605,7 +627,7 @@ found_it:
  * resource bit map.
 */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
 	unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed, 1);
+	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
 			unsigned long flags;
@@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 		}
 		spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3)))
 			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 			      ioc->ioc_hpa);
@@ -936,7 +958,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-	pide = sba_alloc_range(ioc, size);
+	pide = sba_alloc_range(ioc, dev, size);
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1373,7 +1395,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
 		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
+			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
 			| dma_offset);
 		n_mappings++;
 	}
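
The recurring pattern in these hunks: every candidate pide is now vetted with iommu_is_span_boundary() from the IOMMU_HELPER library selected in the Kconfig hunk above, so a mapping never straddles the device's DMA segment boundary (dma_get_seg_boundary()). The helper's test is tiny; the standalone sketch below mirrors the lib/iommu-helper.c logic of this era, with shift being the IOVA base in pages as computed from ioc->ibase above:

    #include <stdio.h>

    /* An allocation of nr pages starting at bitmap index 'index' (offset
     * by 'shift') must not cross a boundary_size-aligned segment boundary.
     * boundary_size must be a power of two, which the ALIGN() rounding in
     * sba_search_bitmap() guarantees. */
    static int is_span_boundary(unsigned int index, unsigned int nr,
                                unsigned long shift, unsigned long boundary_size)
    {
        shift = (shift + index) & (boundary_size - 1);
        return shift + nr > boundary_size;
    }

    int main(void)
    {
        /* With 64-page segments, 4 pages at index 62 would cover pages
         * 62..65 and cross the boundary at 64, so sba_search_bitmap()
         * must skip that slot even though its bits are free. */
        printf("%d\n", is_span_boundary(62, 4, 0, 64)); /* 1: crosses */
        printf("%d\n", is_span_boundary(60, 4, 0, 64)); /* 0: fits    */
        return 0;
    }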
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 446c9aac924d..9a3abf58cea3 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -30,7 +30,19 @@ struct elf_siginfo
 	int	si_errno;			/* errno */
 };
 
-#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Hacks are here since types between compat_timeval (= pair of s32) and
+ * ia64-native timeval (= pair of s64) are not compatible, at least a file
+ * arch/ia64/ia32/../../../fs/binfmt_elf.c will get warnings from compiler on
+ * use of cputime_to_timeval(), which usually an alias of jiffies_to_timeval().
+ */
+#define cputime_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
+#else
+#define jiffies_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
+#endif
 
 struct elf_prstatus
 {
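
The reason for the substitution: with CONFIG_VIRT_CPU_ACCOUNTING enabled, ia64 cputime values hold nanoseconds rather than jiffies, so whole seconds come from dividing by NSEC_PER_SEC instead of HZ. A standalone illustration of what the new macro computes (compat_timeval modeled as a pair of 32-bit fields, per the comment above):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    struct compat_timeval_model { int tv_sec, tv_usec; };

    int main(void)
    {
        long long cputime_ns = 3500000000LL;    /* 3.5 s of CPU time */
        struct compat_timeval_model tv = {
            .tv_sec  = (int)(cputime_ns / NSEC_PER_SEC),
            .tv_usec = 0,   /* the macro drops the sub-second part */
        };
        printf("%d s\n", tv.tv_sec);    /* prints: 3 s */
        return 0;
    }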
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index b1bf51fe97b4..7e028ceb93ba 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -38,6 +38,7 @@
 #include <linux/eventpoll.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
+#include <linux/regset.h>
 #include <linux/stat.h>
 #include <linux/ipc.h>
 #include <linux/capability.h>
@@ -2387,16 +2388,45 @@ get_free_idx (void)
 	return -ESRCH;
 }
 
+static void set_tls_desc(struct task_struct *p, int idx,
+			 const struct ia32_user_desc *info, int n)
+{
+	struct thread_struct *t = &p->thread;
+	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+	int cpu;
+
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+
+	while (n-- > 0) {
+		if (LDT_empty(info)) {
+			desc->a = 0;
+			desc->b = 0;
+		} else {
+			desc->a = LDT_entry_a(info);
+			desc->b = LDT_entry_b(info);
+		}
+
+		++info;
+		++desc;
+	}
+
+	if (t == &current->thread)
+		load_TLS(t, cpu);
+
+	put_cpu();
+}
+
 /*
  * Set a given TLS descriptor:
  */
 asmlinkage int
 sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 {
-	struct thread_struct *t = &current->thread;
 	struct ia32_user_desc info;
-	struct desc_struct *desc;
-	int cpu, idx;
+	int idx;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
@@ -2416,18 +2446,7 @@ sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	cpu = smp_processor_id();
-
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
-	load_TLS(t, cpu);
+	set_tls_desc(current, idx, &info, 1);
 	return 0;
 }
 
@@ -2451,6 +2470,20 @@ sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 #define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
 #define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
 
+static void fill_user_desc(struct ia32_user_desc *info, int idx,
+			   const struct desc_struct *desc)
+{
+	info->entry_number = idx;
+	info->base_addr = GET_BASE(desc);
+	info->limit = GET_LIMIT(desc);
+	info->seg_32bit = GET_32BIT(desc);
+	info->contents = GET_CONTENTS(desc);
+	info->read_exec_only = !GET_WRITABLE(desc);
+	info->limit_in_pages = GET_LIMIT_PAGES(desc);
+	info->seg_not_present = !GET_PRESENT(desc);
+	info->useable = GET_USEABLE(desc);
+}
+
 asmlinkage int
 sys32_get_thread_area (struct ia32_user_desc __user *u_info)
 {
@@ -2464,22 +2497,588 @@ sys32_get_thread_area (struct ia32_user_desc __user *u_info)
 		return -EINVAL;
 
 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
+	fill_user_desc(&info, idx, desc);
 
 	if (copy_to_user(u_info, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
+struct regset_get {
+	void *kbuf;
+	void __user *ubuf;
+};
+
+struct regset_set {
+	const void *kbuf;
+	const void __user *ubuf;
+};
+
+struct regset_getset {
+	struct task_struct *target;
+	const struct user_regset *regset;
+	union {
+		struct regset_get get;
+		struct regset_set set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+};
+
+static void getfpreg(struct task_struct *task, int regno, int *val)
+{
+	switch (regno / sizeof(int)) {
+	case 0:
+		*val = task->thread.fcr & 0xffff;
+		break;
+	case 1:
+		*val = task->thread.fsr & 0xffff;
+		break;
+	case 2:
+		*val = (task->thread.fsr>>16) & 0xffff;
+		break;
+	case 3:
+		*val = task->thread.fir;
+		break;
+	case 4:
+		*val = (task->thread.fir>>32) & 0xffff;
+		break;
+	case 5:
+		*val = task->thread.fdr;
+		break;
+	case 6:
+		*val = (task->thread.fdr >> 32) & 0xffff;
+		break;
+	}
+}
+
+static void setfpreg(struct task_struct *task, int regno, int val)
+{
+	switch (regno / sizeof(int)) {
+	case 0:
+		task->thread.fcr = (task->thread.fcr & (~0x1f3f))
+			| (val & 0x1f3f);
+		break;
+	case 1:
+		task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
+		break;
+	case 2:
+		task->thread.fsr = (task->thread.fsr & (~0xffff0000))
+			| (val << 16);
+		break;
+	case 3:
+		task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
+		break;
+	case 5:
+		task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
+		break;
+	}
+}
+
+static void access_fpreg_ia32(int regno, void *reg,
+		struct pt_regs *pt, struct switch_stack *sw,
+		int tos, int write)
+{
+	void *f;
+
+	if ((regno += tos) >= 8)
+		regno -= 8;
+	if (regno < 4)
+		f = &pt->f8 + regno;
+	else if (regno <= 7)
+		f = &sw->f12 + (regno - 4);
+	else {
+		printk(KERN_ERR "regno must be less than 7 \n");
+		return;
+	}
+
+	if (write)
+		memcpy(f, reg, sizeof(struct _fpreg_ia32));
+	else
+		memcpy(reg, f, sizeof(struct _fpreg_ia32));
+}
+
+static void do_fpregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	int start, end, tos;
+	char buf[80];
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+	if (dst->pos < 7 * sizeof(int)) {
+		end = min((dst->pos + dst->count),
+			(unsigned int)(7 * sizeof(int)));
+		for (start = dst->pos; start < end; start += sizeof(int))
+			getfpreg(task, start, (int *)(buf + start));
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
+				0, 7 * sizeof(int));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = min(dst->pos + dst->count,
+			(unsigned int)(sizeof(struct ia32_user_i387_struct)));
+		start = (dst->pos - 7 * sizeof(int)) /
+			sizeof(struct _fpreg_ia32);
+		end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		for (; start < end; start++)
+			access_fpreg_ia32(start,
+				(struct _fpreg_ia32 *)buf + start,
+				pt, info->sw, tos, 0);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				buf, 7 * sizeof(int),
+				sizeof(struct ia32_user_i387_struct));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+}
+
+static void do_fpregs_set(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	char buf[80];
+	int end, start, tos;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+
+	if (dst->pos < 7 * sizeof(int)) {
+		start = dst->pos;
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, buf,
+				0, 7 * sizeof(int));
+		if (dst->ret)
+			return;
+		for (; start < dst->pos; start += sizeof(int))
+			setfpreg(task, start, *((int *)(buf + start)));
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
+		start = (dst->pos - 7 * sizeof(int)) /
+			sizeof(struct _fpreg_ia32);
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, 7 * sizeof(int),
+				sizeof(struct ia32_user_i387_struct));
+		if (dst->ret)
+			return;
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		for (; start < end; start++)
+			access_fpreg_ia32(start,
+				(struct _fpreg_ia32 *)buf + start,
+				pt, info->sw, tos, 1);
+		if (dst->count == 0)
+			return;
+	}
+}
+
+#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
+static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
+{
+	int min_val;
+
+	min_val = min(end, OFFSET(fop));
+	while (start < min_val) {
+		if (start == OFFSET(cwd))
+			*((short *)buf) = task->thread.fcr & 0xffff;
+		else if (start == OFFSET(swd))
+			*((short *)buf) = task->thread.fsr & 0xffff;
+		else if (start == OFFSET(twd))
+			*((short *)buf) = (task->thread.fsr>>16) & 0xffff;
+		buf += 2;
+		start += 2;
+	}
+	/* skip fop element */
+	if (start == OFFSET(fop)) {
+		start += 2;
+		buf += 2;
+	}
+	while (start < end) {
+		if (start == OFFSET(fip))
+			*((int *)buf) = task->thread.fir;
+		else if (start == OFFSET(fcs))
+			*((int *)buf) = (task->thread.fir>>32) & 0xffff;
+		else if (start == OFFSET(foo))
+			*((int *)buf) = task->thread.fdr;
+		else if (start == OFFSET(fos))
+			*((int *)buf) = (task->thread.fdr>>32) & 0xffff;
+		else if (start == OFFSET(mxcsr))
+			*((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
+					 | ((task->thread.fsr>>32) & 0x3f);
+		buf += 4;
+		start += 4;
+	}
+}
+
+static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
+{
+	int min_val, num32;
+	short num;
+	unsigned long num64;
+
+	min_val = min(end, OFFSET(fop));
+	while (start < min_val) {
+		num = *((short *)buf);
+		if (start == OFFSET(cwd)) {
+			task->thread.fcr = (task->thread.fcr & (~0x1f3f))
+				| (num & 0x1f3f);
+		} else if (start == OFFSET(swd)) {
+			task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
+		} else if (start == OFFSET(twd)) {
+			task->thread.fsr = (task->thread.fsr & (~0xffff0000))
+				| (((int)num) << 16);
+		}
+		buf += 2;
+		start += 2;
+	}
+	/* skip fop element */
+	if (start == OFFSET(fop)) {
+		start += 2;
+		buf += 2;
+	}
+	while (start < end) {
+		num32 = *((int *)buf);
+		if (start == OFFSET(fip))
+			task->thread.fir = (task->thread.fir & (~0xffffffff))
+				| num32;
+		else if (start == OFFSET(foo))
+			task->thread.fdr = (task->thread.fdr & (~0xffffffff))
+				| num32;
+		else if (start == OFFSET(mxcsr)) {
+			num64 = num32 & 0xff10;
+			task->thread.fcr = (task->thread.fcr &
+				(~0xff1000000000UL)) | (num64<<32);
+			num64 = num32 & 0x3f;
+			task->thread.fsr = (task->thread.fsr &
+				(~0x3f00000000UL)) | (num64<<32);
+		}
+		buf += 4;
+		start += 4;
+	}
+}
+
+static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	char buf[128];
+	int start, end, tos;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+	if (dst->pos < OFFSET(st_space[0])) {
+		end = min(dst->pos + dst->count, (unsigned int)32);
+		getfpxreg(task, dst->pos, end, buf);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
+				0, OFFSET(st_space[0]));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(xmm_space[0])) {
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = min(dst->pos + dst->count,
+				(unsigned int)OFFSET(xmm_space[0]));
+		start = (dst->pos - OFFSET(st_space[0])) / 16;
+		end = (end - OFFSET(st_space[0])) / 16;
+		for (; start < end; start++)
+			access_fpreg_ia32(start, buf + 16 * start, pt,
+						info->sw, tos, 0);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(padding[0]))
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				&info->sw->f16, OFFSET(xmm_space[0]),
+				OFFSET(padding[0]));
+}
+
+static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
+{
+	struct regset_getset *dst = arg;
+	struct task_struct *task = dst->target;
+	char buf[128];
+	int start, end;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+
+	if (dst->pos < OFFSET(st_space[0])) {
+		start = dst->pos;
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, 0, OFFSET(st_space[0]));
+		if (dst->ret)
+			return;
+		setfpxreg(task, start, dst->pos, buf);
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(xmm_space[0])) {
+		struct pt_regs *pt;
+		int tos;
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		start = (dst->pos - OFFSET(st_space[0])) / 16;
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
+		if (dst->ret)
+			return;
+		end = (dst->pos - OFFSET(st_space[0])) / 16;
+		for (; start < end; start++)
+			access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
+						 tos, 1);
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(padding[0]))
+		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				&info->sw->f16, OFFSET(xmm_space[0]),
+				 OFFSET(padding[0]));
+}
+#undef OFFSET
+
+static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
+		struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	struct regset_getset info = { .target = target, .regset = regset,
+				 .pos = pos, .count = count,
+				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
+				 .ret = 0 };
+
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
+
+	return info.ret;
+}
+
+static int ia32_fpregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_fpregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_fpxregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_fpxregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int ia32_genregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	if (kbuf) {
+		u32 *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg(target, pos);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		u32 __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg(target, pos), up++))
+				return -EFAULT;
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return 0;
+}
+
+static int ia32_genregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const u32 *kp = kbuf;
+		while (!ret && count > 0) {
+			putreg(target, pos, *kp++);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		const u32 __user *up = ubuf;
+		u32 val;
+		while (!ret && count > 0) {
+			ret = __get_user(val, up++);
+			if (!ret)
+				putreg(target, pos, val);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return ret;
+}
+
+static int ia32_tls_active(struct task_struct *target,
+		const struct user_regset *regset)
+{
+	struct thread_struct *t = &target->thread;
+	int n = GDT_ENTRY_TLS_ENTRIES;
+	while (n > 0 && desc_empty(&t->tls_array[n -1]))
+		--n;
+	return n;
+}
+
+static int ia32_tls_get(struct task_struct *target,
+		const struct user_regset *regset, unsigned int pos,
+		unsigned int count, void *kbuf, void __user *ubuf)
+{
+	const struct desc_struct *tls;
+
+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
+			(pos % sizeof(struct ia32_user_desc)) != 0 ||
+			(count % sizeof(struct ia32_user_desc)) != 0)
+		return -EINVAL;
+
+	pos /= sizeof(struct ia32_user_desc);
+	count /= sizeof(struct ia32_user_desc);
+
+	tls = &target->thread.tls_array[pos];
+
+	if (kbuf) {
+		struct ia32_user_desc *info = kbuf;
+		while (count-- > 0)
+			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
+					tls++);
+	} else {
+		struct ia32_user_desc __user *u_info = ubuf;
+		while (count-- > 0) {
+			struct ia32_user_desc info;
+			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
+			if (__copy_to_user(u_info++, &info, sizeof(info)))
+				return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int ia32_tls_set(struct task_struct *target,
+		const struct user_regset *regset, unsigned int pos,
+		unsigned int count, const void *kbuf, const void __user *ubuf)
+{
+	struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+	const struct ia32_user_desc *info;
+
+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
+			(pos % sizeof(struct ia32_user_desc)) != 0 ||
+			(count % sizeof(struct ia32_user_desc)) != 0)
+		return -EINVAL;
+
+	if (kbuf)
+		info = kbuf;
+	else if (__copy_from_user(infobuf, ubuf, count))
+		return -EFAULT;
+	else
+		info = infobuf;
+
+	set_tls_desc(target,
+		GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
+		info, count / sizeof(struct ia32_user_desc));
+
+	return 0;
+}
+
+/*
+ * This should match arch/i386/kernel/ptrace.c:native_regsets.
+ * XXX ioperm? vm86?
+ */
+static const struct user_regset ia32_regsets[] = {
+	{
+		.core_note_type = NT_PRSTATUS,
+		.n = sizeof(struct user_regs_struct32)/4,
+		.size = 4, .align = 4,
+		.get = ia32_genregs_get, .set = ia32_genregs_set
+	},
+	{
+		.core_note_type = NT_PRFPREG,
+		.n = sizeof(struct ia32_user_i387_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpregs_get, .set = ia32_fpregs_set
+	},
+	{
+		.core_note_type = NT_PRXFPREG,
+		.n = sizeof(struct ia32_user_fxsr_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
+	},
+	{
+		.core_note_type = NT_386_TLS,
+		.n = GDT_ENTRY_TLS_ENTRIES,
+		.bias = GDT_ENTRY_TLS_MIN,
+		.size = sizeof(struct ia32_user_desc),
+		.align = sizeof(struct ia32_user_desc),
+		.active = ia32_tls_active,
+		.get = ia32_tls_get, .set = ia32_tls_set,
+	},
+};
+
+const struct user_regset_view user_ia32_view = {
+	.name = "i386", .e_machine = EM_386,
+	.regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
+};
+
 long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 	       __u32 len_low, __u32 len_high, int advice)
 {
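
The bulk of this file's additions implement the generic user_regset interface for the i386 compatibility view (user_ia32_view): a table of fixed-size register sets, each exposing get/set hooks that stream 4-byte (or descriptor-sized) units between task state and a kernel or user buffer, with an optional active() hook reporting how many entries are live. A toy standalone model of that table-of-hooks pattern, with a stand-in for getreg():

    #include <stdio.h>

    typedef int (*regset_get_fn)(void *task, unsigned int pos,
                                 unsigned int count, void *kbuf);

    struct user_regset_model {
        const char *name;
        unsigned int n, size;   /* n entries of 'size' bytes */
        regset_get_fn get;
    };

    /* Same 4-byte stride as ia32_genregs_get; the position stands in
     * for getreg(task, pos). */
    static int toy_genregs_get(void *task, unsigned int pos,
                               unsigned int count, void *kbuf)
    {
        unsigned int *out = kbuf;
        while (count > 0) {
            *out++ = pos;
            pos += 4;
            count -= 4;
        }
        return 0;
    }

    static const struct user_regset_model regsets[] = {
        { "genregs", 17, 4, toy_genregs_get },
    };

    int main(void)
    {
        unsigned int buf[17];
        regsets[0].get(NULL, 0, regsets[0].n * regsets[0].size, buf);
        printf("first=%u last=%u\n", buf[0], buf[16]);
        return 0;
    }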
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 78f28d825f30..c7467f863c7a 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -423,6 +423,7 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
 static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
@@ -482,6 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 	    (pa->apic_id << 8) | (pa->local_sapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pxm;
+	cpu_set(srat_num_cpus, early_cpu_possible_map);
 	srat_num_cpus++;
 }
 
@@ -559,7 +561,7 @@ void __init acpi_numa_arch_fixup(void)
 	}
 
 	/* set logical node id in cpu structure */
-	for (i = 0; i < srat_num_cpus; i++)
+	for_each_possible_early_cpu(i)
 		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
 
 	printk(KERN_INFO "Number of logical nodes in system = %d\n",
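
The for_each_possible_early_cpu() iterator is introduced elsewhere in this merge rather than in this hunk; given the map added above, its definition is presumably along these lines (an assumption, not text from this diff):

    /* assumed shape of the helper; the real one lives in an ia64 header */
    #define for_each_possible_early_cpu(cpu) \
            for_each_cpu_mask((cpu), early_cpu_possible_map)

Recording the SRAT-described CPUs in early_cpu_possible_map lets later boot code size per-CPU resources from the CPUs that can actually appear, which is evidently what the "Minimize per_cpu reservations." entry in the shortlog refers to.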
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 0aebc6f79e95..230a6f92367f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1
 
 #include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/clocksource.h>
 
 #include <asm-ia64/processor.h>
@@ -34,17 +35,29 @@ void foo(void)
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
+	BUILD_BUG_ON(sizeof(struct upid) != 32);
+	DEFINE(IA64_UPID_SHIFT, 5);
+
 	BLANK();
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
+	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
+	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
+	DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
+#endif
 
 	BLANK();
 
 	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
+	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
 	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
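
The three new pid offsets plus IA64_UPID_SHIFT exist so the fsys fast paths can resolve a namespace-relative pid without leaving assembly, matching the "fix getpid and set_tid_address fast system calls for pid namespaces" entry in the shortlog. The pointer chase they encode is task->pids[PIDTYPE_PID].pid, then numbers[level].nr, with the array index scaled by 1 << IA64_UPID_SHIFT because struct upid is asserted to be 32 bytes. A standalone model with simplified layouts (the real structures are in <linux/pid.h>):

    #include <stdio.h>

    struct upid { int nr; };                /* real struct upid is 32 bytes */
    struct pid_model {
        unsigned int level;                 /* IA64_PID_LEVEL_OFFSET */
        struct upid numbers[2];             /* IA64_PID_UPID_OFFSET */
    };

    /* Assembly form: load numbers + (level << IA64_UPID_SHIFT), then nr. */
    static int pid_nr_in_own_ns(const struct pid_model *pid)
    {
        return pid->numbers[pid->level].nr;
    }

    int main(void)
    {
        struct pid_model p = { .level = 1 };
        p.numbers[0].nr = 1234;     /* id in the initial namespace */
        p.numbers[1].nr = 7;        /* id inside the child namespace */
        printf("%d\n", pid_nr_in_own_ns(&p));   /* prints 7 */
        return 0;
    }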
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index fbe742ad2fde..90ef338cf46f 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -24,6 +24,7 @@ int kdump_status[NR_CPUS];
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
 static int kdump_on_init = 1;
+static int kdump_on_fatal_mca = 1;
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -118,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	crash_save_vmcoreinfo();
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -148,7 +150,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
-	if (!kdump_on_init)
+	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
 	if (!ia64_kimage) {
@@ -173,32 +175,38 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		return NOTIFY_DONE;
 
 	switch (val) {
 	case DIE_INIT_MONARCH_PROCESS:
-		atomic_set(&kdump_in_progress, 1);
-		*(nd->monarch_cpu) = -1;
+		if (kdump_on_init) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+		}
 		break;
 	case DIE_INIT_MONARCH_LEAVE:
-		machine_kdump_on_init();
+		if (kdump_on_init)
+			machine_kdump_on_init();
 		break;
 	case DIE_INIT_SLAVE_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_RENDZVOUS_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
 		/* die_register->signr indicate if MCA is recoverable */
-		if (!args->signr)
-			machine_kdump_on_init();
+		if (kdump_on_fatal_mca && !args->signr) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+			machine_kdump_on_init();
+		}
 		break;
 	}
 	return NOTIFY_DONE;
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_on_init_table[] = {
+static ctl_table kdump_ctl_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
@@ -207,6 +215,14 @@ static ctl_table kdump_on_init_table[] = {
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_fatal_mca",
+		.data = &kdump_on_fatal_mca,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
 	{ .ctl_name = 0 }
 };
 
@@ -215,7 +231,7 @@ static ctl_table sys_table[] = {
 		.ctl_name = CTL_KERN,
 		.procname = "kernel",
 		.mode = 0555,
-		.child = kdump_on_init_table,
+		.child = kdump_ctl_table,
 	},
 	{ .ctl_name = 0 }
 };
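
Like kdump_on_init next to it, the new entry is an unnumbered sysctl, so after boot it can be toggled through procfs; for example, to keep kdump for INIT events but disable it for fatal MCAs:

    echo 0 > /proc/sys/kernel/kdump_on_fatal_mca

The path follows from the ctl_table above: procname "kdump_on_fatal_mca" chained under "kernel". Both knobs default to 1, which preserves the old behavior of always dumping.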
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 728d7247a1a6..d45f215bc8fc 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -37,6 +37,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mca.h>
+#include <asm/tlbflush.h>
 
 #define EFI_DEBUG	0
 
@@ -403,6 +404,41 @@ efi_get_pal_addr (void)
 	return NULL;
 }
 
+
+static u8 __init palo_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end)
+		sum = (u8) (sum + *(buffer++));
+
+	return sum;
+}
+
+/*
+ * Parse and handle PALO table which is published at:
+ * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
+ */
+static void __init handle_palo(unsigned long palo_phys)
+{
+	struct palo_table *palo = __va(palo_phys);
+	u8 checksum;
+
+	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
+		printk(KERN_INFO "PALO signature incorrect.\n");
+		return;
+	}
+
+	checksum = palo_checksum((u8 *)palo, palo->length);
+	if (checksum) {
+		printk(KERN_INFO "PALO checksum incorrect.\n");
+		return;
+	}
+
+	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
+}
+
 void
 efi_map_pal_code (void)
 {
@@ -432,6 +468,7 @@ efi_init (void)
 	u64 efi_desc_size;
 	char *cp, vendor[100] = "unknown";
 	int i;
+	unsigned long palo_phys;
 
 	/*
 	 * It's too early to be able to use the standard kernel command line
@@ -496,6 +533,8 @@ efi_init (void)
 	efi.hcdp = EFI_INVALID_TABLE_ADDR;
 	efi.uga = EFI_INVALID_TABLE_ADDR;
 
+	palo_phys = EFI_INVALID_TABLE_ADDR;
+
 	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
 		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 			efi.mps = config_tables[i].table;
@@ -515,10 +554,17 @@ efi_init (void)
 		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
 			efi.hcdp = config_tables[i].table;
 			printk(" HCDP=0x%lx", config_tables[i].table);
+		} else if (efi_guidcmp(config_tables[i].guid,
+			 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
+			palo_phys = config_tables[i].table;
+			printk(" PALO=0x%lx", config_tables[i].table);
 		}
 	}
 	printk("\n");
 
+	if (palo_phys != EFI_INVALID_TABLE_ADDR)
+		handle_palo(palo_phys);
+
 	runtime = __va(efi.systab->runtime);
 	efi.get_time = phys_get_time;
 	efi.set_time = phys_set_time;
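
handle_palo() accepts the table only when all of its bytes sum to zero modulo 256, the usual firmware-table checksum rule, which is why any nonzero palo_checksum() return is treated as corruption. A standalone illustration with a toy 8-byte "table" whose final byte is the checksum fix-up (layout illustrative, not the real PALO format):

    #include <stdio.h>

    static unsigned char sum_bytes(const unsigned char *p, unsigned int len)
    {
        unsigned char sum = 0;
        while (len--)
            sum = (unsigned char)(sum + *p++);
        return sum;
    }

    int main(void)
    {
        unsigned char table[8] = { 'P', 'A', 'L', 'O', 1, 0, 64, 0 };
        /* choose the last byte so the total wraps to zero */
        table[7] = (unsigned char)(0x100 - sum_bytes(table, 7));
        printf("checksum=%u (0 means valid)\n", sum_bytes(table, 8));
        return 0;
    }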
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3c331c464b40..b0be4a280174 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -710,6 +710,16 @@ ENTRY(ia64_leave_syscall)
 (pUStk) cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
 #endif
 .work_processed_syscall:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	adds r2=PT(LOADRS)+16,r12
+(pUStk)	mov.m r22=ar.itc			// fetch time at leave
+	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+	;;
+(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
+	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
+	adds r3=PT(AR_BSPSTORE)+16,r12		// deferred
+	;;
+#else
 	adds r2=PT(LOADRS)+16,r12
 	adds r3=PT(AR_BSPSTORE)+16,r12
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -718,6 +728,7 @@ ENTRY(ia64_leave_syscall)
 	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
 	nop.i 0
 	;;
+#endif
 	mov r16=ar.bsp				// M2  get existing backing store pointer
 	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
 (p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
@@ -737,12 +748,21 @@ ENTRY(ia64_leave_syscall)
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
 	ld8 r28=[r3],16		// M0|1 load cr.iip
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r30=[r2],16		// M0|1 load cr.ifs
+	ld8 r25=[r3],16		// M0|1 load ar.unat
+(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+#else
 	mov r22=r0		// A    clear r22
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
 	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	;;
+#endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
 	nop 0
@@ -759,7 +779,11 @@ ENTRY(ia64_leave_syscall)
 	ld8.fill r1=[r3],16			// M0|1 load r1
 (pUStk) mov r17=1				// A
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk) st1 [r15]=r17				// M2|3
+#else
 (pUStk) st1 [r14]=r17				// M2|3
+#endif
 	ld8.fill r13=[r3],16			// M0|1
 	mov f8=f0				// F    clear f8
 	;;
| @@ -775,12 +799,22 @@ ENTRY(ia64_leave_syscall) | |||
| 775 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition | 799 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition |
| 776 | cover // B add current frame into dirty partition & set cr.ifs | 800 | cover // B add current frame into dirty partition & set cr.ifs |
| 777 | ;; | 801 | ;; |
| 802 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 803 | mov r19=ar.bsp // M2 get new backing store pointer | ||
| 804 | st8 [r14]=r22 // M save time at leave | ||
| 805 | mov f10=f0 // F clear f10 | ||
| 806 | |||
| 807 | mov r22=r0 // A clear r22 | ||
| 808 | movl r14=__kernel_syscall_via_epc // X | ||
| 809 | ;; | ||
| 810 | #else | ||
| 778 | mov r19=ar.bsp // M2 get new backing store pointer | 811 | mov r19=ar.bsp // M2 get new backing store pointer |
| 779 | mov f10=f0 // F clear f10 | 812 | mov f10=f0 // F clear f10 |
| 780 | 813 | ||
| 781 | nop.m 0 | 814 | nop.m 0 |
| 782 | movl r14=__kernel_syscall_via_epc // X | 815 | movl r14=__kernel_syscall_via_epc // X |
| 783 | ;; | 816 | ;; |
| 817 | #endif | ||
| 784 | mov.m ar.csd=r0 // M2 clear ar.csd | 818 | mov.m ar.csd=r0 // M2 clear ar.csd |
| 785 | mov.m ar.ccv=r0 // M2 clear ar.ccv | 819 | mov.m ar.ccv=r0 // M2 clear ar.ccv |
| 786 | mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc) | 820 | mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc) |
| @@ -913,10 +947,18 @@ GLOBAL_ENTRY(ia64_leave_kernel) | |||
| 913 | adds r16=PT(CR_IPSR)+16,r12 | 947 | adds r16=PT(CR_IPSR)+16,r12 |
| 914 | adds r17=PT(CR_IIP)+16,r12 | 948 | adds r17=PT(CR_IIP)+16,r12 |
| 915 | 949 | ||
| 950 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 951 | .pred.rel.mutex pUStk,pKStk | ||
| 952 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled | ||
| 953 | (pUStk) mov.m r22=ar.itc // M fetch time at leave | ||
| 954 | nop.i 0 | ||
| 955 | ;; | ||
| 956 | #else | ||
| 916 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled | 957 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled |
| 917 | nop.i 0 | 958 | nop.i 0 |
| 918 | nop.i 0 | 959 | nop.i 0 |
| 919 | ;; | 960 | ;; |
| 961 | #endif | ||
| 920 | ld8 r29=[r16],16 // load cr.ipsr | 962 | ld8 r29=[r16],16 // load cr.ipsr |
| 921 | ld8 r28=[r17],16 // load cr.iip | 963 | ld8 r28=[r17],16 // load cr.iip |
| 922 | ;; | 964 | ;; |
| @@ -938,15 +980,37 @@ GLOBAL_ENTRY(ia64_leave_kernel) | |||
| 938 | ;; | 980 | ;; |
| 939 | ld8.fill r12=[r16],16 | 981 | ld8.fill r12=[r16],16 |
| 940 | ld8.fill r13=[r17],16 | 982 | ld8.fill r13=[r17],16 |
| 983 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 984 | (pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18 | ||
| 985 | #else | ||
| 941 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 | 986 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 |
| 987 | #endif | ||
| 942 | ;; | 988 | ;; |
| 943 | ld8 r20=[r16],16 // ar.fpsr | 989 | ld8 r20=[r16],16 // ar.fpsr |
| 944 | ld8.fill r15=[r17],16 | 990 | ld8.fill r15=[r17],16 |
| 991 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 992 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred | ||
| 993 | #endif | ||
| 945 | ;; | 994 | ;; |
| 946 | ld8.fill r14=[r16],16 | 995 | ld8.fill r14=[r16],16 |
| 947 | ld8.fill r2=[r17] | 996 | ld8.fill r2=[r17] |
| 948 | (pUStk) mov r17=1 | 997 | (pUStk) mov r17=1 |
| 949 | ;; | 998 | ;; |
| 999 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 1000 | // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; | ||
| 1001 | // mib : mov add br -> mib : ld8 add br | ||
| 1002 | // bbb_ : br nop cover;; mbb_ : mov br cover;; | ||
| 1003 | // | ||
| 1004 | // no one requires bsp in r16 if the (pKStk) branch is selected. | ||
| 1005 | (pUStk) st8 [r3]=r22 // save time at leave | ||
| 1006 | (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack | ||
| 1007 | shr.u r18=r19,16 // get byte size of existing "dirty" partition | ||
| 1008 | ;; | ||
| 1009 | ld8.fill r3=[r16] // deferred | ||
| 1010 | LOAD_PHYS_STACK_REG_SIZE(r17) | ||
| 1011 | (pKStk) br.cond.dpnt skip_rbs_switch | ||
| 1012 | mov r16=ar.bsp // get existing backing store pointer | ||
| 1013 | #else | ||
| 950 | ld8.fill r3=[r16] | 1014 | ld8.fill r3=[r16] |
| 951 | (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack | 1015 | (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack |
| 952 | shr.u r18=r19,16 // get byte size of existing "dirty" partition | 1016 | shr.u r18=r19,16 // get byte size of existing "dirty" partition |
| @@ -954,6 +1018,7 @@ GLOBAL_ENTRY(ia64_leave_kernel) | |||
| 954 | mov r16=ar.bsp // get existing backing store pointer | 1018 | mov r16=ar.bsp // get existing backing store pointer |
| 955 | LOAD_PHYS_STACK_REG_SIZE(r17) | 1019 | LOAD_PHYS_STACK_REG_SIZE(r17) |
| 956 | (pKStk) br.cond.dpnt skip_rbs_switch | 1020 | (pKStk) br.cond.dpnt skip_rbs_switch |
| 1021 | #endif | ||
| 957 | 1022 | ||
| 958 | /* | 1023 | /* |
| 959 | * Restore user backing store. | 1024 | * Restore user backing store. |
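In C terms, the CONFIG_VIRT_CPU_ACCOUNTING additions to both leave paths above (ia64_leave_syscall and ia64_leave_kernel) do one extra thing: when returning to user mode (the pUStk predicate), sample ar.itc and stash it in the task's TI_AC_LEAVE slot, so the next kernel entry can charge the interval from here to that entry as user time. Roughly (a sketch; ac_leave stands for the field behind the TI_AC_LEAVE offset):

    /* leave-path bookkeeping, assuming ia64_get_itc() reads ar.itc */
    if (returning_to_user)		/* the pUStk predicate */
    	current_thread_info()->ac_leave = ia64_get_itc();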
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 44841971f077..c1625c7e1779 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
| @@ -61,13 +61,29 @@ ENTRY(fsys_getpid) | |||
| 61 | .prologue | 61 | .prologue |
| 62 | .altrp b6 | 62 | .altrp b6 |
| 63 | .body | 63 | .body |
| 64 | add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16 | ||
| 65 | ;; | ||
| 66 | ld8 r17=[r17] // r17 = current->group_leader | ||
| 64 | add r9=TI_FLAGS+IA64_TASK_SIZE,r16 | 67 | add r9=TI_FLAGS+IA64_TASK_SIZE,r16 |
| 65 | ;; | 68 | ;; |
| 66 | ld4 r9=[r9] | 69 | ld4 r9=[r9] |
| 67 | add r8=IA64_TASK_TGID_OFFSET,r16 | 70 | add r17=IA64_TASK_TGIDLINK_OFFSET,r17 |
| 68 | ;; | 71 | ;; |
| 69 | and r9=TIF_ALLWORK_MASK,r9 | 72 | and r9=TIF_ALLWORK_MASK,r9 |
| 70 | ld4 r8=[r8] // r8 = current->tgid | 73 | ld8 r17=[r17] // r17 = current->group_leader->pids[PIDTYPE_PID].pid |
| 74 | ;; | ||
| 75 | add r8=IA64_PID_LEVEL_OFFSET,r17 | ||
| 76 | ;; | ||
| 77 | ld4 r8=[r8] // r8 = pid->level | ||
| 78 | add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] | ||
| 79 | ;; | ||
| 80 | shl r8=r8,IA64_UPID_SHIFT | ||
| 81 | ;; | ||
| 82 | add r17=r17,r8 // r17 = &pid->numbers[pid->level] | ||
| 83 | ;; | ||
| 84 | ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr | ||
| 85 | ;; | ||
| 86 | mov r17=0 | ||
| 71 | ;; | 87 | ;; |
| 72 | cmp.ne p8,p0=0,r9 | 88 | cmp.ne p8,p0=0,r9 |
| 73 | (p8) br.spnt.many fsys_fallback_syscall | 89 | (p8) br.spnt.many fsys_fallback_syscall |
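The new fsys_getpid fast path is the pid-namespace-aware tgid lookup done in registers: follow current->group_leader to its struct pid and return the numeric id recorded for the task's own namespace level. The C equivalent is approximately (a sketch against the 2.6.25-era data structures):

    /* what the register walk above computes */
    struct pid *pid = current->group_leader->pids[PIDTYPE_PID].pid;
    pid_t tgid = pid->numbers[pid->level].nr;	/* tgid as seen from the task's pid namespace */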
| @@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address) | |||
| 126 | .altrp b6 | 142 | .altrp b6 |
| 127 | .body | 143 | .body |
| 128 | add r9=TI_FLAGS+IA64_TASK_SIZE,r16 | 144 | add r9=TI_FLAGS+IA64_TASK_SIZE,r16 |
| 145 | add r17=IA64_TASK_TGIDLINK_OFFSET,r16 | ||
| 129 | ;; | 146 | ;; |
| 130 | ld4 r9=[r9] | 147 | ld4 r9=[r9] |
| 131 | tnat.z p6,p7=r32 // check argument register for being NaT | 148 | tnat.z p6,p7=r32 // check argument register for being NaT |
| 149 | ld8 r17=[r17] // r17 = current->pids[PIDTYPE_PID].pid | ||
| 132 | ;; | 150 | ;; |
| 133 | and r9=TIF_ALLWORK_MASK,r9 | 151 | and r9=TIF_ALLWORK_MASK,r9 |
| 134 | add r8=IA64_TASK_PID_OFFSET,r16 | 152 | add r8=IA64_PID_LEVEL_OFFSET,r17 |
| 135 | add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16 | 153 | add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16 |
| 136 | ;; | 154 | ;; |
| 137 | ld4 r8=[r8] | 155 | ld4 r8=[r8] // r8 = pid->level |
| 156 | add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] | ||
| 157 | ;; | ||
| 158 | shl r8=r8,IA64_UPID_SHIFT | ||
| 159 | ;; | ||
| 160 | add r17=r17,r8 // r17 = &pid->numbers[pid->level] | ||
| 161 | ;; | ||
| 162 | ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr | ||
| 163 | ;; | ||
| 138 | cmp.ne p8,p0=0,r9 | 164 | cmp.ne p8,p0=0,r9 |
| 139 | mov r17=-1 | 165 | mov r17=-1 |
| 140 | ;; | 166 | ;; |
| @@ -210,27 +236,25 @@ ENTRY(fsys_gettimeofday) | |||
| 210 | // Note that instructions are optimized for McKinley. McKinley can | 236 | // Note that instructions are optimized for McKinley. McKinley can |
| 211 | // process two bundles simultaneously and therefore we continuously | 237 | // process two bundles simultaneously and therefore we continuously |
| 212 | // try to feed the CPU two bundles and then a stop. | 238 | // try to feed the CPU two bundles and then a stop. |
| 213 | // | 239 | |
| 214 | // Additional note that code has changed a lot. Optimization is TBD. | ||
| 215 | // Comments begin with "?" are maybe outdated. | ||
| 216 | tnat.nz p6,p0 = r31 // ? branch deferred to fit later bundle | ||
| 217 | mov pr = r30,0xc000 // Set predicates according to function | ||
| 218 | add r2 = TI_FLAGS+IA64_TASK_SIZE,r16 | 240 | add r2 = TI_FLAGS+IA64_TASK_SIZE,r16 |
| 241 | tnat.nz p6,p0 = r31 // guard against NaT argument | ||
| 242 | (p6) br.cond.spnt.few .fail_einval | ||
| 219 | movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address | 243 | movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address |
| 220 | ;; | 244 | ;; |
| 245 | ld4 r2 = [r2] // process work pending flags | ||
| 221 | movl r29 = itc_jitter_data // itc_jitter | 246 | movl r29 = itc_jitter_data // itc_jitter |
| 222 | add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time | 247 | add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time |
| 223 | ld4 r2 = [r2] // process work pending flags | ||
| 224 | ;; | ||
| 225 | (p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time | ||
| 226 | add r21 = IA64_CLKSRC_MMIO_OFFSET,r20 | 248 | add r21 = IA64_CLKSRC_MMIO_OFFSET,r20 |
| 227 | add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29 | 249 | mov pr = r30,0xc000 // Set predicates according to function |
| 250 | ;; | ||
| 228 | and r2 = TIF_ALLWORK_MASK,r2 | 251 | and r2 = TIF_ALLWORK_MASK,r2 |
| 229 | (p6) br.cond.spnt.few .fail_einval // ? deferred branch | 252 | add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29 |
| 253 | (p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time | ||
| 230 | ;; | 254 | ;; |
| 231 | add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last | 255 | add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last |
| 232 | cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled | 256 | cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled |
| 233 | (p6) br.cond.spnt.many fsys_fallback_syscall | 257 | (p6) br.cond.spnt.many fsys_fallback_syscall |
| 234 | ;; | 258 | ;; |
| 235 | // Begin critical section | 259 | // Begin critical section |
| 236 | .time_redo: | 260 | .time_redo: |
| @@ -258,7 +282,6 @@ ENTRY(fsys_gettimeofday) | |||
| 258 | (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! | 282 | (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! |
| 259 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. | 283 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. |
| 260 | (p13) ld8 r25 = [r19] // get itc_lastcycle value | 284 | (p13) ld8 r25 = [r19] // get itc_lastcycle value |
| 261 | ;; // ? could be removed by moving the last add upward | ||
| 262 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec | 285 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec |
| 263 | ;; | 286 | ;; |
| 264 | ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec | 287 | ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec |
| @@ -285,13 +308,12 @@ ENTRY(fsys_gettimeofday) | |||
| 285 | EX(.fail_efault, probe.w.fault r31, 3) | 308 | EX(.fail_efault, probe.w.fault r31, 3) |
| 286 | xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter) | 309 | xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter) |
| 287 | ;; | 310 | ;; |
| 288 | // ? simulate tbit.nz.or p7,p0 = r28,0 | ||
| 289 | getf.sig r2 = f8 | 311 | getf.sig r2 = f8 |
| 290 | mf | 312 | mf |
| 291 | ;; | 313 | ;; |
| 292 | ld4 r10 = [r20] // gtod_lock.sequence | 314 | ld4 r10 = [r20] // gtod_lock.sequence |
| 293 | shr.u r2 = r2,r23 // shift by factor | 315 | shr.u r2 = r2,r23 // shift by factor |
| 294 | ;; // ? overloaded 3 bundles! | 316 | ;; |
| 295 | add r8 = r8,r2 // Add xtime.nsecs | 317 | add r8 = r8,r2 // Add xtime.nsecs |
| 296 | cmp4.ne p7,p0 = r28,r10 | 318 | cmp4.ne p7,p0 = r28,r10 |
| 297 | (p7) br.cond.dpnt.few .time_redo // sequence number changed, redo | 319 | (p7) br.cond.dpnt.few .time_redo // sequence number changed, redo |
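The reshuffling above repacks bundles and drops the stale "?" comments; the algorithm itself is unchanged: a sequence-counter-protected clocksource read that retries if a writer updated fsyscall_gtod_data meanwhile. In C it is roughly (names are illustrative, mirroring the offsets used in the assembly):

    /* sketch of the .time_redo loop */
    do {
    	seq  = gtod->lock.sequence;			/* gtod_lock.sequence */
    	cyc  = read_counter() - gtod->cycle_last;	/* ar.itc or the MMIO timer */
    	nsec = gtod->wall.tv_nsec + ((cyc * nsec_per_cyc) >> shift);
    	sec  = gtod->wall.tv_sec;
    } while (seq != gtod->lock.sequence);		/* writer raced us: redo */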
| @@ -319,9 +341,9 @@ EX(.fail_efault, probe.w.fault r31, 3) | |||
| 319 | EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles | 341 | EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles |
| 320 | (p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it | 342 | (p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it |
| 321 | ;; | 343 | ;; |
| 322 | mov r8 = r0 | ||
| 323 | (p14) getf.sig r2 = f8 | 344 | (p14) getf.sig r2 = f8 |
| 324 | ;; | 345 | ;; |
| 346 | mov r8 = r0 | ||
| 325 | (p14) shr.u r21 = r2, 4 | 347 | (p14) shr.u r21 = r2, 4 |
| 326 | ;; | 348 | ;; |
| 327 | EX(.fail_efault, st8 [r31] = r9) | 349 | EX(.fail_efault, st8 [r31] = r9) |
| @@ -660,7 +682,11 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
| 660 | nop.i 0 | 682 | nop.i 0 |
| 661 | ;; | 683 | ;; |
| 662 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 | 684 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 |
| 685 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 686 | mov.m r30=ar.itc // M get cycle for accounting | ||
| 687 | #else | ||
| 663 | nop.m 0 | 688 | nop.m 0 |
| 689 | #endif | ||
| 664 | nop.i 0 | 690 | nop.i 0 |
| 665 | ;; | 691 | ;; |
| 666 | mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore | 692 | mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore |
| @@ -682,6 +708,28 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
| 682 | cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 | 708 | cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 |
| 683 | br.call.sptk.many b7=ia64_syscall_setup // B | 709 | br.call.sptk.many b7=ia64_syscall_setup // B |
| 684 | ;; | 710 | ;; |
| 711 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 712 | // mov.m r30=ar.itc was issued in advance | ||
| 713 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2 | ||
| 714 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2 | ||
| 715 | ;; | ||
| 716 | ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel | ||
| 717 | ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time when we left the kernel | ||
| 718 | ;; | ||
| 719 | ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime | ||
| 720 | ld8 r21=[r17] // cumulated utime | ||
| 721 | sub r22=r19,r18 // stime accrued before leaving the kernel | ||
| 722 | ;; | ||
| 723 | st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // update stamp | ||
| 724 | sub r18=r30,r19 // elapsed time in user mode | ||
| 725 | ;; | ||
| 726 | add r20=r20,r22 // sum stime | ||
| 727 | add r21=r21,r18 // sum utime | ||
| 728 | ;; | ||
| 729 | st8 [r16]=r20 // update stime | ||
| 730 | st8 [r17]=r21 // update utime | ||
| 731 | ;; | ||
| 732 | #endif | ||
| 685 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 | 733 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 |
| 686 | mov rp=r14 // I0 set the real return addr | 734 | mov rp=r14 // I0 set the real return addr |
| 687 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A | 735 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A |
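The accounting block just added after the ia64_syscall_setup call is the entry-side half of the scheme: with the current ar.itc already in r30, it charges leave - stamp as system time, now - leave as user time, and restarts the stamp. The arithmetic, as C over the TI_AC_* fields (a sketch; ti stands for current_thread_info(), now for the pre-read ar.itc):

    /* split the elapsed cycles at kernel entry */
    ti->ac_stime += ti->ac_leave - ti->ac_stamp;	/* kernel time before we last left */
    ti->ac_utime += now - ti->ac_leave;			/* user time since we left */
    ti->ac_stamp  = now;				/* restart the stamp */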
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index d3a41d5f8d12..ddeab4e36fd5 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
| @@ -1002,6 +1002,26 @@ GLOBAL_ENTRY(sched_clock) | |||
| 1002 | br.ret.sptk.many rp | 1002 | br.ret.sptk.many rp |
| 1003 | END(sched_clock) | 1003 | END(sched_clock) |
| 1004 | 1004 | ||
| 1005 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 1006 | GLOBAL_ENTRY(cycle_to_cputime) | ||
| 1007 | alloc r16=ar.pfs,1,0,0,0 | ||
| 1008 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | ||
| 1009 | ;; | ||
| 1010 | ldf8 f8=[r8] | ||
| 1011 | ;; | ||
| 1012 | setf.sig f9=r32 | ||
| 1013 | ;; | ||
| 1014 | xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc) | ||
| 1015 | xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product | ||
| 1016 | ;; | ||
| 1017 | getf.sig r8=f10 // (5 cyc) | ||
| 1018 | getf.sig r9=f11 | ||
| 1019 | ;; | ||
| 1020 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT | ||
| 1021 | br.ret.sptk.many rp | ||
| 1022 | END(cycle_to_cputime) | ||
| 1023 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
| 1024 | |||
| 1005 | GLOBAL_ENTRY(start_kernel_thread) | 1025 | GLOBAL_ENTRY(start_kernel_thread) |
| 1006 | .prologue | 1026 | .prologue |
| 1007 | .save rp, r0 // this is the end of the call-chain | 1027 | .save rp, r0 // this is the end of the call-chain |
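cycle_to_cputime converts ITC cycles to nanoseconds without a division: a 64x64 multiply yields a 128-bit product (xmpy.lu/xmpy.hu) and shrp extracts the 64 bits starting at IA64_NSEC_PER_CYC_SHIFT. A C sketch of the same computation (using a compiler-provided __int128, which the assembly of course does not need):

    /* nsec = (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT */
    static inline u64 cycle_to_cputime_c(u64 cyc, u64 nsec_per_cyc)
    {
    	unsigned __int128 prod = (unsigned __int128)cyc * nsec_per_cyc;
    	return (u64)(prod >> IA64_NSEC_PER_CYC_SHIFT);
    }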
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index d8be23fbe6bc..5538471e8d68 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
| @@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) | |||
| 472 | static unsigned char count; | 472 | static unsigned char count; |
| 473 | static long last_time; | 473 | static long last_time; |
| 474 | 474 | ||
| 475 | if (jiffies - last_time > 5*HZ) | 475 | if (time_after(jiffies, last_time + 5 * HZ)) |
| 476 | count = 0; | 476 | count = 0; |
| 477 | if (++count < 5) { | 477 | if (++count < 5) { |
| 478 | last_time = jiffies; | 478 | last_time = jiffies; |
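The point of this one-liner: "jiffies - last_time > 5*HZ" misbehaves once jiffies wraps, while time_after() does the comparison through a signed subtraction and so stays correct across the wrap (the mca.c hunk below makes the same conversion with time_before()). The macro is essentially:

    /* simplified from <linux/jiffies.h>; the real macros also typecheck */
    #define time_after(a, b)	((long)(b) - (long)(a) < 0)
    #define time_before(a, b)	time_after(b, a)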
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 34f44d8be00d..6678c49daba3 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
| @@ -805,8 +805,13 @@ ENTRY(break_fault) | |||
| 805 | 805 | ||
| 806 | (p8) adds r28=16,r28 // A switch cr.iip to next bundle | 806 | (p8) adds r28=16,r28 // A switch cr.iip to next bundle |
| 807 | (p9) adds r8=1,r8 // A increment ei to next slot | 807 | (p9) adds r8=1,r8 // A increment ei to next slot |
| 808 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 809 | ;; | ||
| 810 | mov b6=r30 // I0 setup syscall handler branch reg early | ||
| 811 | #else | ||
| 808 | nop.i 0 | 812 | nop.i 0 |
| 809 | ;; | 813 | ;; |
| 814 | #endif | ||
| 810 | 815 | ||
| 811 | mov.m r25=ar.unat // M2 (5 cyc) | 816 | mov.m r25=ar.unat // M2 (5 cyc) |
| 812 | dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr | 817 | dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr |
| @@ -817,7 +822,11 @@ ENTRY(break_fault) | |||
| 817 | // | 822 | // |
| 818 | /////////////////////////////////////////////////////////////////////// | 823 | /////////////////////////////////////////////////////////////////////// |
| 819 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag | 824 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag |
| 825 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 826 | mov.m r30=ar.itc // M get cycle for accounting | ||
| 827 | #else | ||
| 820 | mov b6=r30 // I0 setup syscall handler branch reg early | 828 | mov b6=r30 // I0 setup syscall handler branch reg early |
| 829 | #endif | ||
| 821 | cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already? | 830 | cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already? |
| 822 | 831 | ||
| 823 | and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit | 832 | and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit |
| @@ -829,6 +838,30 @@ ENTRY(break_fault) | |||
| 829 | cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? | 838 | cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? |
| 830 | br.call.sptk.many b7=ia64_syscall_setup // B | 839 | br.call.sptk.many b7=ia64_syscall_setup // B |
| 831 | 1: | 840 | 1: |
| 841 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 842 | // mov.m r30=ar.itc was issued in advance, and r13 is current | ||
| 843 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A | ||
| 844 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A | ||
| 845 | (pKStk) br.cond.spnt .skip_accounting // B unlikely skip | ||
| 846 | ;; | ||
| 847 | ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp | ||
| 848 | ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave | ||
| 849 | ;; | ||
| 850 | ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime | ||
| 851 | ld8 r21=[r17] // M cumulated utime | ||
| 852 | sub r22=r19,r18 // A stime before leave | ||
| 853 | ;; | ||
| 854 | st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp | ||
| 855 | sub r18=r30,r19 // A elapsed time in user | ||
| 856 | ;; | ||
| 857 | add r20=r20,r22 // A sum stime | ||
| 858 | add r21=r21,r18 // A sum utime | ||
| 859 | ;; | ||
| 860 | st8 [r16]=r20 // M update stime | ||
| 861 | st8 [r17]=r21 // M update utime | ||
| 862 | ;; | ||
| 863 | .skip_accounting: | ||
| 864 | #endif | ||
| 832 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 | 865 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 |
| 833 | nop 0 | 866 | nop 0 |
| 834 | bsw.1 // B (6 cyc) regs are saved, switch to bank 1 | 867 | bsw.1 // B (6 cyc) regs are saved, switch to bank 1 |
| @@ -928,6 +961,7 @@ END(interrupt) | |||
| 928 | * - r27: saved ar.rsc | 961 | * - r27: saved ar.rsc |
| 929 | * - r28: saved cr.iip | 962 | * - r28: saved cr.iip |
| 930 | * - r29: saved cr.ipsr | 963 | * - r29: saved cr.ipsr |
| 964 | * - r30: ar.itc for accounting (don't touch) | ||
| 931 | * - r31: saved pr | 965 | * - r31: saved pr |
| 932 | * - b0: original contents (to be saved) | 966 | * - b0: original contents (to be saved) |
| 933 | * On exit: | 967 | * On exit: |
| @@ -1090,6 +1124,41 @@ END(dispatch_illegal_op_fault) | |||
| 1090 | DBG_FAULT(16) | 1124 | DBG_FAULT(16) |
| 1091 | FAULT(16) | 1125 | FAULT(16) |
| 1092 | 1126 | ||
| 1127 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 1128 | /* | ||
| 1129 | * There is no particular reason for this code to be here, other than | ||
| 1130 | * that there happens to be space here that would go unused otherwise. | ||
| 1131 | * If this fault ever gets "unreserved", simply move the following | ||
| 1132 | * code to a more suitable spot... | ||
| 1133 | * | ||
| 1134 | * account_sys_enter is called from SAVE_MIN* macros if accounting is | ||
| 1135 | * enabled and if the macro is entered from user mode. | ||
| 1136 | */ | ||
| 1137 | ENTRY(account_sys_enter) | ||
| 1138 | // mov.m r20=ar.itc was issued in advance, and r13 is current | ||
| 1139 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 | ||
| 1140 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 | ||
| 1141 | ;; | ||
| 1142 | ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel | ||
| 1143 | ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time when we left the kernel | ||
| 1144 | ;; | ||
| 1145 | ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime | ||
| 1146 | ld8 r21=[r17] // cumulated utime | ||
| 1147 | sub r22=r19,r18 // stime accrued before leaving the kernel | ||
| 1148 | ;; | ||
| 1149 | st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp | ||
| 1150 | sub r18=r20,r19 // elapsed time in user mode | ||
| 1151 | ;; | ||
| 1152 | add r23=r23,r22 // sum stime | ||
| 1153 | add r21=r21,r18 // sum utime | ||
| 1154 | ;; | ||
| 1155 | st8 [r16]=r23 // update stime | ||
| 1156 | st8 [r17]=r21 // update utime | ||
| 1157 | ;; | ||
| 1158 | br.ret.sptk.many rp | ||
| 1159 | END(account_sys_enter) | ||
| 1160 | #endif | ||
| 1161 | |||
| 1093 | .org ia64_ivt+0x4400 | 1162 | .org ia64_ivt+0x4400 |
| 1094 | ///////////////////////////////////////////////////////////////////////////////////////// | 1163 | ///////////////////////////////////////////////////////////////////////////////////////// |
| 1095 | // 0x4400 Entry 17 (size 64 bundles) Reserved | 1164 | // 0x4400 Entry 17 (size 64 bundles) Reserved |
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 8d9a446a0d17..233434f4f88f 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c | |||
| @@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = { | |||
| 78 | { u, u, u }, /* 1F */ | 78 | { u, u, u }, /* 1F */ |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | /* Insert a long branch (brl) instruction */ | ||
| 82 | static void __kprobes set_brl_inst(void *from, void *to) | ||
| 83 | { | ||
| 84 | s64 rel = ((s64) to - (s64) from) >> 4; | ||
| 85 | bundle_t *brl; | ||
| 86 | brl = (bundle_t *) ((u64) from & ~0xf); | ||
| 87 | brl->quad0.template = 0x05; /* [MLX](stop) */ | ||
| 88 | brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */ | ||
| 89 | brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2; | ||
| 90 | brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46); | ||
| 91 | /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ | ||
| 92 | brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff); | ||
| 93 | } | ||
| 94 | |||
| 81 | /* | 95 | /* |
| 82 | * In this function we check to see if the instruction | 96 | * In this function we check to see if the instruction |
| 83 | * is IP relative instruction and update the kprobe | 97 | * is IP relative instruction and update the kprobe |
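For reference, set_brl_inst() spreads the 60-bit bundle-granular displacement (rel, the byte offset already shifted right by 4) across the MLX bundle exactly as the slot writes show; decomposed in C (field names here are descriptive, not the architecture manual's):

    /* how set_brl_inst() splits rel */
    u64 imm20b = rel & 0xfffff;			/* low 20 bits -> slot 2, via BRL_INST */
    u64 imm39  = (rel >> 20) & 0x7fffffffffULL;	/* bits 20..58 -> the L slot (slot1) */
    u64 sign   = (rel >> 59) & 0x1;		/* bit 59      -> slot 2, via BRL_INST */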
| @@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | |||
| 496 | regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; | 510 | regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; |
| 497 | } | 511 | } |
| 498 | 512 | ||
| 513 | /* Check whether the instruction in the given slot is a break instruction */ | ||
| 514 | static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot) | ||
| 515 | { | ||
| 516 | unsigned int major_opcode; | ||
| 517 | unsigned int template = bundle->quad0.template; | ||
| 518 | unsigned long kprobe_inst; | ||
| 519 | |||
| 520 | /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ | ||
| 521 | if (slot == 1 && bundle_encoding[template][1] == L) | ||
| 522 | slot++; | ||
| 523 | |||
| 524 | /* Get the probed instruction at the given slot */ | ||
| 525 | get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); | ||
| 526 | |||
| 527 | /* For a break instruction, | ||
| 528 | * bits 37:40 (major opcode) must be zero, | ||
| 529 | * bits 27:32 (x6) must be zero, and | ||
| 530 | * bits 32:35 (x3) must be zero. | ||
| 531 | */ | ||
| 532 | if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) { | ||
| 533 | /* Not a break instruction */ | ||
| 534 | return 0; | ||
| 535 | } | ||
| 536 | |||
| 537 | /* Is a break instruction */ | ||
| 538 | return 1; | ||
| 539 | } | ||
| 540 | |||
| 541 | /* | ||
| 542 | * In this function, we check whether the target bundle modifies the IP | ||
| 543 | * or may trigger an exception. If so, it is not boostable. | ||
| 544 | */ | ||
| 545 | static int __kprobes can_boost(bundle_t *bundle, uint slot, | ||
| 546 | unsigned long bundle_addr) | ||
| 547 | { | ||
| 548 | unsigned int template = bundle->quad0.template; | ||
| 549 | |||
| 550 | do { | ||
| 551 | if (search_exception_tables(bundle_addr + slot) || | ||
| 552 | __is_ia64_break_inst(bundle, slot)) | ||
| 553 | return 0; /* exception may occur in this bundle */ | ||
| 554 | } while ((++slot) < 3); | ||
| 555 | template &= 0x1e; | ||
| 556 | if (template >= 0x10 /* including B unit */ || | ||
| 557 | template == 0x04 /* including X unit */ || | ||
| 558 | template == 0x06) /* undefined */ | ||
| 559 | return 0; | ||
| 560 | |||
| 561 | return 1; | ||
| 562 | } | ||
| 563 | |||
| 564 | /* Prepare the long-jump bundle and disable other boosters if needed */ | ||
| 565 | static void __kprobes prepare_booster(struct kprobe *p) | ||
| 566 | { | ||
| 567 | unsigned long addr = (unsigned long)p->addr & ~0xFULL; | ||
| 568 | unsigned int slot = (unsigned long)p->addr & 0xf; | ||
| 569 | struct kprobe *other_kp; | ||
| 570 | |||
| 571 | if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) { | ||
| 572 | set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1); | ||
| 573 | p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE; | ||
| 574 | } | ||
| 575 | |||
| 576 | /* disable boosters in previous slots */ | ||
| 577 | for (; addr < (unsigned long)p->addr; addr++) { | ||
| 578 | other_kp = get_kprobe((void *)addr); | ||
| 579 | if (other_kp) | ||
| 580 | other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE; | ||
| 581 | } | ||
| 582 | } | ||
| 583 | |||
| 499 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 584 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
| 500 | { | 585 | { |
| 501 | unsigned long addr = (unsigned long) p->addr; | 586 | unsigned long addr = (unsigned long) p->addr; |
| @@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
| 530 | 615 | ||
| 531 | prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); | 616 | prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); |
| 532 | 617 | ||
| 618 | prepare_booster(p); | ||
| 619 | |||
| 533 | return 0; | 620 | return 0; |
| 534 | } | 621 | } |
| 535 | 622 | ||
| @@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p) | |||
| 543 | src = &p->opcode.bundle; | 630 | src = &p->opcode.bundle; |
| 544 | 631 | ||
| 545 | flush_icache_range((unsigned long)p->ainsn.insn, | 632 | flush_icache_range((unsigned long)p->ainsn.insn, |
| 546 | (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); | 633 | (unsigned long)p->ainsn.insn + |
| 634 | sizeof(kprobe_opcode_t) * MAX_INSN_SIZE); | ||
| 635 | |||
| 547 | switch (p->ainsn.slot) { | 636 | switch (p->ainsn.slot) { |
| 548 | case 0: | 637 | case 0: |
| 549 | dest->quad0.slot0 = src->quad0.slot0; | 638 | dest->quad0.slot0 = src->quad0.slot0; |
| @@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
| 584 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 673 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
| 585 | { | 674 | { |
| 586 | mutex_lock(&kprobe_mutex); | 675 | mutex_lock(&kprobe_mutex); |
| 587 | free_insn_slot(p->ainsn.insn, 0); | 676 | free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); |
| 588 | mutex_unlock(&kprobe_mutex); | 677 | mutex_unlock(&kprobe_mutex); |
| 589 | } | 678 | } |
| 590 | /* | 679 | /* |
| 591 | * We are resuming execution after a single step fault, so the pt_regs | 680 | * We are resuming execution after a single step fault, so the pt_regs |
| 592 | * structure reflects the register state after we executed the instruction | 681 | * structure reflects the register state after we executed the instruction |
| 593 | * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust | 682 | * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust |
| 594 | * the ip to point back to the original stack address. To set the IP address | 683 | * the ip to point back to the original stack address. To set the IP address |
| 595 | * to original stack address, handle the case where we need to fixup the | 684 | * to original stack address, handle the case where we need to fixup the |
| 596 | * relative IP address and/or fixup branch register. | 685 | * relative IP address and/or fixup branch register. |
| @@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
| 607 | if (slot == 1 && bundle_encoding[template][1] == L) | 696 | if (slot == 1 && bundle_encoding[template][1] == L) |
| 608 | slot = 2; | 697 | slot = 2; |
| 609 | 698 | ||
| 610 | if (p->ainsn.inst_flag) { | 699 | if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) { |
| 611 | 700 | ||
| 612 | if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { | 701 | if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { |
| 613 | /* Fix relative IP address */ | 702 | /* Fix relative IP address */ |
| @@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) | |||
| 686 | static int __kprobes is_ia64_break_inst(struct pt_regs *regs) | 775 | static int __kprobes is_ia64_break_inst(struct pt_regs *regs) |
| 687 | { | 776 | { |
| 688 | unsigned int slot = ia64_psr(regs)->ri; | 777 | unsigned int slot = ia64_psr(regs)->ri; |
| 689 | unsigned int template, major_opcode; | ||
| 690 | unsigned long kprobe_inst; | ||
| 691 | unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; | 778 | unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; |
| 692 | bundle_t bundle; | 779 | bundle_t bundle; |
| 693 | 780 | ||
| 694 | memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); | 781 | memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); |
| 695 | template = bundle.quad0.template; | ||
| 696 | |||
| 697 | /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ | ||
| 698 | if (slot == 1 && bundle_encoding[template][1] == L) | ||
| 699 | slot++; | ||
| 700 | 782 | ||
| 701 | /* Get Kprobe probe instruction at given slot*/ | 783 | return __is_ia64_break_inst(&bundle, slot); |
| 702 | get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode); | ||
| 703 | |||
| 704 | /* For break instruction, | ||
| 705 | * Bits 37:40 Major opcode to be zero | ||
| 706 | * Bits 27:32 X6 to be zero | ||
| 707 | * Bits 32:35 X3 to be zero | ||
| 708 | */ | ||
| 709 | if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) { | ||
| 710 | /* Not a break instruction */ | ||
| 711 | return 0; | ||
| 712 | } | ||
| 713 | |||
| 714 | /* Is a break instruction */ | ||
| 715 | return 1; | ||
| 716 | } | 784 | } |
| 717 | 785 | ||
| 718 | static int __kprobes pre_kprobes_handler(struct die_args *args) | 786 | static int __kprobes pre_kprobes_handler(struct die_args *args) |
| @@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) | |||
| 802 | return 1; | 870 | return 1; |
| 803 | 871 | ||
| 804 | ss_probe: | 872 | ss_probe: |
| 873 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) | ||
| 874 | if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { | ||
| 875 | /* Boost up -- we can execute copied instructions directly */ | ||
| 876 | ia64_psr(regs)->ri = p->ainsn.slot; | ||
| 877 | regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL; | ||
| 878 | /* turn single stepping off */ | ||
| 879 | ia64_psr(regs)->ss = 0; | ||
| 880 | |||
| 881 | reset_current_kprobe(); | ||
| 882 | preempt_enable_no_resched(); | ||
| 883 | return 1; | ||
| 884 | } | ||
| 885 | #endif | ||
| 805 | prepare_ss(p, regs); | 886 | prepare_ss(p, regs); |
| 806 | kcb->kprobe_status = KPROBE_HIT_SS; | 887 | kcb->kprobe_status = KPROBE_HIT_SS; |
| 807 | return 1; | 888 | return 1; |
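This is the payoff of the booster: a boosted hit skips the single-step exception round trip entirely. Execution resumes directly in the copied bundle, and the brl that prepare_booster() planted in the second bundle of the slot carries control back to the bundle following the probe point. The slot layout this relies on (as implied by the two-bundle copy and the MAX_INSN_SIZE flush above):

    /*
     * p->ainsn.insn[0].bundle: copy of the probed bundle, executed in place
     *                          of single-stepping it
     * p->ainsn.insn[1].bundle: brl back to (p->addr & ~0xf) + 16, i.e. the
     *                          bundle after the probe point
     */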
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6c18221dba36..e51bced3b0fa 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
| @@ -69,6 +69,7 @@ | |||
| 69 | * 2007-04-27 Russ Anderson <rja@sgi.com> | 69 | * 2007-04-27 Russ Anderson <rja@sgi.com> |
| 70 | * Support multiple cpus going through OS_MCA in the same event. | 70 | * Support multiple cpus going through OS_MCA in the same event. |
| 71 | */ | 71 | */ |
| 72 | #include <linux/jiffies.h> | ||
| 72 | #include <linux/types.h> | 73 | #include <linux/types.h> |
| 73 | #include <linux/init.h> | 74 | #include <linux/init.h> |
| 74 | #include <linux/sched.h> | 75 | #include <linux/sched.h> |
| @@ -97,6 +98,7 @@ | |||
| 97 | 98 | ||
| 98 | #include <asm/irq.h> | 99 | #include <asm/irq.h> |
| 99 | #include <asm/hw_irq.h> | 100 | #include <asm/hw_irq.h> |
| 101 | #include <asm/tlb.h> | ||
| 100 | 102 | ||
| 101 | #include "mca_drv.h" | 103 | #include "mca_drv.h" |
| 102 | #include "entry.h" | 104 | #include "entry.h" |
| @@ -112,6 +114,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ | |||
| 112 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ | 114 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ |
| 113 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ | 115 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ |
| 114 | DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ | 116 | DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ |
| 117 | DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */ | ||
| 115 | 118 | ||
| 116 | unsigned long __per_cpu_mca[NR_CPUS]; | 119 | unsigned long __per_cpu_mca[NR_CPUS]; |
| 117 | 120 | ||
| @@ -293,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void) | |||
| 293 | if (mlogbuf_finished) | 296 | if (mlogbuf_finished) |
| 294 | return; | 297 | return; |
| 295 | 298 | ||
| 296 | if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) { | 299 | if (mlogbuf_timestamp && |
| 300 | time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) { | ||
| 297 | printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " | 301 | printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " |
| 298 | " and the system seems to be messed up.\n"); | 302 | " and the system seems to be messed up.\n"); |
| 299 | ia64_mlogbuf_finish(0); | 303 | ia64_mlogbuf_finish(0); |
| @@ -1182,6 +1186,49 @@ all_in: | |||
| 1182 | return; | 1186 | return; |
| 1183 | } | 1187 | } |
| 1184 | 1188 | ||
| 1189 | /* mca_insert_tr | ||
| 1190 | * | ||
| 1191 | * Switch the rid when reloading a TR, if needed. | ||
| 1192 | * iord: 1: itr, 2: dtr | ||
| 1193 | * | ||
| 1194 | */ | ||
| 1195 | static void mca_insert_tr(u64 iord) | ||
| 1196 | { | ||
| 1197 | |||
| 1198 | int i; | ||
| 1199 | u64 old_rr; | ||
| 1200 | struct ia64_tr_entry *p; | ||
| 1201 | unsigned long psr; | ||
| 1202 | int cpu = smp_processor_id(); | ||
| 1203 | |||
| 1204 | psr = ia64_clear_ic(); | ||
| 1205 | for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { | ||
| 1206 | p = &__per_cpu_idtrs[cpu][iord-1][i]; | ||
| 1207 | if (p->pte & 0x1) { | ||
| 1208 | old_rr = ia64_get_rr(p->ifa); | ||
| 1209 | if (old_rr != p->rr) { | ||
| 1210 | ia64_set_rr(p->ifa, p->rr); | ||
| 1211 | ia64_srlz_d(); | ||
| 1212 | } | ||
| 1213 | ia64_ptr(iord, p->ifa, p->itir >> 2); | ||
| 1214 | ia64_srlz_i(); | ||
| 1215 | if (iord & 0x1) { | ||
| 1216 | ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2); | ||
| 1217 | ia64_srlz_i(); | ||
| 1218 | } | ||
| 1219 | if (iord & 0x2) { | ||
| 1220 | ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2); | ||
| 1221 | ia64_srlz_i(); | ||
| 1222 | } | ||
| 1223 | if (old_rr != p->rr) { | ||
| 1224 | ia64_set_rr(p->ifa, old_rr); | ||
| 1225 | ia64_srlz_d(); | ||
| 1226 | } | ||
| 1227 | } | ||
| 1228 | } | ||
| 1229 | ia64_set_psr(psr); | ||
| 1230 | } | ||
| 1231 | |||
| 1185 | /* | 1232 | /* |
| 1186 | * ia64_mca_handler | 1233 | * ia64_mca_handler |
| 1187 | * | 1234 | * |
| @@ -1266,16 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
| 1266 | } else { | 1313 | } else { |
| 1267 | /* Dump buffered message to console */ | 1314 | /* Dump buffered message to console */ |
| 1268 | ia64_mlogbuf_finish(1); | 1315 | ia64_mlogbuf_finish(1); |
| 1269 | #ifdef CONFIG_KEXEC | ||
| 1270 | atomic_set(&kdump_in_progress, 1); | ||
| 1271 | monarch_cpu = -1; | ||
| 1272 | #endif | ||
| 1273 | } | 1316 | } |
| 1317 | |||
| 1318 | if (__get_cpu_var(ia64_mca_tr_reload)) { | ||
| 1319 | mca_insert_tr(0x1); /* Reload dynamic itrs */ | ||
| 1320 | mca_insert_tr(0x2); /* Reload dynamic dtrs */ | ||
| 1321 | } | ||
| 1322 | |||
| 1274 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) | 1323 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) |
| 1275 | == NOTIFY_STOP) | 1324 | == NOTIFY_STOP) |
| 1276 | ia64_mca_spin(__func__); | 1325 | ia64_mca_spin(__func__); |
| 1277 | 1326 | ||
| 1278 | |||
| 1279 | if (atomic_dec_return(&mca_count) > 0) { | 1327 | if (atomic_dec_return(&mca_count) > 0) { |
| 1280 | int i; | 1328 | int i; |
| 1281 | 1329 | ||
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index 8bc7d259e0c6..a06d46548ff9 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
| @@ -219,8 +219,13 @@ ia64_reload_tr: | |||
| 219 | mov r20=IA64_TR_CURRENT_STACK | 219 | mov r20=IA64_TR_CURRENT_STACK |
| 220 | ;; | 220 | ;; |
| 221 | itr.d dtr[r20]=r16 | 221 | itr.d dtr[r20]=r16 |
| 222 | GET_THIS_PADDR(r2, ia64_mca_tr_reload) | ||
| 223 | mov r18 = 1 | ||
| 222 | ;; | 224 | ;; |
| 223 | srlz.d | 225 | srlz.d |
| 226 | ;; | ||
| 227 | st8 [r2] =r18 | ||
| 228 | ;; | ||
| 224 | 229 | ||
| 225 | done_tlb_purge_and_reload: | 230 | done_tlb_purge_and_reload: |
| 226 | 231 | ||
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index c9ac8bada786..7c548ac52bbc 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h | |||
| @@ -3,6 +3,18 @@ | |||
| 3 | 3 | ||
| 4 | #include "entry.h" | 4 | #include "entry.h" |
| 5 | 5 | ||
| 6 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 7 | /* read ar.itc in advance, and use it before leaving bank 0 */ | ||
| 8 | #define ACCOUNT_GET_STAMP \ | ||
| 9 | (pUStk) mov.m r20=ar.itc; | ||
| 10 | #define ACCOUNT_SYS_ENTER \ | ||
| 11 | (pUStk) br.call.spnt rp=account_sys_enter \ | ||
| 12 | ;; | ||
| 13 | #else | ||
| 14 | #define ACCOUNT_GET_STAMP | ||
| 15 | #define ACCOUNT_SYS_ENTER | ||
| 16 | #endif | ||
| 17 | |||
| 6 | /* | 18 | /* |
| 7 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | 19 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves |
| 8 | * the minimum state necessary that allows us to turn psr.ic back | 20 | * the minimum state necessary that allows us to turn psr.ic back |
| @@ -122,11 +134,13 @@ | |||
| 122 | ;; \ | 134 | ;; \ |
| 123 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ | 135 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ |
| 124 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ | 136 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ |
| 137 | ACCOUNT_GET_STAMP \ | ||
| 125 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ | 138 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ |
| 126 | ;; \ | 139 | ;; \ |
| 127 | EXTRA; \ | 140 | EXTRA; \ |
| 128 | movl r1=__gp; /* establish kernel global pointer */ \ | 141 | movl r1=__gp; /* establish kernel global pointer */ \ |
| 129 | ;; \ | 142 | ;; \ |
| 143 | ACCOUNT_SYS_ENTER \ | ||
| 130 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ | 144 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ |
| 131 | ;; | 145 | ;; |
| 132 | 146 | ||
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c index a78b45f5fe2f..c93420c97409 100644 --- a/arch/ia64/kernel/numa.c +++ b/arch/ia64/kernel/numa.c | |||
| @@ -73,7 +73,7 @@ void __init build_cpu_to_node_map(void) | |||
| 73 | for(node=0; node < MAX_NUMNODES; node++) | 73 | for(node=0; node < MAX_NUMNODES; node++) |
| 74 | cpus_clear(node_to_cpu_mask[node]); | 74 | cpus_clear(node_to_cpu_mask[node]); |
| 75 | 75 | ||
| 76 | for(cpu = 0; cpu < NR_CPUS; ++cpu) { | 76 | for_each_possible_early_cpu(cpu) { |
| 77 | node = -1; | 77 | node = -1; |
| 78 | for (i = 0; i < NR_CPUS; ++i) | 78 | for (i = 0; i < NR_CPUS; ++i) |
| 79 | if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { | 79 | if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { |
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index 2cb9425e0421..e0dca8743dbb 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c | |||
| @@ -135,10 +135,10 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) | |||
| 135 | 135 | ||
| 136 | while (offp < (s32 *) end) { | 136 | while (offp < (s32 *) end) { |
| 137 | wp = (u64 *) ia64_imva((char *) offp + *offp); | 137 | wp = (u64 *) ia64_imva((char *) offp + *offp); |
| 138 | wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */ | 138 | wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */ |
| 139 | wp[1] = 0x0004000000000200UL; | 139 | wp[1] = 0x0084006880000200UL; |
| 140 | wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */ | 140 | wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */ |
| 141 | wp[3] = 0x0084006880000200UL; | 141 | wp[3] = 0x0004000000000200UL; |
| 142 | ia64_fc(wp); ia64_fc(wp + 2); | 142 | ia64_fc(wp); ia64_fc(wp + 2); |
| 143 | ++offp; | 143 | ++offp; |
| 144 | } | 144 | } |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index a2aabfdc80d9..d1d24f4598da 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
| @@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx) | |||
| 4204 | do_each_thread (g, t) { | 4204 | do_each_thread (g, t) { |
| 4205 | if (t->thread.pfm_context == ctx) { | 4205 | if (t->thread.pfm_context == ctx) { |
| 4206 | ret = 0; | 4206 | ret = 0; |
| 4207 | break; | 4207 | goto out; |
| 4208 | } | 4208 | } |
| 4209 | } while_each_thread (g, t); | 4209 | } while_each_thread (g, t); |
| 4210 | 4210 | out: | |
| 4211 | read_unlock(&tasklist_lock); | 4211 | read_unlock(&tasklist_lock); |
| 4212 | 4212 | ||
| 4213 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); | 4213 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); |
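The break -> goto change matters because do_each_thread()/while_each_thread() expand to two nested loops, so a break only leaves the inner per-thread loop and the outer walk over thread-group leaders keeps scanning after a match; goto out exits both at once. The expansion is roughly:

    /* simplified from <linux/sched.h> of this era */
    #define do_each_thread(g, t) \
    	for (g = t = &init_task; (g = t = next_task(g)) != &init_task; ) do

    #define while_each_thread(g, t) \
    	while ((t = next_thread(t)) != g)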
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 49937a383b23..a5ea817cbcbf 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
| @@ -625,21 +625,6 @@ do_dump_fpu (struct unw_frame_info *info, void *arg) | |||
| 625 | do_dump_task_fpu(current, info, arg); | 625 | do_dump_task_fpu(current, info, arg); |
| 626 | } | 626 | } |
| 627 | 627 | ||
| 628 | int | ||
| 629 | dump_task_regs(struct task_struct *task, elf_gregset_t *regs) | ||
| 630 | { | ||
| 631 | struct unw_frame_info tcore_info; | ||
| 632 | |||
| 633 | if (current == task) { | ||
| 634 | unw_init_running(do_copy_regs, regs); | ||
| 635 | } else { | ||
| 636 | memset(&tcore_info, 0, sizeof(tcore_info)); | ||
| 637 | unw_init_from_blocked_task(&tcore_info, task); | ||
| 638 | do_copy_task_regs(task, &tcore_info, regs); | ||
| 639 | } | ||
| 640 | return 1; | ||
| 641 | } | ||
| 642 | |||
| 643 | void | 628 | void |
| 644 | ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) | 629 | ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) |
| 645 | { | 630 | { |
| @@ -647,21 +632,6 @@ ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) | |||
| 647 | } | 632 | } |
| 648 | 633 | ||
| 649 | int | 634 | int |
| 650 | dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst) | ||
| 651 | { | ||
| 652 | struct unw_frame_info tcore_info; | ||
| 653 | |||
| 654 | if (current == task) { | ||
| 655 | unw_init_running(do_dump_fpu, dst); | ||
| 656 | } else { | ||
| 657 | memset(&tcore_info, 0, sizeof(tcore_info)); | ||
| 658 | unw_init_from_blocked_task(&tcore_info, task); | ||
| 659 | do_dump_task_fpu(task, &tcore_info, dst); | ||
| 660 | } | ||
| 661 | return 1; | ||
| 662 | } | ||
| 663 | |||
| 664 | int | ||
| 665 | dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) | 635 | dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) |
| 666 | { | 636 | { |
| 667 | unw_init_running(do_dump_fpu, dst); | 637 | unw_init_running(do_dump_fpu, dst); |
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index ab784ec4319d..2a9943b5947f 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
| @@ -3,6 +3,9 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (C) 1999-2005 Hewlett-Packard Co | 4 | * Copyright (C) 1999-2005 Hewlett-Packard Co |
| 5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
| 6 | * Copyright (C) 2006 Intel Co | ||
| 7 | * 2006-08-12 - IA64 Native Utrace implementation support added by | ||
| 8 | * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | ||
| 6 | * | 9 | * |
| 7 | * Derived from the x86 and Alpha versions. | 10 | * Derived from the x86 and Alpha versions. |
| 8 | */ | 11 | */ |
| @@ -17,6 +20,8 @@ | |||
| 17 | #include <linux/security.h> | 20 | #include <linux/security.h> |
| 18 | #include <linux/audit.h> | 21 | #include <linux/audit.h> |
| 19 | #include <linux/signal.h> | 22 | #include <linux/signal.h> |
| 23 | #include <linux/regset.h> | ||
| 24 | #include <linux/elf.h> | ||
| 20 | 25 | ||
| 21 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
| 22 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
| @@ -740,25 +745,6 @@ ia64_sync_fph (struct task_struct *task) | |||
| 740 | psr->dfh = 1; | 745 | psr->dfh = 1; |
| 741 | } | 746 | } |
| 742 | 747 | ||
| 743 | static int | ||
| 744 | access_fr (struct unw_frame_info *info, int regnum, int hi, | ||
| 745 | unsigned long *data, int write_access) | ||
| 746 | { | ||
| 747 | struct ia64_fpreg fpval; | ||
| 748 | int ret; | ||
| 749 | |||
| 750 | ret = unw_get_fr(info, regnum, &fpval); | ||
| 751 | if (ret < 0) | ||
| 752 | return ret; | ||
| 753 | |||
| 754 | if (write_access) { | ||
| 755 | fpval.u.bits[hi] = *data; | ||
| 756 | ret = unw_set_fr(info, regnum, fpval); | ||
| 757 | } else | ||
| 758 | *data = fpval.u.bits[hi]; | ||
| 759 | return ret; | ||
| 760 | } | ||
| 761 | |||
| 762 | /* | 748 | /* |
| 763 | * Change the machine-state of CHILD such that it will return via the normal | 749 | * Change the machine-state of CHILD such that it will return via the normal |
| 764 | * kernel exit-path, rather than the syscall-exit path. | 750 | * kernel exit-path, rather than the syscall-exit path. |
| @@ -860,309 +846,7 @@ access_nat_bits (struct task_struct *child, struct pt_regs *pt, | |||
| 860 | 846 | ||
| 861 | static int | 847 | static int |
| 862 | access_uarea (struct task_struct *child, unsigned long addr, | 848 | access_uarea (struct task_struct *child, unsigned long addr, |
| 863 | unsigned long *data, int write_access) | 849 | unsigned long *data, int write_access); |
| 864 | { | ||
| 865 | unsigned long *ptr, regnum, urbs_end, cfm; | ||
| 866 | struct switch_stack *sw; | ||
| 867 | struct pt_regs *pt; | ||
| 868 | # define pt_reg_addr(pt, reg) ((void *) \ | ||
| 869 | ((unsigned long) (pt) \ | ||
| 870 | + offsetof(struct pt_regs, reg))) | ||
| 871 | |||
| 872 | |||
| 873 | pt = task_pt_regs(child); | ||
| 874 | sw = (struct switch_stack *) (child->thread.ksp + 16); | ||
| 875 | |||
| 876 | if ((addr & 0x7) != 0) { | ||
| 877 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | ||
| 878 | return -1; | ||
| 879 | } | ||
| 880 | |||
| 881 | if (addr < PT_F127 + 16) { | ||
| 882 | /* accessing fph */ | ||
| 883 | if (write_access) | ||
| 884 | ia64_sync_fph(child); | ||
| 885 | else | ||
| 886 | ia64_flush_fph(child); | ||
| 887 | ptr = (unsigned long *) | ||
| 888 | ((unsigned long) &child->thread.fph + addr); | ||
| 889 | } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) { | ||
| 890 | /* scratch registers untouched by kernel (saved in pt_regs) */ | ||
| 891 | ptr = pt_reg_addr(pt, f10) + (addr - PT_F10); | ||
| 892 | } else if (addr >= PT_F12 && addr < PT_F15 + 16) { | ||
| 893 | /* | ||
| 894 | * Scratch registers untouched by kernel (saved in | ||
| 895 | * switch_stack). | ||
| 896 | */ | ||
| 897 | ptr = (unsigned long *) ((long) sw | ||
| 898 | + (addr - PT_NAT_BITS - 32)); | ||
| 899 | } else if (addr < PT_AR_LC + 8) { | ||
| 900 | /* preserved state: */ | ||
| 901 | struct unw_frame_info info; | ||
| 902 | char nat = 0; | ||
| 903 | int ret; | ||
| 904 | |||
| 905 | unw_init_from_blocked_task(&info, child); | ||
| 906 | if (unw_unwind_to_user(&info) < 0) | ||
| 907 | return -1; | ||
| 908 | |||
| 909 | switch (addr) { | ||
| 910 | case PT_NAT_BITS: | ||
| 911 | return access_nat_bits(child, pt, &info, | ||
| 912 | data, write_access); | ||
| 913 | |||
| 914 | case PT_R4: case PT_R5: case PT_R6: case PT_R7: | ||
| 915 | if (write_access) { | ||
| 916 | /* read NaT bit first: */ | ||
| 917 | unsigned long dummy; | ||
| 918 | |||
| 919 | ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, | ||
| 920 | &dummy, &nat); | ||
| 921 | if (ret < 0) | ||
| 922 | return ret; | ||
| 923 | } | ||
| 924 | return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, | ||
| 925 | &nat, write_access); | ||
| 926 | |||
| 927 | case PT_B1: case PT_B2: case PT_B3: | ||
| 928 | case PT_B4: case PT_B5: | ||
| 929 | return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, | ||
| 930 | write_access); | ||
| 931 | |||
| 932 | case PT_AR_EC: | ||
| 933 | return unw_access_ar(&info, UNW_AR_EC, data, | ||
| 934 | write_access); | ||
| 935 | |||
| 936 | case PT_AR_LC: | ||
| 937 | return unw_access_ar(&info, UNW_AR_LC, data, | ||
| 938 | write_access); | ||
| 939 | |||
| 940 | default: | ||
| 941 | if (addr >= PT_F2 && addr < PT_F5 + 16) | ||
| 942 | return access_fr(&info, (addr - PT_F2)/16 + 2, | ||
| 943 | (addr & 8) != 0, data, | ||
| 944 | write_access); | ||
| 945 | else if (addr >= PT_F16 && addr < PT_F31 + 16) | ||
| 946 | return access_fr(&info, | ||
| 947 | (addr - PT_F16)/16 + 16, | ||
| 948 | (addr & 8) != 0, | ||
| 949 | data, write_access); | ||
| 950 | else { | ||
| 951 | dprintk("ptrace: rejecting access to register " | ||
| 952 | "address 0x%lx\n", addr); | ||
| 953 | return -1; | ||
| 954 | } | ||
| 955 | } | ||
| 956 | } else if (addr < PT_F9+16) { | ||
| 957 | /* scratch state */ | ||
| 958 | switch (addr) { | ||
| 959 | case PT_AR_BSP: | ||
| 960 | /* | ||
| 961 | * By convention, we use PT_AR_BSP to refer to | ||
| 962 | * the end of the user-level backing store. | ||
| 963 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | ||
| 964 | * to get the real value of ar.bsp at the time | ||
| 965 | * the kernel was entered. | ||
| 966 | * | ||
| 967 | * Furthermore, when changing the contents of | ||
| 968 | * PT_AR_BSP (or PT_CFM) while the task is | ||
| 969 | * blocked in a system call, convert the state | ||
| 970 | * so that the non-system-call exit | ||
| 971 | * path is used. This ensures that the proper | ||
| 972 | * state will be picked up when resuming | ||
| 973 | * execution. However, it *also* means that | ||
| 974 | * once we write PT_AR_BSP/PT_CFM, it won't be | ||
| 975 | * possible to modify the syscall arguments of | ||
| 976 | * the pending system call any longer. This | ||
| 977 | * shouldn't be an issue because modifying | ||
| 978 | * PT_AR_BSP/PT_CFM generally implies that | ||
| 979 | * we're either abandoning the pending system | ||
| 980 | * call or that we defer its re-execution | ||
| 981 | * (e.g., due to GDB doing an inferior | ||
| 982 | * function call). | ||
| 983 | */ | ||
| 984 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | ||
| 985 | if (write_access) { | ||
| 986 | if (*data != urbs_end) { | ||
| 987 | if (in_syscall(pt)) | ||
| 988 | convert_to_non_syscall(child, | ||
| 989 | pt, | ||
| 990 | cfm); | ||
| 991 | /* | ||
| 992 | * Simulate user-level write | ||
| 993 | * of ar.bsp: | ||
| 994 | */ | ||
| 995 | pt->loadrs = 0; | ||
| 996 | pt->ar_bspstore = *data; | ||
| 997 | } | ||
| 998 | } else | ||
| 999 | *data = urbs_end; | ||
| 1000 | return 0; | ||
| 1001 | |||
| 1002 | case PT_CFM: | ||
| 1003 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | ||
| 1004 | if (write_access) { | ||
| 1005 | if (((cfm ^ *data) & PFM_MASK) != 0) { | ||
| 1006 | if (in_syscall(pt)) | ||
| 1007 | convert_to_non_syscall(child, | ||
| 1008 | pt, | ||
| 1009 | cfm); | ||
| 1010 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | ||
| 1011 | | (*data & PFM_MASK)); | ||
| 1012 | } | ||
| 1013 | } else | ||
| 1014 | *data = cfm; | ||
| 1015 | return 0; | ||
| 1016 | |||
| 1017 | case PT_CR_IPSR: | ||
| 1018 | if (write_access) { | ||
| 1019 | unsigned long tmp = *data; | ||
| 1020 | /* psr.ri==3 is a reserved value: SDM 2:25 */ | ||
| 1021 | if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) | ||
| 1022 | tmp &= ~IA64_PSR_RI; | ||
| 1023 | pt->cr_ipsr = ((tmp & IPSR_MASK) | ||
| 1024 | | (pt->cr_ipsr & ~IPSR_MASK)); | ||
| 1025 | } else | ||
| 1026 | *data = (pt->cr_ipsr & IPSR_MASK); | ||
| 1027 | return 0; | ||
| 1028 | |||
| 1029 | case PT_AR_RSC: | ||
| 1030 | if (write_access) | ||
| 1031 | pt->ar_rsc = *data | (3 << 2); /* force PL3 */ | ||
| 1032 | else | ||
| 1033 | *data = pt->ar_rsc; | ||
| 1034 | return 0; | ||
| 1035 | |||
| 1036 | case PT_AR_RNAT: | ||
| 1037 | ptr = pt_reg_addr(pt, ar_rnat); | ||
| 1038 | break; | ||
| 1039 | case PT_R1: | ||
| 1040 | ptr = pt_reg_addr(pt, r1); | ||
| 1041 | break; | ||
| 1042 | case PT_R2: case PT_R3: | ||
| 1043 | ptr = pt_reg_addr(pt, r2) + (addr - PT_R2); | ||
| 1044 | break; | ||
| 1045 | case PT_R8: case PT_R9: case PT_R10: case PT_R11: | ||
| 1046 | ptr = pt_reg_addr(pt, r8) + (addr - PT_R8); | ||
| 1047 | break; | ||
| 1048 | case PT_R12: case PT_R13: | ||
| 1049 | ptr = pt_reg_addr(pt, r12) + (addr - PT_R12); | ||
| 1050 | break; | ||
| 1051 | case PT_R14: | ||
| 1052 | ptr = pt_reg_addr(pt, r14); | ||
| 1053 | break; | ||
| 1054 | case PT_R15: | ||
| 1055 | ptr = pt_reg_addr(pt, r15); | ||
| 1056 | break; | ||
| 1057 | case PT_R16: case PT_R17: case PT_R18: case PT_R19: | ||
| 1058 | case PT_R20: case PT_R21: case PT_R22: case PT_R23: | ||
| 1059 | case PT_R24: case PT_R25: case PT_R26: case PT_R27: | ||
| 1060 | case PT_R28: case PT_R29: case PT_R30: case PT_R31: | ||
| 1061 | ptr = pt_reg_addr(pt, r16) + (addr - PT_R16); | ||
| 1062 | break; | ||
| 1063 | case PT_B0: | ||
| 1064 | ptr = pt_reg_addr(pt, b0); | ||
| 1065 | break; | ||
| 1066 | case PT_B6: | ||
| 1067 | ptr = pt_reg_addr(pt, b6); | ||
| 1068 | break; | ||
| 1069 | case PT_B7: | ||
| 1070 | ptr = pt_reg_addr(pt, b7); | ||
| 1071 | break; | ||
| 1072 | case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8: | ||
| 1073 | case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8: | ||
| 1074 | ptr = pt_reg_addr(pt, f6) + (addr - PT_F6); | ||
| 1075 | break; | ||
| 1076 | case PT_AR_BSPSTORE: | ||
| 1077 | ptr = pt_reg_addr(pt, ar_bspstore); | ||
| 1078 | break; | ||
| 1079 | case PT_AR_UNAT: | ||
| 1080 | ptr = pt_reg_addr(pt, ar_unat); | ||
| 1081 | break; | ||
| 1082 | case PT_AR_PFS: | ||
| 1083 | ptr = pt_reg_addr(pt, ar_pfs); | ||
| 1084 | break; | ||
| 1085 | case PT_AR_CCV: | ||
| 1086 | ptr = pt_reg_addr(pt, ar_ccv); | ||
| 1087 | break; | ||
| 1088 | case PT_AR_FPSR: | ||
| 1089 | ptr = pt_reg_addr(pt, ar_fpsr); | ||
| 1090 | break; | ||
| 1091 | case PT_CR_IIP: | ||
| 1092 | ptr = pt_reg_addr(pt, cr_iip); | ||
| 1093 | break; | ||
| 1094 | case PT_PR: | ||
| 1095 | ptr = pt_reg_addr(pt, pr); | ||
| 1096 | break; | ||
| 1097 | /* scratch register */ | ||
| 1098 | |||
| 1099 | default: | ||
| 1100 | /* disallow accessing anything else... */ | ||
| 1101 | dprintk("ptrace: rejecting access to register " | ||
| 1102 | "address 0x%lx\n", addr); | ||
| 1103 | return -1; | ||
| 1104 | } | ||
| 1105 | } else if (addr <= PT_AR_SSD) { | ||
| 1106 | ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD); | ||
| 1107 | } else { | ||
| 1108 | /* access debug registers */ | ||
| 1109 | |||
| 1110 | if (addr >= PT_IBR) { | ||
| 1111 | regnum = (addr - PT_IBR) >> 3; | ||
| 1112 | ptr = &child->thread.ibr[0]; | ||
| 1113 | } else { | ||
| 1114 | regnum = (addr - PT_DBR) >> 3; | ||
| 1115 | ptr = &child->thread.dbr[0]; | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | if (regnum >= 8) { | ||
| 1119 | dprintk("ptrace: rejecting access to register " | ||
| 1120 | "address 0x%lx\n", addr); | ||
| 1121 | return -1; | ||
| 1122 | } | ||
| 1123 | #ifdef CONFIG_PERFMON | ||
| 1124 | /* | ||
| 1125 | * Check if debug registers are used by perfmon. This | ||
| 1126 | * test must be done once we know that we can do the | ||
| 1127 | * operation, i.e. the arguments are all valid, but | ||
| 1128 | * before we start modifying the state. | ||
| 1129 | * | ||
| 1130 | * Perfmon needs to keep a count of how many processes | ||
| 1131 | * are trying to modify the debug registers for system | ||
| 1132 | * wide monitoring sessions. | ||
| 1133 | * | ||
| 1134 | * We also include read accesses here, because they may | ||
| 1135 | * cause the PMU-installed debug register state | ||
| 1136 | * (dbr[], ibr[]) to be reset. The two arrays are also | ||
| 1137 | * used by perfmon, but we do not use | ||
| 1138 | * IA64_THREAD_DBG_VALID. The registers are restored | ||
| 1139 | * by the PMU context switch code. | ||
| 1140 | */ | ||
| 1141 | if (pfm_use_debug_registers(child)) return -1; | ||
| 1142 | #endif | ||
| 1143 | |||
| 1144 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | ||
| 1145 | child->thread.flags |= IA64_THREAD_DBG_VALID; | ||
| 1146 | memset(child->thread.dbr, 0, | ||
| 1147 | sizeof(child->thread.dbr)); | ||
| 1148 | memset(child->thread.ibr, 0, | ||
| 1149 | sizeof(child->thread.ibr)); | ||
| 1150 | } | ||
| 1151 | |||
| 1152 | ptr += regnum; | ||
| 1153 | |||
| 1154 | if ((regnum & 1) && write_access) { | ||
| 1155 | /* don't let the user set kernel-level breakpoints: */ | ||
| 1156 | *ptr = *data & ~(7UL << 56); | ||
| 1157 | return 0; | ||
| 1158 | } | ||
| 1159 | } | ||
| 1160 | if (write_access) | ||
| 1161 | *ptr = *data; | ||
| 1162 | else | ||
| 1163 | *data = *ptr; | ||
| 1164 | return 0; | ||
| 1165 | } | ||
| 1166 | 850 | ||
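The PT_AR_BSP convention spelled out in the comment above is easier to see from the tracer's side. A minimal sketch, assuming the usual asm/ptrace_offsets.h constants and mirroring the ia64_rse_skip_regs() arithmetic from asm/rse.h (the helper name and the CFM_SOF macro here are illustrative):

    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* sof (size of current frame) is the low 7 bits of CFM */
    #define CFM_SOF(cfm)	((cfm) & 0x7f)

    /* Walk backwards over n stacked registers, skipping one
     * NaT-collection slot per 63 register slots, as asm/rse.h does. */
    static unsigned long *rse_skip_regs(unsigned long *addr, long n)
    {
    	long delta = (((unsigned long) addr >> 3) & 0x3f) + n;

    	if (n < 0)
    		delta -= 0x3e;
    	return addr + n + delta / 0x3f;
    }

    /* Recover ar.bsp as it was at kernel entry from what a tracer
     * reads at PT_AR_BSP (end of user backing store) and PT_CFM. */
    static unsigned long entry_time_bsp(pid_t pid)
    {
    	unsigned long bsp = ptrace(PTRACE_PEEKUSER, pid, (void *) PT_AR_BSP, 0);
    	unsigned long cfm = ptrace(PTRACE_PEEKUSER, pid, (void *) PT_CFM, 0);

    	return (unsigned long) rse_skip_regs((unsigned long *) bsp,
    					     -(long) CFM_SOF(cfm));
    }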
| 1167 | static long | 851 | static long |
| 1168 | ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | 852 | ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) |
| @@ -1626,3 +1310,892 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | |||
| 1626 | if (test_thread_flag(TIF_RESTORE_RSE)) | 1310 | if (test_thread_flag(TIF_RESTORE_RSE)) |
| 1627 | ia64_sync_krbs(); | 1311 | ia64_sync_krbs(); |
| 1628 | } | 1312 | } |
| 1313 | |||
| 1314 | /* Utrace implementation starts here */ | ||
| 1315 | struct regset_get { | ||
| 1316 | void *kbuf; | ||
| 1317 | void __user *ubuf; | ||
| 1318 | }; | ||
| 1319 | |||
| 1320 | struct regset_set { | ||
| 1321 | const void *kbuf; | ||
| 1322 | const void __user *ubuf; | ||
| 1323 | }; | ||
| 1324 | |||
| 1325 | struct regset_getset { | ||
| 1326 | struct task_struct *target; | ||
| 1327 | const struct user_regset *regset; | ||
| 1328 | union { | ||
| 1329 | struct regset_get get; | ||
| 1330 | struct regset_set set; | ||
| 1331 | } u; | ||
| 1332 | unsigned int pos; | ||
| 1333 | unsigned int count; | ||
| 1334 | int ret; | ||
| 1335 | }; | ||
| 1336 | |||
| 1337 | static int | ||
| 1338 | access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info, | ||
| 1339 | unsigned long addr, unsigned long *data, int write_access) | ||
| 1340 | { | ||
| 1341 | struct pt_regs *pt; | ||
| 1342 | unsigned long *ptr = NULL; | ||
| 1343 | int ret; | ||
| 1344 | char nat = 0; | ||
| 1345 | |||
| 1346 | pt = task_pt_regs(target); | ||
| 1347 | switch (addr) { | ||
| 1348 | case ELF_GR_OFFSET(1): | ||
| 1349 | ptr = &pt->r1; | ||
| 1350 | break; | ||
| 1351 | case ELF_GR_OFFSET(2): | ||
| 1352 | case ELF_GR_OFFSET(3): | ||
| 1353 | ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2)); | ||
| 1354 | break; | ||
| 1355 | case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7): | ||
| 1356 | if (write_access) { | ||
| 1357 | /* read NaT bit first: */ | ||
| 1358 | unsigned long dummy; | ||
| 1359 | |||
| 1360 | ret = unw_get_gr(info, addr/8, &dummy, &nat); | ||
| 1361 | if (ret < 0) | ||
| 1362 | return ret; | ||
| 1363 | } | ||
| 1364 | return unw_access_gr(info, addr/8, data, &nat, write_access); | ||
| 1365 | case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11): | ||
| 1366 | ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8); | ||
| 1367 | break; | ||
| 1368 | case ELF_GR_OFFSET(12): | ||
| 1369 | case ELF_GR_OFFSET(13): | ||
| 1370 | ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12); | ||
| 1371 | break; | ||
| 1372 | case ELF_GR_OFFSET(14): | ||
| 1373 | ptr = &pt->r14; | ||
| 1374 | break; | ||
| 1375 | case ELF_GR_OFFSET(15): | ||
| 1376 | ptr = &pt->r15; | ||
| 1377 | } | ||
| 1378 | if (write_access) | ||
| 1379 | *ptr = *data; | ||
| 1380 | else | ||
| 1381 | *data = *ptr; | ||
| 1382 | return 0; | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | static int | ||
| 1386 | access_elf_breg(struct task_struct *target, struct unw_frame_info *info, | ||
| 1387 | unsigned long addr, unsigned long *data, int write_access) | ||
| 1388 | { | ||
| 1389 | struct pt_regs *pt; | ||
| 1390 | unsigned long *ptr = NULL; | ||
| 1391 | |||
| 1392 | pt = task_pt_regs(target); | ||
| 1393 | switch (addr) { | ||
| 1394 | case ELF_BR_OFFSET(0): | ||
| 1395 | ptr = &pt->b0; | ||
| 1396 | break; | ||
| 1397 | case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5): | ||
| 1398 | return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8, | ||
| 1399 | data, write_access); | ||
| 1400 | case ELF_BR_OFFSET(6): | ||
| 1401 | ptr = &pt->b6; | ||
| 1402 | break; | ||
| 1403 | case ELF_BR_OFFSET(7): | ||
| 1404 | ptr = &pt->b7; | ||
| 1405 | } | ||
| 1406 | if (write_access) | ||
| 1407 | *ptr = *data; | ||
| 1408 | else | ||
| 1409 | *data = *ptr; | ||
| 1410 | return 0; | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | static int | ||
| 1414 | access_elf_areg(struct task_struct *target, struct unw_frame_info *info, | ||
| 1415 | unsigned long addr, unsigned long *data, int write_access) | ||
| 1416 | { | ||
| 1417 | struct pt_regs *pt; | ||
| 1418 | unsigned long cfm, urbs_end; | ||
| 1419 | unsigned long *ptr = NULL; | ||
| 1420 | |||
| 1421 | pt = task_pt_regs(target); | ||
| 1422 | if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) { | ||
| 1423 | switch (addr) { | ||
| 1424 | case ELF_AR_RSC_OFFSET: | ||
| 1425 | /* force PL3 */ | ||
| 1426 | if (write_access) | ||
| 1427 | pt->ar_rsc = *data | (3 << 2); | ||
| 1428 | else | ||
| 1429 | *data = pt->ar_rsc; | ||
| 1430 | return 0; | ||
| 1431 | case ELF_AR_BSP_OFFSET: | ||
| 1432 | /* | ||
| 1433 | * By convention, we use PT_AR_BSP to refer to | ||
| 1434 | * the end of the user-level backing store. | ||
| 1435 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | ||
| 1436 | * to get the real value of ar.bsp at the time | ||
| 1437 | * the kernel was entered. | ||
| 1438 | * | ||
| 1439 | * Furthermore, when changing the contents of | ||
| 1440 | * PT_AR_BSP (or PT_CFM) while the task is | ||
| 1441 | * blocked in a system call, convert the state | ||
| 1442 | * so that the non-system-call exit | ||
| 1443 | * path is used. This ensures that the proper | ||
| 1444 | * state will be picked up when resuming | ||
| 1445 | * execution. However, it *also* means that | ||
| 1446 | * once we write PT_AR_BSP/PT_CFM, it won't be | ||
| 1447 | * possible to modify the syscall arguments of | ||
| 1448 | * the pending system call any longer. This | ||
| 1449 | * shouldn't be an issue because modifying | ||
| 1450 | * PT_AR_BSP/PT_CFM generally implies that | ||
| 1451 | * we're either abandoning the pending system | ||
| 1452 | * call or that we defer its re-execution | ||
| 1453 | * (e.g., due to GDB doing an inferior | ||
| 1454 | * function call). | ||
| 1455 | */ | ||
| 1456 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | ||
| 1457 | if (write_access) { | ||
| 1458 | if (*data != urbs_end) { | ||
| 1459 | if (in_syscall(pt)) | ||
| 1460 | convert_to_non_syscall(target, | ||
| 1461 | pt, | ||
| 1462 | cfm); | ||
| 1463 | /* | ||
| 1464 | * Simulate user-level write | ||
| 1465 | * of ar.bsp: | ||
| 1466 | */ | ||
| 1467 | pt->loadrs = 0; | ||
| 1468 | pt->ar_bspstore = *data; | ||
| 1469 | } | ||
| 1470 | } else | ||
| 1471 | *data = urbs_end; | ||
| 1472 | return 0; | ||
| 1473 | case ELF_AR_BSPSTORE_OFFSET: | ||
| 1474 | ptr = &pt->ar_bspstore; | ||
| 1475 | break; | ||
| 1476 | case ELF_AR_RNAT_OFFSET: | ||
| 1477 | ptr = &pt->ar_rnat; | ||
| 1478 | break; | ||
| 1479 | case ELF_AR_CCV_OFFSET: | ||
| 1480 | ptr = &pt->ar_ccv; | ||
| 1481 | break; | ||
| 1482 | case ELF_AR_UNAT_OFFSET: | ||
| 1483 | ptr = &pt->ar_unat; | ||
| 1484 | break; | ||
| 1485 | case ELF_AR_FPSR_OFFSET: | ||
| 1486 | ptr = &pt->ar_fpsr; | ||
| 1487 | break; | ||
| 1488 | case ELF_AR_PFS_OFFSET: | ||
| 1489 | ptr = &pt->ar_pfs; | ||
| 1490 | break; | ||
| 1491 | case ELF_AR_LC_OFFSET: | ||
| 1492 | return unw_access_ar(info, UNW_AR_LC, data, | ||
| 1493 | write_access); | ||
| 1494 | case ELF_AR_EC_OFFSET: | ||
| 1495 | return unw_access_ar(info, UNW_AR_EC, data, | ||
| 1496 | write_access); | ||
| 1497 | case ELF_AR_CSD_OFFSET: | ||
| 1498 | ptr = &pt->ar_csd; | ||
| 1499 | break; | ||
| 1500 | case ELF_AR_SSD_OFFSET: | ||
| 1501 | ptr = &pt->ar_ssd; | ||
| 1502 | } | ||
| 1503 | } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) { | ||
| 1504 | switch (addr) { | ||
| 1505 | case ELF_CR_IIP_OFFSET: | ||
| 1506 | ptr = &pt->cr_iip; | ||
| 1507 | break; | ||
| 1508 | case ELF_CFM_OFFSET: | ||
| 1509 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | ||
| 1510 | if (write_access) { | ||
| 1511 | if (((cfm ^ *data) & PFM_MASK) != 0) { | ||
| 1512 | if (in_syscall(pt)) | ||
| 1513 | convert_to_non_syscall(target, | ||
| 1514 | pt, | ||
| 1515 | cfm); | ||
| 1516 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | ||
| 1517 | | (*data & PFM_MASK)); | ||
| 1518 | } | ||
| 1519 | } else | ||
| 1520 | *data = cfm; | ||
| 1521 | return 0; | ||
| 1522 | case ELF_CR_IPSR_OFFSET: | ||
| 1523 | if (write_access) { | ||
| 1524 | unsigned long tmp = *data; | ||
| 1525 | /* psr.ri==3 is a reserved value: SDM 2:25 */ | ||
| 1526 | if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) | ||
| 1527 | tmp &= ~IA64_PSR_RI; | ||
| 1528 | pt->cr_ipsr = ((tmp & IPSR_MASK) | ||
| 1529 | | (pt->cr_ipsr & ~IPSR_MASK)); | ||
| 1530 | } else | ||
| 1531 | *data = (pt->cr_ipsr & IPSR_MASK); | ||
| 1532 | return 0; | ||
| 1533 | } | ||
| 1534 | } else if (addr == ELF_NAT_OFFSET) | ||
| 1535 | return access_nat_bits(target, pt, info, | ||
| 1536 | data, write_access); | ||
| 1537 | else if (addr == ELF_PR_OFFSET) | ||
| 1538 | ptr = &pt->pr; | ||
| 1539 | else | ||
| 1540 | return -1; | ||
| 1541 | |||
| 1542 | if (write_access) | ||
| 1543 | *ptr = *data; | ||
| 1544 | else | ||
| 1545 | *data = *ptr; | ||
| 1546 | |||
| 1547 | return 0; | ||
| 1548 | } | ||
| 1549 | |||
| 1550 | static int | ||
| 1551 | access_elf_reg(struct task_struct *target, struct unw_frame_info *info, | ||
| 1552 | unsigned long addr, unsigned long *data, int write_access) | ||
| 1553 | { | ||
| 1554 | if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15)) | ||
| 1555 | return access_elf_gpreg(target, info, addr, data, write_access); | ||
| 1556 | else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7)) | ||
| 1557 | return access_elf_breg(target, info, addr, data, write_access); | ||
| 1558 | else | ||
| 1559 | return access_elf_areg(target, info, addr, data, write_access); | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | void do_gpregs_get(struct unw_frame_info *info, void *arg) | ||
| 1563 | { | ||
| 1564 | struct pt_regs *pt; | ||
| 1565 | struct regset_getset *dst = arg; | ||
| 1566 | elf_greg_t tmp[16]; | ||
| 1567 | unsigned int i, index, min_copy; | ||
| 1568 | |||
| 1569 | if (unw_unwind_to_user(info) < 0) | ||
| 1570 | return; | ||
| 1571 | |||
| 1572 | /* | ||
| 1573 | * coredump format: | ||
| 1574 | * r0-r31 | ||
| 1575 | * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) | ||
| 1576 | * predicate registers (p0-p63) | ||
| 1577 | * b0-b7 | ||
| 1578 | * ip cfm user-mask | ||
| 1579 | * ar.rsc ar.bsp ar.bspstore ar.rnat | ||
| 1580 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec | ||
| 1581 | */ | ||
| 1582 | |||
| 1583 | |||
| 1584 | /* Skip r0 */ | ||
| 1585 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | ||
| 1586 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | ||
| 1587 | &dst->u.get.kbuf, | ||
| 1588 | &dst->u.get.ubuf, | ||
| 1589 | 0, ELF_GR_OFFSET(1)); | ||
| 1590 | if (dst->ret || dst->count == 0) | ||
| 1591 | return; | ||
| 1592 | } | ||
| 1593 | |||
| 1594 | /* gr1 - gr15 */ | ||
| 1595 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | ||
| 1596 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | ||
| 1597 | min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ? | ||
| 1598 | (dst->pos + dst->count) : ELF_GR_OFFSET(16); | ||
| 1599 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | ||
| 1600 | index++) | ||
| 1601 | if (access_elf_reg(dst->target, info, i, | ||
| 1602 | &tmp[index], 0) < 0) { | ||
| 1603 | dst->ret = -EIO; | ||
| 1604 | return; | ||
| 1605 | } | ||
| 1606 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
| 1607 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
| 1608 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | ||
| 1609 | if (dst->ret || dst->count == 0) | ||
| 1610 | return; | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | /* r16-r31 */ | ||
| 1614 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | ||
| 1615 | pt = task_pt_regs(dst->target); | ||
| 1616 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
| 1617 | &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16, | ||
| 1618 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | ||
| 1619 | if (dst->ret || dst->count == 0) | ||
| 1620 | return; | ||
| 1621 | } | ||
| 1622 | |||
| 1623 | /* nat, pr, b0 - b7 */ | ||
| 1624 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | ||
| 1625 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | ||
| 1626 | min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ? | ||
| 1627 | (dst->pos + dst->count) : ELF_CR_IIP_OFFSET; | ||
| 1628 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | ||
| 1629 | index++) | ||
| 1630 | if (access_elf_reg(dst->target, info, i, | ||
| 1631 | &tmp[index], 0) < 0) { | ||
| 1632 | dst->ret = -EIO; | ||
| 1633 | return; | ||
| 1634 | } | ||
| 1635 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
| 1636 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
| 1637 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | ||
| 1638 | if (dst->ret || dst->count == 0) | ||
| 1639 | return; | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | ||
| 1643 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | ||
| 1644 | */ | ||
| 1645 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | ||
| 1646 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | ||
| 1647 | min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ? | ||
| 1648 | (dst->pos + dst->count) : ELF_AR_END_OFFSET; | ||
| 1649 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | ||
| 1650 | index++) | ||
| 1651 | if (access_elf_reg(dst->target, info, i, | ||
| 1652 | &tmp[index], 0) < 0) { | ||
| 1653 | dst->ret = -EIO; | ||
| 1654 | return; | ||
| 1655 | } | ||
| 1656 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
| 1657 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
| 1658 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | ||
| 1659 | } | ||
| 1660 | } | ||
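do_gpregs_get() leans on user_regset_copyout() to slide the pos/count window across the layout shown in the coredump-format comment. A simplified model of that helper's semantics (kernel-buffer path only; the real inline in include/linux/regset.h also handles the __user buffer and returns -EFAULT on copy failure):

    #include <string.h>

    /* Copy the part of [start_pos, end_pos) that overlaps the remaining
     * request window [*pos, *pos + *count), then advance the window;
     * end_pos < 0 means "through the end of the data". */
    static int copyout_model(unsigned int *pos, unsigned int *count,
    			 void **kbuf, const void *data,
    			 int start_pos, int end_pos)
    {
    	unsigned int avail, copy;

    	if (*count == 0 || (end_pos >= 0 && *pos >= (unsigned int) end_pos))
    		return 0;
    	avail = (end_pos < 0) ? *count : (unsigned int) end_pos - *pos;
    	copy = avail < *count ? avail : *count;
    	memcpy(*kbuf, (const char *) data + (*pos - start_pos), copy);
    	*kbuf = (char *) *kbuf + copy;
    	*pos += copy;
    	*count -= copy;
    	return 0;
    }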
| 1661 | |||
| 1662 | void do_gpregs_set(struct unw_frame_info *info, void *arg) | ||
| 1663 | { | ||
| 1664 | struct pt_regs *pt; | ||
| 1665 | struct regset_getset *dst = arg; | ||
| 1666 | elf_greg_t tmp[16]; | ||
| 1667 | unsigned int i, index; | ||
| 1668 | |||
| 1669 | if (unw_unwind_to_user(info) < 0) | ||
| 1670 | return; | ||
| 1671 | |||
| 1672 | /* Skip r0 */ | ||
| 1673 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | ||
| 1674 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | ||
| 1675 | &dst->u.set.kbuf, | ||
| 1676 | &dst->u.set.ubuf, | ||
| 1677 | 0, ELF_GR_OFFSET(1)); | ||
| 1678 | if (dst->ret || dst->count == 0) | ||
| 1679 | return; | ||
| 1680 | } | ||
| 1681 | |||
| 1682 | /* gr1-gr15 */ | ||
| 1683 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | ||
| 1684 | i = dst->pos; | ||
| 1685 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | ||
| 1686 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
| 1687 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
| 1688 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | ||
| 1689 | if (dst->ret) | ||
| 1690 | return; | ||
| 1691 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | ||
| 1692 | if (access_elf_reg(dst->target, info, i, | ||
| 1693 | &tmp[index], 1) < 0) { | ||
| 1694 | dst->ret = -EIO; | ||
| 1695 | return; | ||
| 1696 | } | ||
| 1697 | if (dst->count == 0) | ||
| 1698 | return; | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | /* gr16-gr31 */ | ||
| 1702 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | ||
| 1703 | pt = task_pt_regs(dst->target); | ||
| 1704 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
| 1705 | &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16, | ||
| 1706 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | ||
| 1707 | if (dst->ret || dst->count == 0) | ||
| 1708 | return; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | /* nat, pr, b0 - b7 */ | ||
| 1712 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | ||
| 1713 | i = dst->pos; | ||
| 1714 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | ||
| 1715 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
| 1716 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
| 1717 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | ||
| 1718 | if (dst->ret) | ||
| 1719 | return; | ||
| 1720 | for (; i < dst->pos; i += sizeof(elf_greg_t), index++) | ||
| 1721 | if (access_elf_reg(dst->target, info, i, | ||
| 1722 | &tmp[index], 1) < 0) { | ||
| 1723 | dst->ret = -EIO; | ||
| 1724 | return; | ||
| 1725 | } | ||
| 1726 | if (dst->count == 0) | ||
| 1727 | return; | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | ||
| 1731 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | ||
| 1732 | */ | ||
| 1733 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | ||
| 1734 | i = dst->pos; | ||
| 1735 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | ||
| 1736 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
| 1737 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
| 1738 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | ||
| 1739 | if (dst->ret) | ||
| 1740 | return; | ||
| 1741 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | ||
| 1742 | if (access_elf_reg(dst->target, info, i, | ||
| 1743 | &tmp[index], 1) < 0) { | ||
| 1744 | dst->ret = -EIO; | ||
| 1745 | return; | ||
| 1746 | } | ||
| 1747 | } | ||
| 1748 | } | ||
| 1749 | |||
| 1750 | #define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t)) | ||
| 1751 | |||
| 1752 | void do_fpregs_get(struct unw_frame_info *info, void *arg) | ||
| 1753 | { | ||
| 1754 | struct regset_getset *dst = arg; | ||
| 1755 | struct task_struct *task = dst->target; | ||
| 1756 | elf_fpreg_t tmp[30]; | ||
| 1757 | int index, min_copy, i; | ||
| 1758 | |||
| 1759 | if (unw_unwind_to_user(info) < 0) | ||
| 1760 | return; | ||
| 1761 | |||
| 1762 | /* Skip pos 0 and 1 */ | ||
| 1763 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | ||
| 1764 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | ||
| 1765 | &dst->u.get.kbuf, | ||
| 1766 | &dst->u.get.ubuf, | ||
| 1767 | 0, ELF_FP_OFFSET(2)); | ||
| 1768 | if (dst->count == 0 || dst->ret) | ||
| 1769 | return; | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | /* fr2-fr31 */ | ||
| 1773 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | ||
| 1774 | index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t); | ||
| 1775 | |||
| 1776 | min_copy = min(((unsigned int)ELF_FP_OFFSET(32)), | ||
| 1777 | dst->pos + dst->count); | ||
| 1778 | for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), | ||
| 1779 | index++) | ||
| 1780 | if (unw_get_fr(info, i / sizeof(elf_fpreg_t), | ||
| 1781 | &tmp[index])) { | ||
| 1782 | dst->ret = -EIO; | ||
| 1783 | return; | ||
| 1784 | } | ||
| 1785 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
| 1786 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
| 1787 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | ||
| 1788 | if (dst->count == 0 || dst->ret) | ||
| 1789 | return; | ||
| 1790 | } | ||
| 1791 | |||
| 1792 | /* fph */ | ||
| 1793 | if (dst->count > 0) { | ||
| 1794 | ia64_flush_fph(dst->target); | ||
| 1795 | if (task->thread.flags & IA64_THREAD_FPH_VALID) | ||
| 1796 | dst->ret = user_regset_copyout( | ||
| 1797 | &dst->pos, &dst->count, | ||
| 1798 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
| 1799 | &dst->target->thread.fph, | ||
| 1800 | ELF_FP_OFFSET(32), -1); | ||
| 1801 | else | ||
| 1802 | /* Zero fill instead. */ | ||
| 1803 | dst->ret = user_regset_copyout_zero( | ||
| 1804 | &dst->pos, &dst->count, | ||
| 1805 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
| 1806 | ELF_FP_OFFSET(32), -1); | ||
| 1807 | } | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | void do_fpregs_set(struct unw_frame_info *info, void *arg) | ||
| 1811 | { | ||
| 1812 | struct regset_getset *dst = arg; | ||
| 1813 | elf_fpreg_t fpreg, tmp[30]; | ||
| 1814 | int index, start, end; | ||
| 1815 | |||
| 1816 | if (unw_unwind_to_user(info) < 0) | ||
| 1817 | return; | ||
| 1818 | |||
| 1819 | /* Skip pos 0 and 1 */ | ||
| 1820 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | ||
| 1821 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | ||
| 1822 | &dst->u.set.kbuf, | ||
| 1823 | &dst->u.set.ubuf, | ||
| 1824 | 0, ELF_FP_OFFSET(2)); | ||
| 1825 | if (dst->count == 0 || dst->ret) | ||
| 1826 | return; | ||
| 1827 | } | ||
| 1828 | |||
| 1829 | /* fr2-fr31 */ | ||
| 1830 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | ||
| 1831 | start = dst->pos; | ||
| 1832 | end = min(((unsigned int)ELF_FP_OFFSET(32)), | ||
| 1833 | dst->pos + dst->count); | ||
| 1834 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
| 1835 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
| 1836 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | ||
| 1837 | if (dst->ret) | ||
| 1838 | return; | ||
| 1839 | |||
| 1840 | if (start & 0xF) { /* only write high part */ | ||
| 1841 | if (unw_get_fr(info, start / sizeof(elf_fpreg_t), | ||
| 1842 | &fpreg)) { | ||
| 1843 | dst->ret = -EIO; | ||
| 1844 | return; | ||
| 1845 | } | ||
| 1846 | tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0] | ||
| 1847 | = fpreg.u.bits[0]; | ||
| 1848 | start &= ~0xFUL; | ||
| 1849 | } | ||
| 1850 | if (end & 0xF) { /* only write low part */ | ||
| 1851 | if (unw_get_fr(info, end / sizeof(elf_fpreg_t), | ||
| 1852 | &fpreg)) { | ||
| 1853 | dst->ret = -EIO; | ||
| 1854 | return; | ||
| 1855 | } | ||
| 1856 | tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1] | ||
| 1857 | = fpreg.u.bits[1]; | ||
| 1858 | end = (end + 0xF) & ~0xFUL; | ||
| 1859 | } | ||
| 1860 | |||
| 1861 | for ( ; start < end ; start += sizeof(elf_fpreg_t)) { | ||
| 1862 | index = start / sizeof(elf_fpreg_t); | ||
| 1863 | if (unw_set_fr(info, index, tmp[index - 2])) { | ||
| 1864 | dst->ret = -EIO; | ||
| 1865 | return; | ||
| 1866 | } | ||
| 1867 | } | ||
| 1868 | if (dst->ret || dst->count == 0) | ||
| 1869 | return; | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | /* fph */ | ||
| 1873 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) { | ||
| 1874 | ia64_sync_fph(dst->target); | ||
| 1875 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
| 1876 | &dst->u.set.kbuf, | ||
| 1877 | &dst->u.set.ubuf, | ||
| 1878 | &dst->target->thread.fph, | ||
| 1879 | ELF_FP_OFFSET(32), -1); | ||
| 1880 | } | ||
| 1881 | } | ||
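The start/end fix-ups above are the subtle part of do_fpregs_set(): elf_fpreg_t is 16 bytes, while ptrace-style writes arrive in 8-byte units. A worked case, with the concrete offsets assumed for illustration:

    /* Suppose a tracer writes only the high 8 bytes of f3, so that
     * the copyin begins at dst->pos == ELF_FP_OFFSET(3) + 8:
     *
     *     start = ELF_FP_OFFSET(3) + 8;      // start & 0xF == 8
     *
     * The code therefore fetches the live f3 with unw_get_fr() and
     * splices its preserved low half into the staging buffer:
     *
     *     tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0] = fpreg.u.bits[0];
     *     start &= ~0xFUL;                   // back to ELF_FP_OFFSET(3)
     *
     * so the final unw_set_fr() writes a coherent 16-byte register.
     * The symmetric end-side splice keeps the high half (u.bits[1])
     * when a write stops halfway through a register. */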
| 1882 | |||
| 1883 | static int | ||
| 1884 | do_regset_call(void (*call)(struct unw_frame_info *, void *), | ||
| 1885 | struct task_struct *target, | ||
| 1886 | const struct user_regset *regset, | ||
| 1887 | unsigned int pos, unsigned int count, | ||
| 1888 | const void *kbuf, const void __user *ubuf) | ||
| 1889 | { | ||
| 1890 | struct regset_getset info = { .target = target, .regset = regset, | ||
| 1891 | .pos = pos, .count = count, | ||
| 1892 | .u.set = { .kbuf = kbuf, .ubuf = ubuf }, | ||
| 1893 | .ret = 0 }; | ||
| 1894 | |||
| 1895 | if (target == current) | ||
| 1896 | unw_init_running(call, &info); | ||
| 1897 | else { | ||
| 1898 | struct unw_frame_info ufi; | ||
| 1899 | memset(&ufi, 0, sizeof(ufi)); | ||
| 1900 | unw_init_from_blocked_task(&ufi, target); | ||
| 1901 | (*call)(&ufi, &info); | ||
| 1902 | } | ||
| 1903 | |||
| 1904 | return info.ret; | ||
| 1905 | } | ||
| 1906 | |||
| 1907 | static int | ||
| 1908 | gpregs_get(struct task_struct *target, | ||
| 1909 | const struct user_regset *regset, | ||
| 1910 | unsigned int pos, unsigned int count, | ||
| 1911 | void *kbuf, void __user *ubuf) | ||
| 1912 | { | ||
| 1913 | return do_regset_call(do_gpregs_get, target, regset, pos, count, | ||
| 1914 | kbuf, ubuf); | ||
| 1915 | } | ||
| 1916 | |||
| 1917 | static int gpregs_set(struct task_struct *target, | ||
| 1918 | const struct user_regset *regset, | ||
| 1919 | unsigned int pos, unsigned int count, | ||
| 1920 | const void *kbuf, const void __user *ubuf) | ||
| 1921 | { | ||
| 1922 | return do_regset_call(do_gpregs_set, target, regset, pos, count, | ||
| 1923 | kbuf, ubuf); | ||
| 1924 | } | ||
| 1925 | |||
| 1926 | static void do_gpregs_writeback(struct unw_frame_info *info, void *arg) | ||
| 1927 | { | ||
| 1928 | do_sync_rbs(info, ia64_sync_user_rbs); | ||
| 1929 | } | ||
| 1930 | |||
| 1931 | /* | ||
| 1932 | * This is called to write back the register backing store. | ||
| 1933 | * ptrace does this before it stops, so that a tracer reading the user | ||
| 1934 | * memory after the thread stops will get the current register data. | ||
| 1935 | */ | ||
| 1936 | static int | ||
| 1937 | gpregs_writeback(struct task_struct *target, | ||
| 1938 | const struct user_regset *regset, | ||
| 1939 | int now) | ||
| 1940 | { | ||
| 1941 | if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) | ||
| 1942 | return 0; | ||
| 1943 | tsk_set_notify_resume(target); | ||
| 1944 | return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, | ||
| 1945 | NULL, NULL); | ||
| 1946 | } | ||
| 1947 | |||
| 1948 | static int | ||
| 1949 | fpregs_active(struct task_struct *target, const struct user_regset *regset) | ||
| 1950 | { | ||
| 1951 | return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32; | ||
| 1952 | } | ||
| 1953 | |||
| 1954 | static int fpregs_get(struct task_struct *target, | ||
| 1955 | const struct user_regset *regset, | ||
| 1956 | unsigned int pos, unsigned int count, | ||
| 1957 | void *kbuf, void __user *ubuf) | ||
| 1958 | { | ||
| 1959 | return do_regset_call(do_fpregs_get, target, regset, pos, count, | ||
| 1960 | kbuf, ubuf); | ||
| 1961 | } | ||
| 1962 | |||
| 1963 | static int fpregs_set(struct task_struct *target, | ||
| 1964 | const struct user_regset *regset, | ||
| 1965 | unsigned int pos, unsigned int count, | ||
| 1966 | const void *kbuf, const void __user *ubuf) | ||
| 1967 | { | ||
| 1968 | return do_regset_call(do_fpregs_set, target, regset, pos, count, | ||
| 1969 | kbuf, ubuf); | ||
| 1970 | } | ||
| 1971 | |||
| 1972 | static int | ||
| 1973 | access_uarea(struct task_struct *child, unsigned long addr, | ||
| 1974 | unsigned long *data, int write_access) | ||
| 1975 | { | ||
| 1976 | unsigned int pos = -1; /* an invalid value */ | ||
| 1977 | int ret; | ||
| 1978 | unsigned long *ptr, regnum; | ||
| 1979 | |||
| 1980 | if ((addr & 0x7) != 0) { | ||
| 1981 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | ||
| 1982 | return -1; | ||
| 1983 | } | ||
| 1984 | if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) || | ||
| 1985 | (addr >= PT_R7 + 8 && addr < PT_B1) || | ||
| 1986 | (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) || | ||
| 1987 | (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) { | ||
| 1988 | dprintk("ptrace: rejecting access to register " | ||
| 1989 | "address 0x%lx\n", addr); | ||
| 1990 | return -1; | ||
| 1991 | } | ||
| 1992 | |||
| 1993 | switch (addr) { | ||
| 1994 | case PT_F32 ... (PT_F127 + 15): | ||
| 1995 | pos = addr - PT_F32 + ELF_FP_OFFSET(32); | ||
| 1996 | break; | ||
| 1997 | case PT_F2 ... (PT_F5 + 15): | ||
| 1998 | pos = addr - PT_F2 + ELF_FP_OFFSET(2); | ||
| 1999 | break; | ||
| 2000 | case PT_F10 ... (PT_F31 + 15): | ||
| 2001 | pos = addr - PT_F10 + ELF_FP_OFFSET(10); | ||
| 2002 | break; | ||
| 2003 | case PT_F6 ... (PT_F9 + 15): | ||
| 2004 | pos = addr - PT_F6 + ELF_FP_OFFSET(6); | ||
| 2005 | break; | ||
| 2006 | } | ||
| 2007 | |||
| 2008 | if (pos != -1) { | ||
| 2009 | if (write_access) | ||
| 2010 | ret = fpregs_set(child, NULL, pos, | ||
| 2011 | sizeof(unsigned long), data, NULL); | ||
| 2012 | else | ||
| 2013 | ret = fpregs_get(child, NULL, pos, | ||
| 2014 | sizeof(unsigned long), data, NULL); | ||
| 2015 | if (ret != 0) | ||
| 2016 | return -1; | ||
| 2017 | return 0; | ||
| 2018 | } | ||
| 2019 | |||
| 2020 | switch (addr) { | ||
| 2021 | case PT_NAT_BITS: | ||
| 2022 | pos = ELF_NAT_OFFSET; | ||
| 2023 | break; | ||
| 2024 | case PT_R4 ... PT_R7: | ||
| 2025 | pos = addr - PT_R4 + ELF_GR_OFFSET(4); | ||
| 2026 | break; | ||
| 2027 | case PT_B1 ... PT_B5: | ||
| 2028 | pos = addr - PT_B1 + ELF_BR_OFFSET(1); | ||
| 2029 | break; | ||
| 2030 | case PT_AR_EC: | ||
| 2031 | pos = ELF_AR_EC_OFFSET; | ||
| 2032 | break; | ||
| 2033 | case PT_AR_LC: | ||
| 2034 | pos = ELF_AR_LC_OFFSET; | ||
| 2035 | break; | ||
| 2036 | case PT_CR_IPSR: | ||
| 2037 | pos = ELF_CR_IPSR_OFFSET; | ||
| 2038 | break; | ||
| 2039 | case PT_CR_IIP: | ||
| 2040 | pos = ELF_CR_IIP_OFFSET; | ||
| 2041 | break; | ||
| 2042 | case PT_CFM: | ||
| 2043 | pos = ELF_CFM_OFFSET; | ||
| 2044 | break; | ||
| 2045 | case PT_AR_UNAT: | ||
| 2046 | pos = ELF_AR_UNAT_OFFSET; | ||
| 2047 | break; | ||
| 2048 | case PT_AR_PFS: | ||
| 2049 | pos = ELF_AR_PFS_OFFSET; | ||
| 2050 | break; | ||
| 2051 | case PT_AR_RSC: | ||
| 2052 | pos = ELF_AR_RSC_OFFSET; | ||
| 2053 | break; | ||
| 2054 | case PT_AR_RNAT: | ||
| 2055 | pos = ELF_AR_RNAT_OFFSET; | ||
| 2056 | break; | ||
| 2057 | case PT_AR_BSPSTORE: | ||
| 2058 | pos = ELF_AR_BSPSTORE_OFFSET; | ||
| 2059 | break; | ||
| 2060 | case PT_PR: | ||
| 2061 | pos = ELF_PR_OFFSET; | ||
| 2062 | break; | ||
| 2063 | case PT_B6: | ||
| 2064 | pos = ELF_BR_OFFSET(6); | ||
| 2065 | break; | ||
| 2066 | case PT_AR_BSP: | ||
| 2067 | pos = ELF_AR_BSP_OFFSET; | ||
| 2068 | break; | ||
| 2069 | case PT_R1 ... PT_R3: | ||
| 2070 | pos = addr - PT_R1 + ELF_GR_OFFSET(1); | ||
| 2071 | break; | ||
| 2072 | case PT_R12 ... PT_R15: | ||
| 2073 | pos = addr - PT_R12 + ELF_GR_OFFSET(12); | ||
| 2074 | break; | ||
| 2075 | case PT_R8 ... PT_R11: | ||
| 2076 | pos = addr - PT_R8 + ELF_GR_OFFSET(8); | ||
| 2077 | break; | ||
| 2078 | case PT_R16 ... PT_R31: | ||
| 2079 | pos = addr - PT_R16 + ELF_GR_OFFSET(16); | ||
| 2080 | break; | ||
| 2081 | case PT_AR_CCV: | ||
| 2082 | pos = ELF_AR_CCV_OFFSET; | ||
| 2083 | break; | ||
| 2084 | case PT_AR_FPSR: | ||
| 2085 | pos = ELF_AR_FPSR_OFFSET; | ||
| 2086 | break; | ||
| 2087 | case PT_B0: | ||
| 2088 | pos = ELF_BR_OFFSET(0); | ||
| 2089 | break; | ||
| 2090 | case PT_B7: | ||
| 2091 | pos = ELF_BR_OFFSET(7); | ||
| 2092 | break; | ||
| 2093 | case PT_AR_CSD: | ||
| 2094 | pos = ELF_AR_CSD_OFFSET; | ||
| 2095 | break; | ||
| 2096 | case PT_AR_SSD: | ||
| 2097 | pos = ELF_AR_SSD_OFFSET; | ||
| 2098 | break; | ||
| 2099 | } | ||
| 2100 | |||
| 2101 | if (pos != -1) { | ||
| 2102 | if (write_access) | ||
| 2103 | ret = gpregs_set(child, NULL, pos, | ||
| 2104 | sizeof(unsigned long), data, NULL); | ||
| 2105 | else | ||
| 2106 | ret = gpregs_get(child, NULL, pos, | ||
| 2107 | sizeof(unsigned long), data, NULL); | ||
| 2108 | if (ret != 0) | ||
| 2109 | return -1; | ||
| 2110 | return 0; | ||
| 2111 | } | ||
| 2112 | |||
| 2113 | /* access debug registers */ | ||
| 2114 | if (addr >= PT_IBR) { | ||
| 2115 | regnum = (addr - PT_IBR) >> 3; | ||
| 2116 | ptr = &child->thread.ibr[0]; | ||
| 2117 | } else { | ||
| 2118 | regnum = (addr - PT_DBR) >> 3; | ||
| 2119 | ptr = &child->thread.dbr[0]; | ||
| 2120 | } | ||
| 2121 | |||
| 2122 | if (regnum >= 8) { | ||
| 2123 | dprintk("ptrace: rejecting access to register " | ||
| 2124 | "address 0x%lx\n", addr); | ||
| 2125 | return -1; | ||
| 2126 | } | ||
| 2127 | #ifdef CONFIG_PERFMON | ||
| 2128 | /* | ||
| 2129 | * Check if debug registers are used by perfmon. This | ||
| 2130 | * test must be done once we know that we can do the | ||
| 2131 | * operation, i.e. the arguments are all valid, but | ||
| 2132 | * before we start modifying the state. | ||
| 2133 | * | ||
| 2134 | * Perfmon needs to keep a count of how many processes | ||
| 2135 | * are trying to modify the debug registers for system | ||
| 2136 | * wide monitoring sessions. | ||
| 2137 | * | ||
| 2138 | * We also include read accesses here, because they may | ||
| 2139 | * cause the PMU-installed debug register state | ||
| 2140 | * (dbr[], ibr[]) to be reset. The two arrays are also | ||
| 2141 | * used by perfmon, but we do not use | ||
| 2142 | * IA64_THREAD_DBG_VALID. The registers are restored | ||
| 2143 | * by the PMU context switch code. | ||
| 2144 | */ | ||
| 2145 | if (pfm_use_debug_registers(child)) | ||
| 2146 | return -1; | ||
| 2147 | #endif | ||
| 2148 | |||
| 2149 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | ||
| 2150 | child->thread.flags |= IA64_THREAD_DBG_VALID; | ||
| 2151 | memset(child->thread.dbr, 0, | ||
| 2152 | sizeof(child->thread.dbr)); | ||
| 2153 | memset(child->thread.ibr, 0, | ||
| 2154 | sizeof(child->thread.ibr)); | ||
| 2155 | } | ||
| 2156 | |||
| 2157 | ptr += regnum; | ||
| 2158 | |||
| 2159 | if ((regnum & 1) && write_access) { | ||
| 2160 | /* don't let the user set kernel-level breakpoints: */ | ||
| 2161 | *ptr = *data & ~(7UL << 56); | ||
| 2162 | return 0; | ||
| 2163 | } | ||
| 2164 | if (write_access) | ||
| 2165 | *ptr = *data; | ||
| 2166 | else | ||
| 2167 | *data = *ptr; | ||
| 2168 | return 0; | ||
| 2169 | } | ||
| 2170 | |||
| 2171 | static const struct user_regset native_regsets[] = { | ||
| 2172 | { | ||
| 2173 | .core_note_type = NT_PRSTATUS, | ||
| 2174 | .n = ELF_NGREG, | ||
| 2175 | .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), | ||
| 2176 | .get = gpregs_get, .set = gpregs_set, | ||
| 2177 | .writeback = gpregs_writeback | ||
| 2178 | }, | ||
| 2179 | { | ||
| 2180 | .core_note_type = NT_PRFPREG, | ||
| 2181 | .n = ELF_NFPREG, | ||
| 2182 | .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), | ||
| 2183 | .get = fpregs_get, .set = fpregs_set, .active = fpregs_active | ||
| 2184 | }, | ||
| 2185 | }; | ||
| 2186 | |||
| 2187 | static const struct user_regset_view user_ia64_view = { | ||
| 2188 | .name = "ia64", | ||
| 2189 | .e_machine = EM_IA_64, | ||
| 2190 | .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) | ||
| 2191 | }; | ||
| 2192 | |||
| 2193 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | ||
| 2194 | { | ||
| 2195 | #ifdef CONFIG_IA32_SUPPORT | ||
| 2196 | extern const struct user_regset_view user_ia32_view; | ||
| 2197 | if (IS_IA32_PROCESS(task_pt_regs(tsk))) | ||
| 2198 | return &user_ia32_view; | ||
| 2199 | #endif | ||
| 2200 | return &user_ia64_view; | ||
| 2201 | } | ||
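Once the two regsets are registered, generic code reaches the ia64 registers through the view rather than through pt_regs directly. A hedged kernel-side sketch (nothing beyond the generic linux/regset.h API and the definitions above is assumed):

    /* Fetch all ELF_NGREG general registers of @task into @buf via
     * the regset abstraction. */
    static int dump_gpregs(struct task_struct *task, elf_greg_t *buf)
    {
    	const struct user_regset_view *view = task_user_regset_view(task);
    	const struct user_regset *rs = &view->regsets[0]; /* NT_PRSTATUS */

    	return rs->get(task, rs, 0, rs->n * rs->size, buf, NULL);
    }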
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4aa9eaea76c3..5015ca1275ca 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
| @@ -59,6 +59,7 @@ | |||
| 59 | #include <asm/setup.h> | 59 | #include <asm/setup.h> |
| 60 | #include <asm/smp.h> | 60 | #include <asm/smp.h> |
| 61 | #include <asm/system.h> | 61 | #include <asm/system.h> |
| 62 | #include <asm/tlbflush.h> | ||
| 62 | #include <asm/unistd.h> | 63 | #include <asm/unistd.h> |
| 63 | #include <asm/hpsim.h> | 64 | #include <asm/hpsim.h> |
| 64 | 65 | ||
| @@ -176,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) | |||
| 176 | return 0; | 177 | return 0; |
| 177 | } | 178 | } |
| 178 | 179 | ||
| 180 | /* | ||
| 181 | * Similar to "filter_rsvd_memory()", but the reserved memory ranges | ||
| 182 | * are not filtered out. | ||
| 183 | */ | ||
| 184 | int __init | ||
| 185 | filter_memory(unsigned long start, unsigned long end, void *arg) | ||
| 186 | { | ||
| 187 | void (*func)(unsigned long, unsigned long, int); | ||
| 188 | |||
| 189 | #if IGNORE_PFN0 | ||
| 190 | if (start == PAGE_OFFSET) { | ||
| 191 | printk(KERN_WARNING "warning: skipping physical page 0\n"); | ||
| 192 | start += PAGE_SIZE; | ||
| 193 | if (start >= end) | ||
| 194 | return 0; | ||
| 195 | } | ||
| 196 | #endif | ||
| 197 | func = arg; | ||
| 198 | if (start < end) | ||
| 199 | call_pernode_memory(__pa(start), end - start, func); | ||
| 200 | return 0; | ||
| 201 | } | ||
| 202 | |||
| 179 | static void __init | 203 | static void __init |
| 180 | sort_regions (struct rsvd_region *rsvd_region, int max) | 204 | sort_regions (struct rsvd_region *rsvd_region, int max) |
| 181 | { | 205 | { |
| @@ -493,6 +517,8 @@ setup_arch (char **cmdline_p) | |||
| 493 | acpi_table_init(); | 517 | acpi_table_init(); |
| 494 | # ifdef CONFIG_ACPI_NUMA | 518 | # ifdef CONFIG_ACPI_NUMA |
| 495 | acpi_numa_init(); | 519 | acpi_numa_init(); |
| 520 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? | ||
| 521 | 32 : cpus_weight(early_cpu_possible_map)), additional_cpus); | ||
| 496 | # endif | 522 | # endif |
| 497 | #else | 523 | #else |
| 498 | # ifdef CONFIG_SMP | 524 | # ifdef CONFIG_SMP |
| @@ -946,9 +972,10 @@ cpu_init (void) | |||
| 946 | #endif | 972 | #endif |
| 947 | 973 | ||
| 948 | /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ | 974 | /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ |
| 949 | if (ia64_pal_vm_summary(NULL, &vmi) == 0) | 975 | if (ia64_pal_vm_summary(NULL, &vmi) == 0) { |
| 950 | max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; | 976 | max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; |
| 951 | else { | 977 | setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); |
| 978 | } else { | ||
| 952 | printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); | 979 | printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); |
| 953 | max_ctx = (1U << 15) - 1; /* use architected minimum */ | 980 | max_ctx = (1U << 15) - 1; /* use architected minimum */ |
| 954 | } | 981 | } |
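setup_ptcg_sem() itself lives in arch/ia64/mm/tlb.c (the "Kernel parameter for max number of concurrent global TLB purges" change); only its call site appears in this hunk. Conceptually it reconciles three possible sources of the purge limit, strongest source winning; a sketch under that assumption, with all names here treated as illustrative:

    /* Assumed precedence: "nptcg=" boot parameter > SAL PALO table
     * > PAL_VM_SUMMARY (the call site above). */
    enum { FROM_PAL, FROM_PALO, FROM_KERNEL_PARAM };

    static int nptcg_source;	/* strongest source seen so far */
    static unsigned int nptcg;	/* resulting concurrent-purge limit */

    static void setup_ptcg_sem_model(unsigned int max_purges, int from)
    {
    	if (from < nptcg_source)
    		return;			/* a stronger source already decided */
    	nptcg_source = from;
    	nptcg = max_purges ? max_purges : 1;	/* allow at least one purge */
    }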
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 4e446aa5f4ac..9a9d4c489330 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c | |||
| @@ -213,6 +213,19 @@ send_IPI_allbutself (int op) | |||
| 213 | * Called with preemption disabled. | 213 | * Called with preemption disabled. |
| 214 | */ | 214 | */ |
| 215 | static inline void | 215 | static inline void |
| 216 | send_IPI_mask(cpumask_t mask, int op) | ||
| 217 | { | ||
| 218 | unsigned int cpu; | ||
| 219 | |||
| 220 | for_each_cpu_mask(cpu, mask) { | ||
| 221 | send_IPI_single(cpu, op); | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | /* | ||
| 226 | * Called with preemption disabled. | ||
| 227 | */ | ||
| 228 | static inline void | ||
| 216 | send_IPI_all (int op) | 229 | send_IPI_all (int op) |
| 217 | { | 230 | { |
| 218 | int i; | 231 | int i; |
| @@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int | |||
| 401 | } | 414 | } |
| 402 | EXPORT_SYMBOL(smp_call_function_single); | 415 | EXPORT_SYMBOL(smp_call_function_single); |
| 403 | 416 | ||
| 417 | /** | ||
| 418 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
| 419 | * <mask> The set of cpus to run on. Must not include the current cpu. | ||
| 420 | * <func> The function to run. This must be fast and non-blocking. | ||
| 421 | * <info> An arbitrary pointer to pass to the function. | ||
| 422 | * <wait> If true, wait (atomically) until function | ||
| 423 | * has completed on other CPUs. | ||
| 424 | * | ||
| 425 | * Returns 0 on success, else a negative status code. | ||
| 426 | * | ||
| 427 | * If @wait is true, then returns once @func has returned; otherwise | ||
| 428 | * it returns just before the target cpu calls @func. | ||
| 429 | * | ||
| 430 | * You must not call this function with disabled interrupts or from a | ||
| 431 | * hardware interrupt handler or from a bottom half handler. | ||
| 432 | */ | ||
| 433 | int smp_call_function_mask(cpumask_t mask, | ||
| 434 | void (*func)(void *), void *info, | ||
| 435 | int wait) | ||
| 436 | { | ||
| 437 | struct call_data_struct data; | ||
| 438 | cpumask_t allbutself; | ||
| 439 | int cpus; | ||
| 440 | |||
| 441 | spin_lock(&call_lock); | ||
| 442 | allbutself = cpu_online_map; | ||
| 443 | cpu_clear(smp_processor_id(), allbutself); | ||
| 444 | |||
| 445 | cpus_and(mask, mask, allbutself); | ||
| 446 | cpus = cpus_weight(mask); | ||
| 447 | if (!cpus) { | ||
| 448 | spin_unlock(&call_lock); | ||
| 449 | return 0; | ||
| 450 | } | ||
| 451 | |||
| 452 | /* Can deadlock when called with interrupts disabled */ | ||
| 453 | WARN_ON(irqs_disabled()); | ||
| 454 | |||
| 455 | data.func = func; | ||
| 456 | data.info = info; | ||
| 457 | atomic_set(&data.started, 0); | ||
| 458 | data.wait = wait; | ||
| 459 | if (wait) | ||
| 460 | atomic_set(&data.finished, 0); | ||
| 461 | |||
| 462 | call_data = &data; | ||
| 463 | mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ | ||
| 464 | |||
| 465 | /* Send a message to other CPUs */ | ||
| 466 | if (cpus_equal(mask, allbutself)) | ||
| 467 | send_IPI_allbutself(IPI_CALL_FUNC); | ||
| 468 | else | ||
| 469 | send_IPI_mask(mask, IPI_CALL_FUNC); | ||
| 470 | |||
| 471 | /* Wait for response */ | ||
| 472 | while (atomic_read(&data.started) != cpus) | ||
| 473 | cpu_relax(); | ||
| 474 | |||
| 475 | if (wait) | ||
| 476 | while (atomic_read(&data.finished) != cpus) | ||
| 477 | cpu_relax(); | ||
| 478 | call_data = NULL; | ||
| 479 | |||
| 480 | spin_unlock(&call_lock); | ||
| 481 | return 0; | ||
| 482 | |||
| 483 | } | ||
| 484 | EXPORT_SYMBOL(smp_call_function_mask); | ||
| 485 | |||
| 404 | /* | 486 | /* |
| 405 | * this function sends a 'generic call function' IPI to all other CPUs | 487 | * this function sends a 'generic call function' IPI to all other CPUs |
| 406 | * in the system. | 488 | * in the system. |
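A typical caller of the new smp_call_function_mask() builds a mask of targets and asks for synchronous completion. A hypothetical example (the callback and the per-node use case are invented for illustration):

    /* Hypothetical: run a short, non-blocking callback on the other
     * online CPUs of one node, waiting until all have finished. */
    static void drain_local_counters(void *info)
    {
    	/* executes on each target CPU in interrupt context */
    }

    static void drain_node_counters(int node)
    {
    	cpumask_t mask = node_to_cpumask(node);

    	preempt_disable();			/* pin smp_processor_id() */
    	cpu_clear(smp_processor_id(), mask);	/* never target ourselves */
    	smp_call_function_mask(mask, drain_local_counters, NULL, 1);
    	preempt_enable();
    }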
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 32ee5979a042..16483be18c0b 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
| @@ -400,9 +400,9 @@ smp_callin (void) | |||
| 400 | /* Setup the per cpu irq handling data structures */ | 400 | /* Setup the per cpu irq handling data structures */ |
| 401 | __setup_vector_irq(cpuid); | 401 | __setup_vector_irq(cpuid); |
| 402 | cpu_set(cpuid, cpu_online_map); | 402 | cpu_set(cpuid, cpu_online_map); |
| 403 | unlock_ipi_calllock(); | ||
| 404 | per_cpu(cpu_state, cpuid) = CPU_ONLINE; | 403 | per_cpu(cpu_state, cpuid) = CPU_ONLINE; |
| 405 | spin_unlock(&vector_lock); | 404 | spin_unlock(&vector_lock); |
| 405 | unlock_ipi_calllock(); | ||
| 406 | 406 | ||
| 407 | smp_setup_percpu_timer(); | 407 | smp_setup_percpu_timer(); |
| 408 | 408 | ||
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 17fda5293c67..48e15a51782f 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
| @@ -59,6 +59,84 @@ static struct clocksource clocksource_itc = { | |||
| 59 | }; | 59 | }; |
| 60 | static struct clocksource *itc_clocksource; | 60 | static struct clocksource *itc_clocksource; |
| 61 | 61 | ||
| 62 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 63 | |||
| 64 | #include <linux/kernel_stat.h> | ||
| 65 | |||
| 66 | extern cputime_t cycle_to_cputime(u64 cyc); | ||
| 67 | |||
| 68 | /* | ||
| 69 | * Called from the context switch with interrupts disabled, to charge all | ||
| 70 | * accumulated times to the current process, and to prepare accounting on | ||
| 71 | * the next process. | ||
| 72 | */ | ||
| 73 | void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next) | ||
| 74 | { | ||
| 75 | struct thread_info *pi = task_thread_info(prev); | ||
| 76 | struct thread_info *ni = task_thread_info(next); | ||
| 77 | cputime_t delta_stime, delta_utime; | ||
| 78 | __u64 now; | ||
| 79 | |||
| 80 | now = ia64_get_itc(); | ||
| 81 | |||
| 82 | delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp)); | ||
| 83 | account_system_time(prev, 0, delta_stime); | ||
| 84 | account_system_time_scaled(prev, delta_stime); | ||
| 85 | |||
| 86 | if (pi->ac_utime) { | ||
| 87 | delta_utime = cycle_to_cputime(pi->ac_utime); | ||
| 88 | account_user_time(prev, delta_utime); | ||
| 89 | account_user_time_scaled(prev, delta_utime); | ||
| 90 | } | ||
| 91 | |||
| 92 | pi->ac_stamp = ni->ac_stamp = now; | ||
| 93 | ni->ac_stime = ni->ac_utime = 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Account time for a transition between system, hard irq or soft irq state. | ||
| 98 | * Note that this function is called with interrupts enabled. | ||
| 99 | */ | ||
| 100 | void account_system_vtime(struct task_struct *tsk) | ||
| 101 | { | ||
| 102 | struct thread_info *ti = task_thread_info(tsk); | ||
| 103 | unsigned long flags; | ||
| 104 | cputime_t delta_stime; | ||
| 105 | __u64 now; | ||
| 106 | |||
| 107 | local_irq_save(flags); | ||
| 108 | |||
| 109 | now = ia64_get_itc(); | ||
| 110 | |||
| 111 | delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); | ||
| 112 | account_system_time(tsk, 0, delta_stime); | ||
| 113 | account_system_time_scaled(tsk, delta_stime); | ||
| 114 | ti->ac_stime = 0; | ||
| 115 | |||
| 116 | ti->ac_stamp = now; | ||
| 117 | |||
| 118 | local_irq_restore(flags); | ||
| 119 | } | ||
| 120 | |||
| 121 | /* | ||
| 122 | * Called from the timer interrupt handler to charge accumulated user time | ||
| 123 | * to the current process. Must be called with interrupts disabled. | ||
| 124 | */ | ||
| 125 | void account_process_tick(struct task_struct *p, int user_tick) | ||
| 126 | { | ||
| 127 | struct thread_info *ti = task_thread_info(p); | ||
| 128 | cputime_t delta_utime; | ||
| 129 | |||
| 130 | if (ti->ac_utime) { | ||
| 131 | delta_utime = cycle_to_cputime(ti->ac_utime); | ||
| 132 | account_user_time(p, delta_utime); | ||
| 133 | account_user_time_scaled(p, delta_utime); | ||
| 134 | ti->ac_utime = 0; | ||
| 135 | } | ||
| 136 | } | ||
| 137 | |||
| 138 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
| 139 | |||
| 62 | static irqreturn_t | 140 | static irqreturn_t |
| 63 | timer_interrupt (int irq, void *dev_id) | 141 | timer_interrupt (int irq, void *dev_id) |
| 64 | { | 142 | { |
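The three hooks above only redistribute time that the low-level entry/exit paths have already banked into the new thread_info accumulators (ac_stamp, ac_stime, ac_utime). The banking itself is done in assembly in this series; a conceptual C model of what those paths do, not the actual code:

    /* Conceptual model only -- the kernel does this in the entry asm. */
    static void model_kernel_entry(struct thread_info *ti)
    {
    	__u64 now = ia64_get_itc();

    	ti->ac_utime += now - ti->ac_stamp;	/* since last exit: user time */
    	ti->ac_stamp = now;
    }

    static void model_kernel_exit(struct thread_info *ti)
    {
    	__u64 now = ia64_get_itc();

    	ti->ac_stime += now - ti->ac_stamp;	/* since entry: system time */
    	ti->ac_stamp = now;
    }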
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 6903361d11a5..ff0e7c10faa7 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes. | 13 | * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes. |
| 14 | * 2001/01/17 Add support emulation of unaligned kernel accesses. | 14 | * 2001/01/17 Add support emulation of unaligned kernel accesses. |
| 15 | */ | 15 | */ |
| 16 | #include <linux/jiffies.h> | ||
| 16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 17 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
| 18 | #include <linux/tty.h> | 19 | #include <linux/tty.h> |
| @@ -1290,7 +1291,7 @@ within_logging_rate_limit (void) | |||
| 1290 | { | 1291 | { |
| 1291 | static unsigned long count, last_time; | 1292 | static unsigned long count, last_time; |
| 1292 | 1293 | ||
| 1293 | if (jiffies - last_time > 5*HZ) | 1294 | if (time_after(jiffies, last_time + 5 * HZ)) |
| 1294 | count = 0; | 1295 | count = 0; |
| 1295 | if (count < 5) { | 1296 | if (count < 5) { |
| 1296 | last_time = jiffies; | 1297 | last_time = jiffies; |
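time_after() is the canonical, wrap-safe way to compare jiffies values. Comparing the raw counters directly (jiffies > deadline) breaks when jiffies wraps; the macro, roughly as defined in linux/jiffies.h minus the typechecking, compares a signed difference instead:

    #include <limits.h>

    /* Rough model of linux/jiffies.h: */
    #define model_time_after(a, b)	((long)((b) - (a)) < 0)

    /* Example: a deadline armed just before the counter wraps. */
    unsigned long deadline = ULONG_MAX - 10;	/* last_time + 5*HZ */
    unsigned long now = 5;			/* jiffies, after the wrap */

    /* now > deadline                  -> false (wrong: deadline passed) */
    /* model_time_after(now, deadline) -> true  (correct)                */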
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 344f64eca7a9..798bf9835a51 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
| @@ -45,8 +45,6 @@ void show_mem(void) | |||
| 45 | 45 | ||
| 46 | printk(KERN_INFO "Mem-info:\n"); | 46 | printk(KERN_INFO "Mem-info:\n"); |
| 47 | show_free_areas(); | 47 | show_free_areas(); |
| 48 | printk(KERN_INFO "Free swap: %6ldkB\n", | ||
| 49 | nr_swap_pages<<(PAGE_SHIFT-10)); | ||
| 50 | printk(KERN_INFO "Node memory in pages:\n"); | 48 | printk(KERN_INFO "Node memory in pages:\n"); |
| 51 | for_each_online_pgdat(pgdat) { | 49 | for_each_online_pgdat(pgdat) { |
| 52 | unsigned long present; | 50 | unsigned long present; |
| @@ -255,7 +253,7 @@ paging_init (void) | |||
| 255 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 253 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
| 256 | 254 | ||
| 257 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 255 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 258 | efi_memmap_walk(register_active_ranges, NULL); | 256 | efi_memmap_walk(filter_memory, register_active_ranges); |
| 259 | efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); | 257 | efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); |
| 260 | if (max_gap < LARGE_GAP) { | 258 | if (max_gap < LARGE_GAP) { |
| 261 | vmem_map = (struct page *) 0; | 259 | vmem_map = (struct page *) 0; |
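Both mm callers now route register_active_ranges() through filter_memory() (added in the setup.c hunk above), which treats its void *arg as the per-range callback. The indirection in miniature, with illustrative names only:

    typedef void (*pernode_fn)(unsigned long paddr, unsigned long len, int nid);

    /* Model of the walk -> filter -> per-node callback chain: the EFI
     * memmap walker hands each usable [start, end) range to the filter,
     * which forwards it as a physical range to the callback in arg. */
    static int filter_model(unsigned long start, unsigned long end, void *arg)
    {
    	pernode_fn func = arg;

    	if (start < end)
    		func(start, end - start, 0 /* nid resolved by the kernel */);
    	return 0;
    }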
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index ee5e68b2af94..544dc420c65e 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
| @@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node) | |||
| 104 | { | 104 | { |
| 105 | int cpu, n = 0; | 105 | int cpu, n = 0; |
| 106 | 106 | ||
| 107 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 107 | for_each_possible_early_cpu(cpu) |
| 108 | if (node == node_cpuid[cpu].nid) | 108 | if (node == node_cpuid[cpu].nid) |
| 109 | n++; | 109 | n++; |
| 110 | 110 | ||
| @@ -124,6 +124,7 @@ static unsigned long __meminit compute_pernodesize(int node) | |||
| 124 | pernodesize += node * L1_CACHE_BYTES; | 124 | pernodesize += node * L1_CACHE_BYTES; |
| 125 | pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); | 125 | pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); |
| 126 | pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); | 126 | pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); |
| 127 | pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 127 | pernodesize = PAGE_ALIGN(pernodesize); | 128 | pernodesize = PAGE_ALIGN(pernodesize); |
| 128 | return pernodesize; | 129 | return pernodesize; |
| 129 | } | 130 | } |
| @@ -142,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node) | |||
| 142 | #ifdef CONFIG_SMP | 143 | #ifdef CONFIG_SMP |
| 143 | int cpu; | 144 | int cpu; |
| 144 | 145 | ||
| 145 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 146 | for_each_possible_early_cpu(cpu) { |
| 146 | if (node == node_cpuid[cpu].nid) { | 147 | if (node == node_cpuid[cpu].nid) { |
| 147 | memcpy(__va(cpu_data), __phys_per_cpu_start, | 148 | memcpy(__va(cpu_data), __phys_per_cpu_start, |
| 148 | __per_cpu_end - __per_cpu_start); | 149 | __per_cpu_end - __per_cpu_start); |
| @@ -345,7 +346,7 @@ static void __init initialize_pernode_data(void) | |||
| 345 | 346 | ||
| 346 | #ifdef CONFIG_SMP | 347 | #ifdef CONFIG_SMP |
| 347 | /* Set the node_data pointer for each per-cpu struct */ | 348 | /* Set the node_data pointer for each per-cpu struct */ |
| 348 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 349 | for_each_possible_early_cpu(cpu) { |
| 349 | node = node_cpuid[cpu].nid; | 350 | node = node_cpuid[cpu].nid; |
| 350 | per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; | 351 | per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; |
| 351 | } | 352 | } |
| @@ -444,7 +445,7 @@ void __init find_memory(void) | |||
| 444 | mem_data[node].min_pfn = ~0UL; | 445 | mem_data[node].min_pfn = ~0UL; |
| 445 | } | 446 | } |
| 446 | 447 | ||
| 447 | efi_memmap_walk(register_active_ranges, NULL); | 448 | efi_memmap_walk(filter_memory, register_active_ranges); |
| 448 | 449 | ||
| 449 | /* | 450 | /* |
| 450 | * Initialize the boot memory maps in reverse order since that's | 451 | * Initialize the boot memory maps in reverse order since that's |
| @@ -493,13 +494,9 @@ void __cpuinit *per_cpu_init(void) | |||
| 493 | int cpu; | 494 | int cpu; |
| 494 | static int first_time = 1; | 495 | static int first_time = 1; |
| 495 | 496 | ||
| 496 | |||
| 497 | if (smp_processor_id() != 0) | ||
| 498 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; | ||
| 499 | |||
| 500 | if (first_time) { | 497 | if (first_time) { |
| 501 | first_time = 0; | 498 | first_time = 0; |
| 502 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 499 | for_each_possible_early_cpu(cpu) |
| 503 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | 500 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; |
| 504 | } | 501 | } |
| 505 | 502 | ||
| @@ -522,8 +519,6 @@ void show_mem(void) | |||
| 522 | 519 | ||
| 523 | printk(KERN_INFO "Mem-info:\n"); | 520 | printk(KERN_INFO "Mem-info:\n"); |
| 524 | show_free_areas(); | 521 | show_free_areas(); |
| 525 | printk(KERN_INFO "Free swap: %6ldkB\n", | ||
| 526 | nr_swap_pages<<(PAGE_SHIFT-10)); | ||
| 527 | printk(KERN_INFO "Node memory in pages:\n"); | 522 | printk(KERN_INFO "Node memory in pages:\n"); |
| 528 | for_each_online_pgdat(pgdat) { | 523 | for_each_online_pgdat(pgdat) { |
| 529 | unsigned long present; | 524 | unsigned long present; |
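Several loops in discontig.c switch from scanning all NR_CPUS slots to for_each_possible_early_cpu(), which (per the asm-ia64/acpi.h hunk later in this diff) iterates only over early_cpu_possible_map, so per-cpu setup work no longer touches CPU slots that were never discovered. A user-space model of the effect, with a plain bitmask standing in for cpumask_t:

```c
#include <stdio.h>

#define NR_CPUS 16

/* Model of early_cpu_possible_map as a plain bitmask; the kernel uses
 * a cpumask_t and for_each_cpu_mask(), but the loop shape is the same. */
static unsigned long early_cpu_possible_map = 0x0000000fUL; /* cpus 0-3 */

#define for_each_possible_early_cpu_model(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (early_cpu_possible_map & (1UL << (cpu)))

int main(void)
{
	int cpu, visited = 0;

	/* The old code walked all NR_CPUS slots; the new loop only
	 * touches CPUs present in the early possible map. */
	for_each_possible_early_cpu_model(cpu)
		visited++;

	printf("visited %d of %d cpu slots\n", visited, NR_CPUS);
	return 0;
}
```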
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index a4ca657c72c6..5c1de53c8c1c 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
| @@ -58,7 +58,6 @@ __ia64_sync_icache_dcache (pte_t pte) | |||
| 58 | { | 58 | { |
| 59 | unsigned long addr; | 59 | unsigned long addr; |
| 60 | struct page *page; | 60 | struct page *page; |
| 61 | unsigned long order; | ||
| 62 | 61 | ||
| 63 | page = pte_page(pte); | 62 | page = pte_page(pte); |
| 64 | addr = (unsigned long) page_address(page); | 63 | addr = (unsigned long) page_address(page); |
| @@ -66,12 +65,7 @@ __ia64_sync_icache_dcache (pte_t pte) | |||
| 66 | if (test_bit(PG_arch_1, &page->flags)) | 65 | if (test_bit(PG_arch_1, &page->flags)) |
| 67 | return; /* i-cache is already coherent with d-cache */ | 66 | return; /* i-cache is already coherent with d-cache */ |
| 68 | 67 | ||
| 69 | if (PageCompound(page)) { | 68 | flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page))); |
| 70 | order = compound_order(page); | ||
| 71 | flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT)); | ||
| 72 | } | ||
| 73 | else | ||
| 74 | flush_icache_range(addr, addr + PAGE_SIZE); | ||
| 75 | set_bit(PG_arch_1, &page->flags); /* mark page as clean */ | 69 | set_bit(PG_arch_1, &page->flags); /* mark page as clean */ |
| 76 | } | 70 | } |
| 77 | 71 | ||
| @@ -553,12 +547,10 @@ find_largest_hole (u64 start, u64 end, void *arg) | |||
| 553 | #endif /* CONFIG_VIRTUAL_MEM_MAP */ | 547 | #endif /* CONFIG_VIRTUAL_MEM_MAP */ |
| 554 | 548 | ||
| 555 | int __init | 549 | int __init |
| 556 | register_active_ranges(u64 start, u64 end, void *arg) | 550 | register_active_ranges(u64 start, u64 len, int nid) |
| 557 | { | 551 | { |
| 558 | int nid = paddr_to_nid(__pa(start)); | 552 | u64 end = start + len; |
| 559 | 553 | ||
| 560 | if (nid < 0) | ||
| 561 | nid = 0; | ||
| 562 | #ifdef CONFIG_KEXEC | 554 | #ifdef CONFIG_KEXEC |
| 563 | if (start > crashk_res.start && start < crashk_res.end) | 555 | if (start > crashk_res.start && start < crashk_res.end) |
| 564 | start = crashk_res.end; | 556 | start = crashk_res.end; |
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 7807fc5c0422..b73bf1838e57 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c | |||
| @@ -27,7 +27,9 @@ | |||
| 27 | */ | 27 | */ |
| 28 | int num_node_memblks; | 28 | int num_node_memblks; |
| 29 | struct node_memblk_s node_memblk[NR_NODE_MEMBLKS]; | 29 | struct node_memblk_s node_memblk[NR_NODE_MEMBLKS]; |
| 30 | struct node_cpuid_s node_cpuid[NR_CPUS]; | 30 | struct node_cpuid_s node_cpuid[NR_CPUS] = |
| 31 | { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } }; | ||
| 32 | |||
| 31 | /* | 33 | /* |
| 32 | * This is a matrix with "distances" between nodes, they should be | 34 | * This is a matrix with "distances" between nodes, they should be |
| 33 | * proportional to the memory access latency ratios. | 35 | * proportional to the memory access latency ratios. |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 655da240d13c..d52ec4e83409 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
| @@ -11,6 +11,9 @@ | |||
| 11 | * Rohit Seth <rohit.seth@intel.com> | 11 | * Rohit Seth <rohit.seth@intel.com> |
| 12 | * Ken Chen <kenneth.w.chen@intel.com> | 12 | * Ken Chen <kenneth.w.chen@intel.com> |
| 13 | * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation | 13 | * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation |
| 14 | * Copyright (C) 2007 Intel Corp | ||
| 15 | * Fenghua Yu <fenghua.yu@intel.com> | ||
| 16 | * Add multiple ptc.g/ptc.ga instruction support in global tlb purge. | ||
| 14 | */ | 17 | */ |
| 15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 16 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| @@ -26,6 +29,9 @@ | |||
| 26 | #include <asm/pal.h> | 29 | #include <asm/pal.h> |
| 27 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
| 28 | #include <asm/dma.h> | 31 | #include <asm/dma.h> |
| 32 | #include <asm/processor.h> | ||
| 33 | #include <asm/sal.h> | ||
| 34 | #include <asm/tlb.h> | ||
| 29 | 35 | ||
| 30 | static struct { | 36 | static struct { |
| 31 | unsigned long mask; /* mask of supported purge page-sizes */ | 37 | unsigned long mask; /* mask of supported purge page-sizes */ |
| @@ -39,6 +45,10 @@ struct ia64_ctx ia64_ctx = { | |||
| 39 | }; | 45 | }; |
| 40 | 46 | ||
| 41 | DEFINE_PER_CPU(u8, ia64_need_tlb_flush); | 47 | DEFINE_PER_CPU(u8, ia64_need_tlb_flush); |
| 48 | DEFINE_PER_CPU(u8, ia64_tr_num); /* Number of TR slots in current processor */ | ||
| 49 | DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */ | ||
| 50 | |||
| 51 | struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]; | ||
| 42 | 52 | ||
| 43 | /* | 53 | /* |
| 44 | * Initializes the ia64_ctx.bitmap array based on max_ctx+1. | 54 | * Initializes the ia64_ctx.bitmap array based on max_ctx+1. |
| @@ -84,14 +94,140 @@ wrap_mmu_context (struct mm_struct *mm) | |||
| 84 | local_flush_tlb_all(); | 94 | local_flush_tlb_all(); |
| 85 | } | 95 | } |
| 86 | 96 | ||
| 97 | /* | ||
| 98 | * Implement "spinaphores" ... like counting semaphores, but they | ||
| 99 | * spin instead of sleeping. If there are ever any other users for | ||
| 100 | * this primitive it can be moved up to a spinaphore.h header. | ||
| 101 | */ | ||
| 102 | struct spinaphore { | ||
| 103 | atomic_t cur; | ||
| 104 | }; | ||
| 105 | |||
| 106 | static inline void spinaphore_init(struct spinaphore *ss, int val) | ||
| 107 | { | ||
| 108 | atomic_set(&ss->cur, val); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void down_spin(struct spinaphore *ss) | ||
| 112 | { | ||
| 113 | while (unlikely(!atomic_add_unless(&ss->cur, -1, 0))) | ||
| 114 | while (atomic_read(&ss->cur) == 0) | ||
| 115 | cpu_relax(); | ||
| 116 | } | ||
| 117 | |||
| 118 | static inline void up_spin(struct spinaphore *ss) | ||
| 119 | { | ||
| 120 | atomic_add(1, &ss->cur); | ||
| 121 | } | ||
| 122 | |||
| 123 | static struct spinaphore ptcg_sem; | ||
| 124 | static u16 nptcg = 1; | ||
| 125 | static int need_ptcg_sem = 1; | ||
| 126 | static int toolatetochangeptcgsem = 0; | ||
| 127 | |||
| 128 | /* | ||
| 129 | * Kernel parameter "nptcg=" overrides max number of concurrent global TLB | ||
| 130 | * purges which is reported from either PAL or SAL PALO. | ||
| 131 | * | ||
| 132 | * There is no sanity checking of the nptcg value; it is the user's | ||
| 133 | * responsibility to supply a valid value for the platform. Otherwise, | ||
| 134 | * the kernel may hang in some cases. | ||
| 135 | */ | ||
| 136 | static int __init | ||
| 137 | set_nptcg(char *str) | ||
| 138 | { | ||
| 139 | int value = 0; | ||
| 140 | |||
| 141 | get_option(&str, &value); | ||
| 142 | setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER); | ||
| 143 | |||
| 144 | return 1; | ||
| 145 | } | ||
| 146 | |||
| 147 | __setup("nptcg=", set_nptcg); | ||
| 148 | |||
| 149 | /* | ||
| 150 | * Maximum number of simultaneous ptc.g purges in the system can | ||
| 151 | * be defined by PAL_VM_SUMMARY (in which case we should take | ||
| 152 | * the smallest value for any cpu in the system) or by the PAL | ||
| 153 | * override table (in which case we should ignore the value from | ||
| 154 | * PAL_VM_SUMMARY). | ||
| 155 | * | ||
| 156 | * Kernel parameter "nptcg=" overrides maximum number of simultanesous ptc.g | ||
| 157 | * purges defined in either PAL_VM_SUMMARY or PAL override table. In this case, | ||
| 158 | * we should ignore the value from either PAL_VM_SUMMARY or PAL override table. | ||
| 159 | * | ||
| 160 | * Complicating the logic here is the fact that num_possible_cpus() | ||
| 161 | * isn't fully setup until we start bringing cpus online. | ||
| 162 | */ | ||
| 163 | void | ||
| 164 | setup_ptcg_sem(int max_purges, int nptcg_from) | ||
| 165 | { | ||
| 166 | static int kp_override; | ||
| 167 | static int palo_override; | ||
| 168 | static int firstcpu = 1; | ||
| 169 | |||
| 170 | if (toolatetochangeptcgsem) { | ||
| 171 | BUG_ON(max_purges < nptcg); | ||
| 172 | return; | ||
| 173 | } | ||
| 174 | |||
| 175 | if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) { | ||
| 176 | kp_override = 1; | ||
| 177 | nptcg = max_purges; | ||
| 178 | goto resetsema; | ||
| 179 | } | ||
| 180 | if (kp_override) { | ||
| 181 | need_ptcg_sem = num_possible_cpus() > nptcg; | ||
| 182 | return; | ||
| 183 | } | ||
| 184 | |||
| 185 | if (nptcg_from == NPTCG_FROM_PALO) { | ||
| 186 | palo_override = 1; | ||
| 187 | |||
| 188 | /* In PALO, max_purges == 0 really does mean zero purges */ | ||
| 189 | if (max_purges == 0) | ||
| 190 | panic("Whoa! Platform does not support global TLB purges.\n"); | ||
| 191 | nptcg = max_purges; | ||
| 192 | if (nptcg == PALO_MAX_TLB_PURGES) { | ||
| 193 | need_ptcg_sem = 0; | ||
| 194 | return; | ||
| 195 | } | ||
| 196 | goto resetsema; | ||
| 197 | } | ||
| 198 | if (palo_override) { | ||
| 199 | if (nptcg != PALO_MAX_TLB_PURGES) | ||
| 200 | need_ptcg_sem = (num_possible_cpus() > nptcg); | ||
| 201 | return; | ||
| 202 | } | ||
| 203 | |||
| 204 | /* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */ | ||
| 205 | if (max_purges == 0) max_purges = 1; | ||
| 206 | |||
| 207 | if (firstcpu) { | ||
| 208 | nptcg = max_purges; | ||
| 209 | firstcpu = 0; | ||
| 210 | } | ||
| 211 | if (max_purges < nptcg) | ||
| 212 | nptcg = max_purges; | ||
| 213 | if (nptcg == PAL_MAX_PURGES) { | ||
| 214 | need_ptcg_sem = 0; | ||
| 215 | return; | ||
| 216 | } else | ||
| 217 | need_ptcg_sem = (num_possible_cpus() > nptcg); | ||
| 218 | |||
| 219 | resetsema: | ||
| 220 | spinaphore_init(&ptcg_sem, max_purges); | ||
| 221 | } | ||
| 222 | |||
| 87 | void | 223 | void |
| 88 | ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, | 224 | ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, |
| 89 | unsigned long end, unsigned long nbits) | 225 | unsigned long end, unsigned long nbits) |
| 90 | { | 226 | { |
| 91 | static DEFINE_SPINLOCK(ptcg_lock); | ||
| 92 | |||
| 93 | struct mm_struct *active_mm = current->active_mm; | 227 | struct mm_struct *active_mm = current->active_mm; |
| 94 | 228 | ||
| 229 | toolatetochangeptcgsem = 1; | ||
| 230 | |||
| 95 | if (mm != active_mm) { | 231 | if (mm != active_mm) { |
| 96 | /* Restore region IDs for mm */ | 232 | /* Restore region IDs for mm */ |
| 97 | if (mm && active_mm) { | 233 | if (mm && active_mm) { |
| @@ -102,19 +238,20 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, | |||
| 102 | } | 238 | } |
| 103 | } | 239 | } |
| 104 | 240 | ||
| 105 | /* HW requires global serialization of ptc.ga. */ | 241 | if (need_ptcg_sem) |
| 106 | spin_lock(&ptcg_lock); | 242 | down_spin(&ptcg_sem); |
| 107 | { | 243 | |
| 108 | do { | 244 | do { |
| 109 | /* | 245 | /* |
| 110 | * Flush ALAT entries also. | 246 | * Flush ALAT entries also. |
| 111 | */ | 247 | */ |
| 112 | ia64_ptcga(start, (nbits<<2)); | 248 | ia64_ptcga(start, (nbits << 2)); |
| 113 | ia64_srlz_i(); | 249 | ia64_srlz_i(); |
| 114 | start += (1UL << nbits); | 250 | start += (1UL << nbits); |
| 115 | } while (start < end); | 251 | } while (start < end); |
| 116 | } | 252 | |
| 117 | spin_unlock(&ptcg_lock); | 253 | if (need_ptcg_sem) |
| 254 | up_spin(&ptcg_sem); | ||
| 118 | 255 | ||
| 119 | if (mm != active_mm) { | 256 | if (mm != active_mm) { |
| 120 | activate_context(active_mm); | 257 | activate_context(active_mm); |
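The hunks above replace the single ptcg_lock with a "spinaphore" primed to nptcg, so up to nptcg CPUs may issue ptc.g purges concurrently and callers only spin once that budget is exhausted; when nptcg covers every possible CPU, need_ptcg_sem is cleared and the acquire is skipped entirely. A user-space model of the primitive, using a C11 compare-exchange loop in place of the kernel's atomic_add_unless():

```c
#include <stdatomic.h>
#include <stdio.h>

/* User-space model of the kernel's spinaphore: a counting semaphore
 * that spins instead of sleeping. */
struct spinaphore { atomic_int cur; };

static void spinaphore_init(struct spinaphore *ss, int val)
{
	atomic_init(&ss->cur, val);
}

/* Models atomic_add_unless(&cur, -1, 0): decrement unless already 0. */
static int dec_unless_zero(struct spinaphore *ss)
{
	int old = atomic_load(&ss->cur);

	while (old != 0)
		if (atomic_compare_exchange_weak(&ss->cur, &old, old - 1))
			return 1;
	return 0;
}

static void down_spin(struct spinaphore *ss)
{
	while (!dec_unless_zero(ss))
		while (atomic_load(&ss->cur) == 0)
			;	/* cpu_relax() in the kernel */
}

static void up_spin(struct spinaphore *ss)
{
	atomic_fetch_add(&ss->cur, 1);
}

int main(void)
{
	struct spinaphore ptcg_sem;

	spinaphore_init(&ptcg_sem, 2);	/* e.g. nptcg == 2 */
	down_spin(&ptcg_sem);		/* first purger gets in */
	down_spin(&ptcg_sem);		/* second purger gets in */
	/* a third caller would now spin until an up_spin() */
	up_spin(&ptcg_sem);
	up_spin(&ptcg_sem);
	printf("final count: %d\n", atomic_load(&ptcg_sem.cur));
	return 0;
}
```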
| @@ -190,6 +327,9 @@ ia64_tlb_init (void) | |||
| 190 | ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */ | 327 | ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */ |
| 191 | unsigned long tr_pgbits; | 328 | unsigned long tr_pgbits; |
| 192 | long status; | 329 | long status; |
| 330 | pal_vm_info_1_u_t vm_info_1; | ||
| 331 | pal_vm_info_2_u_t vm_info_2; | ||
| 332 | int cpu = smp_processor_id(); | ||
| 193 | 333 | ||
| 194 | if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) { | 334 | if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) { |
| 195 | printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; " | 335 | printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; " |
| @@ -206,4 +346,191 @@ ia64_tlb_init (void) | |||
| 206 | local_cpu_data->ptce_stride[1] = ptce_info.stride[1]; | 346 | local_cpu_data->ptce_stride[1] = ptce_info.stride[1]; |
| 207 | 347 | ||
| 208 | local_flush_tlb_all(); /* nuke left overs from bootstrapping... */ | 348 | local_flush_tlb_all(); /* nuke left overs from bootstrapping... */ |
| 349 | status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2); | ||
| 350 | |||
| 351 | if (status) { | ||
| 352 | printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); | ||
| 353 | per_cpu(ia64_tr_num, cpu) = 8; | ||
| 354 | return; | ||
| 355 | } | ||
| 356 | per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; | ||
| 357 | if (per_cpu(ia64_tr_num, cpu) > | ||
| 358 | (vm_info_1.pal_vm_info_1_s.max_dtr_entry+1)) | ||
| 359 | per_cpu(ia64_tr_num, cpu) = | ||
| 360 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1; | ||
| 361 | if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { | ||
| 362 | per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; | ||
| 363 | printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX!" | ||
| 364 | "IA64_TR_ALLOC_MAX should be extended\n"); | ||
| 365 | } | ||
| 366 | } | ||
| 367 | |||
| 368 | /* | ||
| 369 | * is_tr_overlap | ||
| 370 | * | ||
| 371 | * Check overlap with inserted TRs. | ||
| 372 | */ | ||
| 373 | static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size) | ||
| 374 | { | ||
| 375 | u64 tr_log_size; | ||
| 376 | u64 tr_end; | ||
| 377 | u64 va_rr = ia64_get_rr(va); | ||
| 378 | u64 va_rid = RR_TO_RID(va_rr); | ||
| 379 | u64 va_end = va + (1<<log_size) - 1; | ||
| 380 | |||
| 381 | if (va_rid != RR_TO_RID(p->rr)) | ||
| 382 | return 0; | ||
| 383 | tr_log_size = (p->itir & 0xff) >> 2; | ||
| 384 | tr_end = p->ifa + (1<<tr_log_size) - 1; | ||
| 385 | |||
| 386 | if (va > tr_end || p->ifa > va_end) | ||
| 387 | return 0; | ||
| 388 | return 1; | ||
| 389 | |||
| 390 | } | ||
| 391 | |||
| 392 | /* | ||
| 393 | * ia64_insert_tr in virtual mode. Allocate a TR slot | ||
| 394 | * | ||
| 395 | * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr | ||
| 396 | * | ||
| 397 | * va : virtual address. | ||
| 398 | * pte : pte entries inserted. | ||
| 399 | * log_size: range to be covered. | ||
| 400 | * | ||
| 401 | * Return value: <0 : error No. | ||
| 402 | * | ||
| 403 | * >=0 : slot number allocated for TR. | ||
| 404 | * Must be called with preemption disabled. | ||
| 405 | */ | ||
| 406 | int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size) | ||
| 407 | { | ||
| 408 | int i, r; | ||
| 409 | unsigned long psr; | ||
| 410 | struct ia64_tr_entry *p; | ||
| 411 | int cpu = smp_processor_id(); | ||
| 412 | |||
| 413 | r = -EINVAL; | ||
| 414 | /* Check overlap with existing TR entries */ | ||
| 415 | if (target_mask & 0x1) { | ||
| 416 | p = &__per_cpu_idtrs[cpu][0][0]; | ||
| 417 | for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); | ||
| 418 | i++, p++) { | ||
| 419 | if (p->pte & 0x1) | ||
| 420 | if (is_tr_overlap(p, va, log_size)) { | ||
| 421 | printk(KERN_DEBUG "Overlapped Entry" | ||
| 422 | "Inserted for TR Reigster!!\n"); | ||
| 423 | goto out; | ||
| 424 | } | ||
| 425 | } | ||
| 426 | } | ||
| 427 | if (target_mask & 0x2) { | ||
| 428 | p = &__per_cpu_idtrs[cpu][1][0]; | ||
| 429 | for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); | ||
| 430 | i++, p++) { | ||
| 431 | if (p->pte & 0x1) | ||
| 432 | if (is_tr_overlap(p, va, log_size)) { | ||
| 433 | printk(KERN_DEBUG "Overlapped Entry" | ||
| 434 | "Inserted for TR Reigster!!\n"); | ||
| 435 | goto out; | ||
| 436 | } | ||
| 437 | } | ||
| 438 | } | ||
| 439 | |||
| 440 | for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { | ||
| 441 | switch (target_mask & 0x3) { | ||
| 442 | case 1: | ||
| 443 | if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1)) | ||
| 444 | goto found; | ||
| 445 | continue; | ||
| 446 | case 2: | ||
| 447 | if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1)) | ||
| 448 | goto found; | ||
| 449 | continue; | ||
| 450 | case 3: | ||
| 451 | if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) && | ||
| 452 | !(__per_cpu_idtrs[cpu][1][i].pte & 0x1)) | ||
| 453 | goto found; | ||
| 454 | continue; | ||
| 455 | default: | ||
| 456 | r = -EINVAL; | ||
| 457 | goto out; | ||
| 458 | } | ||
| 459 | } | ||
| 460 | found: | ||
| 461 | if (i >= per_cpu(ia64_tr_num, cpu)) | ||
| 462 | return -EBUSY; | ||
| 463 | |||
| 464 | /* Record TR info for MCA handler use! */ | ||
| 465 | if (i > per_cpu(ia64_tr_used, cpu)) | ||
| 466 | per_cpu(ia64_tr_used, cpu) = i; | ||
| 467 | |||
| 468 | psr = ia64_clear_ic(); | ||
| 469 | if (target_mask & 0x1) { | ||
| 470 | ia64_itr(0x1, i, va, pte, log_size); | ||
| 471 | ia64_srlz_i(); | ||
| 472 | p = &__per_cpu_idtrs[cpu][0][i]; | ||
| 473 | p->ifa = va; | ||
| 474 | p->pte = pte; | ||
| 475 | p->itir = log_size << 2; | ||
| 476 | p->rr = ia64_get_rr(va); | ||
| 477 | } | ||
| 478 | if (target_mask & 0x2) { | ||
| 479 | ia64_itr(0x2, i, va, pte, log_size); | ||
| 480 | ia64_srlz_i(); | ||
| 481 | p = &__per_cpu_idtrs[cpu][1][i]; | ||
| 482 | p->ifa = va; | ||
| 483 | p->pte = pte; | ||
| 484 | p->itir = log_size << 2; | ||
| 485 | p->rr = ia64_get_rr(va); | ||
| 486 | } | ||
| 487 | ia64_set_psr(psr); | ||
| 488 | r = i; | ||
| 489 | out: | ||
| 490 | return r; | ||
| 491 | } | ||
| 492 | EXPORT_SYMBOL_GPL(ia64_itr_entry); | ||
| 493 | |||
| 494 | /* | ||
| 495 | * ia64_purge_tr | ||
| 496 | * | ||
| 497 | * target_mask: 0x1: purge itr, 0x2 : purge dtr, 0x3 purge idtr. | ||
| 498 | * slot: slot number to be freed. | ||
| 499 | * | ||
| 500 | * Must be called with preemption disabled. | ||
| 501 | */ | ||
| 502 | void ia64_ptr_entry(u64 target_mask, int slot) | ||
| 503 | { | ||
| 504 | int cpu = smp_processor_id(); | ||
| 505 | int i; | ||
| 506 | struct ia64_tr_entry *p; | ||
| 507 | |||
| 508 | if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu)) | ||
| 509 | return; | ||
| 510 | |||
| 511 | if (target_mask & 0x1) { | ||
| 512 | p = &__per_cpu_idtrs[cpu][0][slot]; | ||
| 513 | if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) { | ||
| 514 | p->pte = 0; | ||
| 515 | ia64_ptr(0x1, p->ifa, p->itir>>2); | ||
| 516 | ia64_srlz_i(); | ||
| 517 | } | ||
| 518 | } | ||
| 519 | |||
| 520 | if (target_mask & 0x2) { | ||
| 521 | p = &__per_cpu_idtrs[cpu][1][slot]; | ||
| 522 | if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) { | ||
| 523 | p->pte = 0; | ||
| 524 | ia64_ptr(0x2, p->ifa, p->itir>>2); | ||
| 525 | ia64_srlz_i(); | ||
| 526 | } | ||
| 527 | } | ||
| 528 | |||
| 529 | for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) { | ||
| 530 | if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) || | ||
| 531 | (__per_cpu_idtrs[cpu][1][i].pte & 0x1)) | ||
| 532 | break; | ||
| 533 | } | ||
| 534 | per_cpu(ia64_tr_used, cpu) = i; | ||
| 209 | } | 535 | } |
| 536 | EXPORT_SYMBOL_GPL(ia64_ptr_entry); | ||
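ia64_itr_entry()/ia64_ptr_entry() give kernel code a dynamic allocator for the translation registers above IA64_TR_ALLOC_BASE: the insert path rejects overlaps with already-pinned ranges, picks the lowest free slot for the requested register class, and records the entry for the MCA handler; the purge path frees the slot and shrinks ia64_tr_used. A hedged sketch of a typical caller, assuming kernel context; the helper name and the va/pte/log_size arguments are illustrative, not code from this series:

```c
#include <linux/preempt.h>
#include <linux/types.h>
#include <asm/tlb.h>	/* ia64_itr_entry(), ia64_ptr_entry() */

static int pin_buffer_example(u64 va, u64 pte, u64 log_size)
{
	int slot;

	preempt_disable();	/* both calls require preemption off */
	slot = ia64_itr_entry(0x2, va, pte, log_size);	/* 0x2 == dtr */
	preempt_enable();
	if (slot < 0)
		return slot;	/* -EBUSY: no free slot, -EINVAL: overlap */

	/* ... use the pinned translation ... */

	preempt_disable();
	ia64_ptr_entry(0x2, slot);	/* purge and free the slot */
	preempt_enable();
	return 0;
}
```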
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 81785b78bc1e..9e0b164da9c2 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c | |||
| @@ -199,7 +199,7 @@ xpc_timeout_partition_disengage_request(unsigned long data) | |||
| 199 | struct xpc_partition *part = (struct xpc_partition *) data; | 199 | struct xpc_partition *part = (struct xpc_partition *) data; |
| 200 | 200 | ||
| 201 | 201 | ||
| 202 | DBUG_ON(jiffies < part->disengage_request_timeout); | 202 | DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); |
| 203 | 203 | ||
| 204 | (void) xpc_partition_disengaged(part); | 204 | (void) xpc_partition_disengaged(part); |
| 205 | 205 | ||
| @@ -230,7 +230,7 @@ xpc_hb_beater(unsigned long dummy) | |||
| 230 | { | 230 | { |
| 231 | xpc_vars->heartbeat++; | 231 | xpc_vars->heartbeat++; |
| 232 | 232 | ||
| 233 | if (jiffies >= xpc_hb_check_timeout) { | 233 | if (time_after_eq(jiffies, xpc_hb_check_timeout)) { |
| 234 | wake_up_interruptible(&xpc_act_IRQ_wq); | 234 | wake_up_interruptible(&xpc_act_IRQ_wq); |
| 235 | } | 235 | } |
| 236 | 236 | ||
| @@ -270,7 +270,7 @@ xpc_hb_checker(void *ignore) | |||
| 270 | 270 | ||
| 271 | 271 | ||
| 272 | /* checking of remote heartbeats is skewed by IRQ handling */ | 272 | /* checking of remote heartbeats is skewed by IRQ handling */ |
| 273 | if (jiffies >= xpc_hb_check_timeout) { | 273 | if (time_after_eq(jiffies, xpc_hb_check_timeout)) { |
| 274 | dev_dbg(xpc_part, "checking remote heartbeats\n"); | 274 | dev_dbg(xpc_part, "checking remote heartbeats\n"); |
| 275 | xpc_check_remote_hb(); | 275 | xpc_check_remote_hb(); |
| 276 | 276 | ||
| @@ -305,7 +305,7 @@ xpc_hb_checker(void *ignore) | |||
| 305 | /* wait for IRQ or timeout */ | 305 | /* wait for IRQ or timeout */ |
| 306 | (void) wait_event_interruptible(xpc_act_IRQ_wq, | 306 | (void) wait_event_interruptible(xpc_act_IRQ_wq, |
| 307 | (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || | 307 | (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || |
| 308 | jiffies >= xpc_hb_check_timeout || | 308 | time_after_eq(jiffies, xpc_hb_check_timeout) || |
| 309 | (volatile int) xpc_exiting)); | 309 | (volatile int) xpc_exiting)); |
| 310 | } | 310 | } |
| 311 | 311 | ||
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c index 7ba403232cb8..9e97c2684832 100644 --- a/arch/ia64/sn/kernel/xpc_partition.c +++ b/arch/ia64/sn/kernel/xpc_partition.c | |||
| @@ -877,7 +877,7 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
| 877 | disengaged = (xpc_partition_engaged(1UL << partid) == 0); | 877 | disengaged = (xpc_partition_engaged(1UL << partid) == 0); |
| 878 | if (part->disengage_request_timeout) { | 878 | if (part->disengage_request_timeout) { |
| 879 | if (!disengaged) { | 879 | if (!disengaged) { |
| 880 | if (jiffies < part->disengage_request_timeout) { | 880 | if (time_before(jiffies, part->disengage_request_timeout)) { |
| 881 | /* timelimit hasn't been reached yet */ | 881 | /* timelimit hasn't been reached yet */ |
| 882 | return 0; | 882 | return 0; |
| 883 | } | 883 | } |
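The xpc changes swap raw jiffies comparisons for the time_* macros, which stay correct across counter wraparound by comparing signed differences rather than absolute values. A small user-space demonstration of the failure mode the macros avoid:

```c
#include <stdio.h>

/* Model of include/linux/jiffies.h time_after_eq(): the cast to a
 * signed long makes the comparison wrap-safe. */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long timeout = (unsigned long)-10;	/* just before wrap */
	unsigned long jiffies = 5;			/* just after wrap */

	/* Naive compare says "not expired" though 15 ticks have passed: */
	printf("naive: %d\n", jiffies >= timeout);		 /* 0 */
	/* Wrap-safe compare gets it right: */
	printf("safe:  %d\n", time_after_eq(jiffies, timeout));	 /* 1 */
	return 0;
}
```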
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h index cd1cc39b5599..fcfad326f4c7 100644 --- a/include/asm-ia64/acpi.h +++ b/include/asm-ia64/acpi.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
| 36 | #include <linux/numa.h> | 36 | #include <linux/numa.h> |
| 37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
| 38 | #include <asm/numa.h> | ||
| 38 | 39 | ||
| 39 | #define COMPILER_DEPENDENT_INT64 long | 40 | #define COMPILER_DEPENDENT_INT64 long |
| 40 | #define COMPILER_DEPENDENT_UINT64 unsigned long | 41 | #define COMPILER_DEPENDENT_UINT64 unsigned long |
| @@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu); | |||
| 115 | extern void set_cpei_target_cpu(unsigned int cpu); | 116 | extern void set_cpei_target_cpu(unsigned int cpu); |
| 116 | extern unsigned int get_cpei_target_cpu(void); | 117 | extern unsigned int get_cpei_target_cpu(void); |
| 117 | extern void prefill_possible_map(void); | 118 | extern void prefill_possible_map(void); |
| 119 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | ||
| 118 | extern int additional_cpus; | 120 | extern int additional_cpus; |
| 121 | #else | ||
| 122 | #define additional_cpus 0 | ||
| 123 | #endif | ||
| 119 | 124 | ||
| 120 | #ifdef CONFIG_ACPI_NUMA | 125 | #ifdef CONFIG_ACPI_NUMA |
| 121 | #if MAX_NUMNODES > 256 | 126 | #if MAX_NUMNODES > 256 |
| @@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; | |||
| 129 | 134 | ||
| 130 | #define acpi_unlazy_tlb(x) | 135 | #define acpi_unlazy_tlb(x) |
| 131 | 136 | ||
| 137 | #ifdef CONFIG_ACPI_NUMA | ||
| 138 | extern cpumask_t early_cpu_possible_map; | ||
| 139 | #define for_each_possible_early_cpu(cpu) \ | ||
| 140 | for_each_cpu_mask((cpu), early_cpu_possible_map) | ||
| 141 | |||
| 142 | static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus) | ||
| 143 | { | ||
| 144 | int low_cpu, high_cpu; | ||
| 145 | int cpu; | ||
| 146 | int next_nid = 0; | ||
| 147 | |||
| 148 | low_cpu = cpus_weight(early_cpu_possible_map); | ||
| 149 | |||
| 150 | high_cpu = max(low_cpu, min_cpus); | ||
| 151 | high_cpu = min(high_cpu + reserve_cpus, NR_CPUS); | ||
| 152 | |||
| 153 | for (cpu = low_cpu; cpu < high_cpu; cpu++) { | ||
| 154 | cpu_set(cpu, early_cpu_possible_map); | ||
| 155 | if (node_cpuid[cpu].nid == NUMA_NO_NODE) { | ||
| 156 | node_cpuid[cpu].nid = next_nid; | ||
| 157 | next_nid++; | ||
| 158 | if (next_nid >= num_online_nodes()) | ||
| 159 | next_nid = 0; | ||
| 160 | } | ||
| 161 | } | ||
| 162 | } | ||
| 163 | #endif /* CONFIG_ACPI_NUMA */ | ||
| 164 | |||
| 132 | #endif /*__KERNEL__*/ | 165 | #endif /*__KERNEL__*/ |
| 133 | 166 | ||
| 134 | #endif /*_ASM_ACPI_H*/ | 167 | #endif /*_ASM_ACPI_H*/ |
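per_cpu_scan_finalize() sizes the early possible map: it starts from the CPUs ACPI actually found, raises that to at least min_cpus, adds reserve_cpus, clamps at NR_CPUS, and round-robins node IDs over any newly added slots that lack one. A user-space model of just the sizing arithmetic (the caller's min_cpus/reserve_cpus values below are illustrative):

```c
#include <stdio.h>

#define NR_CPUS 64

/* Models how many per-cpu areas stay reserved for a given number of
 * discovered CPUs; mirrors the low_cpu/high_cpu computation above. */
static int finalize_model(int discovered, int min_cpus, int reserve_cpus)
{
	int low_cpu = discovered;	/* cpus_weight(early map) */
	int high_cpu = low_cpu > min_cpus ? low_cpu : min_cpus;

	high_cpu += reserve_cpus;
	if (high_cpu > NR_CPUS)
		high_cpu = NR_CPUS;
	return high_cpu;		/* slots kept possible */
}

int main(void)
{
	/* 4 CPUs found by ACPI, caller asks for at least 8 plus 2 spare: */
	printf("possible cpus: %d\n", finalize_model(4, 8, 2));	/* 10 */
	return 0;
}
```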
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h index 72400a78002a..f9abdec6577a 100644 --- a/include/asm-ia64/cputime.h +++ b/include/asm-ia64/cputime.h | |||
| @@ -1,6 +1,110 @@ | |||
| 1 | /* | ||
| 2 | * include/asm-ia64/cputime.h: | ||
| 3 | * Definitions for measuring cputime on ia64 machines. | ||
| 4 | * | ||
| 5 | * Based on <asm-powerpc/cputime.h>. | ||
| 6 | * | ||
| 7 | * Copyright (C) 2007 FUJITSU LIMITED | ||
| 8 | * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License | ||
| 12 | * as published by the Free Software Foundation; either version | ||
| 13 | * 2 of the License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec. | ||
| 16 | * Otherwise we measure cpu time in jiffies using the generic definitions. | ||
| 17 | */ | ||
| 18 | |||
| 1 | #ifndef __IA64_CPUTIME_H | 19 | #ifndef __IA64_CPUTIME_H |
| 2 | #define __IA64_CPUTIME_H | 20 | #define __IA64_CPUTIME_H |
| 3 | 21 | ||
| 22 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 4 | #include <asm-generic/cputime.h> | 23 | #include <asm-generic/cputime.h> |
| 24 | #else | ||
| 25 | |||
| 26 | #include <linux/time.h> | ||
| 27 | #include <linux/jiffies.h> | ||
| 28 | #include <asm/processor.h> | ||
| 29 | |||
| 30 | typedef u64 cputime_t; | ||
| 31 | typedef u64 cputime64_t; | ||
| 32 | |||
| 33 | #define cputime_zero ((cputime_t)0) | ||
| 34 | #define cputime_max ((~((cputime_t)0) >> 1) - 1) | ||
| 35 | #define cputime_add(__a, __b) ((__a) + (__b)) | ||
| 36 | #define cputime_sub(__a, __b) ((__a) - (__b)) | ||
| 37 | #define cputime_div(__a, __n) ((__a) / (__n)) | ||
| 38 | #define cputime_halve(__a) ((__a) >> 1) | ||
| 39 | #define cputime_eq(__a, __b) ((__a) == (__b)) | ||
| 40 | #define cputime_gt(__a, __b) ((__a) > (__b)) | ||
| 41 | #define cputime_ge(__a, __b) ((__a) >= (__b)) | ||
| 42 | #define cputime_lt(__a, __b) ((__a) < (__b)) | ||
| 43 | #define cputime_le(__a, __b) ((__a) <= (__b)) | ||
| 44 | |||
| 45 | #define cputime64_zero ((cputime64_t)0) | ||
| 46 | #define cputime64_add(__a, __b) ((__a) + (__b)) | ||
| 47 | #define cputime64_sub(__a, __b) ((__a) - (__b)) | ||
| 48 | #define cputime_to_cputime64(__ct) (__ct) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Convert cputime <-> jiffies (HZ) | ||
| 52 | */ | ||
| 53 | #define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) | ||
| 54 | #define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) | ||
| 55 | #define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) | ||
| 56 | #define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Convert cputime <-> milliseconds | ||
| 60 | */ | ||
| 61 | #define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC) | ||
| 62 | #define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC) | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Convert cputime <-> seconds | ||
| 66 | */ | ||
| 67 | #define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC) | ||
| 68 | #define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC) | ||
| 69 | |||
| 70 | /* | ||
| 71 | * Convert cputime <-> timespec (nsec) | ||
| 72 | */ | ||
| 73 | static inline cputime_t timespec_to_cputime(const struct timespec *val) | ||
| 74 | { | ||
| 75 | cputime_t ret = val->tv_sec * NSEC_PER_SEC; | ||
| 76 | return (ret + val->tv_nsec); | ||
| 77 | } | ||
| 78 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) | ||
| 79 | { | ||
| 80 | val->tv_sec = ct / NSEC_PER_SEC; | ||
| 81 | val->tv_nsec = ct % NSEC_PER_SEC; | ||
| 82 | } | ||
| 83 | |||
| 84 | /* | ||
| 85 | * Convert cputime <-> timeval (msec) | ||
| 86 | */ | ||
| 87 | static inline cputime_t timeval_to_cputime(struct timeval *val) | ||
| 88 | { | ||
| 89 | cputime_t ret = val->tv_sec * NSEC_PER_SEC; | ||
| 90 | return (ret + val->tv_usec * NSEC_PER_USEC); | ||
| 91 | } | ||
| 92 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) | ||
| 93 | { | ||
| 94 | val->tv_sec = ct / NSEC_PER_SEC; | ||
| 95 | val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC; | ||
| 96 | } | ||
| 97 | |||
| 98 | /* | ||
| 99 | * Convert cputime <-> clock (USER_HZ) | ||
| 100 | */ | ||
| 101 | #define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ)) | ||
| 102 | #define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ)) | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Convert cputime64 to clock. | ||
| 106 | */ | ||
| 107 | #define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct) | ||
| 5 | 108 | ||
| 109 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
| 6 | #endif /* __IA64_CPUTIME_H */ | 110 | #endif /* __IA64_CPUTIME_H */ |
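With CONFIG_VIRT_CPU_ACCOUNTING the cputime_t unit becomes nanoseconds, so the conversion macros above reduce to plain divisions by NSEC_PER_* constants instead of the jiffies-based generic definitions. A quick user-space check of the arithmetic (the HZ value is an illustrative config choice):

```c
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_MSEC	1000000ULL
#define HZ		250	/* illustrative config value */

typedef unsigned long long cputime_t;	/* nsec, as in the new header */

int main(void)
{
	cputime_t ct = 1500000000ULL;	/* 1.5 s of cpu time */

	printf("msecs:   %llu\n", ct / NSEC_PER_MSEC);		/* 1500 */
	printf("jiffies: %llu\n", ct / (NSEC_PER_SEC / HZ));	/* 375  */
	return 0;
}
```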
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h index f8e83eca67a2..5e0c1a6bce8d 100644 --- a/include/asm-ia64/elf.h +++ b/include/asm-ia64/elf.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #define ELF_ARCH EM_IA_64 | 26 | #define ELF_ARCH EM_IA_64 |
| 27 | 27 | ||
| 28 | #define USE_ELF_CORE_DUMP | 28 | #define USE_ELF_CORE_DUMP |
| 29 | #define CORE_DUMP_USE_REGSET | ||
| 29 | 30 | ||
| 30 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are | 31 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are |
| 31 | interpreted as follows by Linux: */ | 32 | interpreted as follows by Linux: */ |
| @@ -154,6 +155,30 @@ extern void ia64_init_addr_space (void); | |||
| 154 | #define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */ | 155 | #define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */ |
| 155 | #define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */ | 156 | #define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */ |
| 156 | 157 | ||
| 158 | /* elf_gregset_t register offsets */ | ||
| 159 | #define ELF_GR_0_OFFSET 0 | ||
| 160 | #define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t)) | ||
| 161 | #define ELF_PR_OFFSET (33 * sizeof(elf_greg_t)) | ||
| 162 | #define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t)) | ||
| 163 | #define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t)) | ||
| 164 | #define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t)) | ||
| 165 | #define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t)) | ||
| 166 | #define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t)) | ||
| 167 | #define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t)) | ||
| 168 | #define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t)) | ||
| 169 | #define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t)) | ||
| 170 | #define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t)) | ||
| 171 | #define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t)) | ||
| 172 | #define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t)) | ||
| 173 | #define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t)) | ||
| 174 | #define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t)) | ||
| 175 | #define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t)) | ||
| 176 | #define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t)) | ||
| 177 | #define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t)) | ||
| 178 | #define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t)) | ||
| 179 | #define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t)) | ||
| 180 | #define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t)) | ||
| 181 | |||
| 157 | typedef unsigned long elf_fpxregset_t; | 182 | typedef unsigned long elf_fpxregset_t; |
| 158 | 183 | ||
| 159 | typedef unsigned long elf_greg_t; | 184 | typedef unsigned long elf_greg_t; |
| @@ -183,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst); | |||
| 183 | 208 | ||
| 184 | struct task_struct; | 209 | struct task_struct; |
| 185 | 210 | ||
| 186 | extern int dump_task_regs(struct task_struct *, elf_gregset_t *); | ||
| 187 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); | ||
| 188 | |||
| 189 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs) | ||
| 190 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | ||
| 191 | |||
| 192 | #define GATE_EHDR ((const struct elfhdr *) GATE_ADDR) | 211 | #define GATE_EHDR ((const struct elfhdr *) GATE_ADDR) |
| 193 | 212 | ||
| 194 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ | 213 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ |
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index d03bf9ff68e3..ef71b57fc2f4 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h | |||
| @@ -30,8 +30,12 @@ | |||
| 30 | #include <asm/break.h> | 30 | #include <asm/break.h> |
| 31 | 31 | ||
| 32 | #define __ARCH_WANT_KPROBES_INSN_SLOT | 32 | #define __ARCH_WANT_KPROBES_INSN_SLOT |
| 33 | #define MAX_INSN_SIZE 1 | 33 | #define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */ |
| 34 | #define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) | 34 | #define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) |
| 35 | #define NOP_M_INST (long)(1<<27) | ||
| 36 | #define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \ | ||
| 37 | (0x1L << 12) | /* many */ \ | ||
| 38 | (((i1) & 1) << 36) | ((i2) << 13))) /* imm */ | ||
| 35 | 39 | ||
| 36 | typedef union cmp_inst { | 40 | typedef union cmp_inst { |
| 37 | struct { | 41 | struct { |
| @@ -112,6 +116,7 @@ struct arch_specific_insn { | |||
| 112 | #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1 | 116 | #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1 |
| 113 | #define INST_FLAG_FIX_BRANCH_REG 2 | 117 | #define INST_FLAG_FIX_BRANCH_REG 2 |
| 114 | #define INST_FLAG_BREAK_INST 4 | 118 | #define INST_FLAG_BREAK_INST 4 |
| 119 | #define INST_FLAG_BOOSTABLE 8 | ||
| 115 | unsigned long inst_flag; | 120 | unsigned long inst_flag; |
| 116 | unsigned short target_br_reg; | 121 | unsigned short target_br_reg; |
| 117 | unsigned short slot; | 122 | unsigned short slot; |
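MAX_INSN_SIZE grows to two slots so the kprobe-booster can plant a long branch (brl) that jumps straight back into the original instruction stream instead of single-stepping out of the slot. BRL_INST() assembles that encoding from two immediate fields; how the booster splits a branch displacement between i1 and i2 is not shown in this hunk, so the operands below are purely illustrative:

```c
#include <stdio.h>

/* BRL_INST() as added in asm-ia64/kprobes.h above. */
#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
			  (0x1L << 12) |	/* many */ \
			  (((i1) & 1) << 36) | ((i2) << 13)))	/* imm */

int main(void)
{
	/* Long operands keep the shifts in 64-bit arithmetic. */
	printf("brl encoding: %#lx\n", BRL_INST(0L, 0x1fL));
	return 0;
}
```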
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h index 7e55a584975c..aefcdfee7f23 100644 --- a/include/asm-ia64/kregs.h +++ b/include/asm-ia64/kregs.h | |||
| @@ -31,6 +31,9 @@ | |||
| 31 | #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ | 31 | #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ |
| 32 | #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ | 32 | #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ |
| 33 | 33 | ||
| 34 | #define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/ | ||
| 35 | #define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/ | ||
| 36 | |||
| 34 | /* Processor status register bits: */ | 37 | /* Processor status register bits: */ |
| 35 | #define IA64_PSR_BE_BIT 1 | 38 | #define IA64_PSR_BE_BIT 1 |
| 36 | #define IA64_PSR_UP_BIT 2 | 39 | #define IA64_PSR_UP_BIT 2 |
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h index f93308f54b61..7245a5781594 100644 --- a/include/asm-ia64/meminit.h +++ b/include/asm-ia64/meminit.h | |||
| @@ -35,6 +35,7 @@ extern void find_memory (void); | |||
| 35 | extern void reserve_memory (void); | 35 | extern void reserve_memory (void); |
| 36 | extern void find_initrd (void); | 36 | extern void find_initrd (void); |
| 37 | extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); | 37 | extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); |
| 38 | extern int filter_memory (unsigned long start, unsigned long end, void *arg); | ||
| 38 | extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e); | 39 | extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e); |
| 39 | extern int find_max_min_low_pfn (unsigned long , unsigned long, void *); | 40 | extern int find_max_min_low_pfn (unsigned long , unsigned long, void *); |
| 40 | 41 | ||
| @@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end); | |||
| 56 | 57 | ||
| 57 | #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ | 58 | #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ |
| 58 | 59 | ||
| 59 | extern int register_active_ranges(u64 start, u64 end, void *arg); | 60 | extern int register_active_ranges(u64 start, u64 len, int nid); |
| 60 | 61 | ||
| 61 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 62 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 62 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ | 63 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ |
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h index 6a8a27cfae3e..3499ff57bf42 100644 --- a/include/asm-ia64/numa.h +++ b/include/asm-ia64/numa.h | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | 22 | ||
| 23 | #include <asm/mmzone.h> | 23 | #include <asm/mmzone.h> |
| 24 | 24 | ||
| 25 | #define NUMA_NO_NODE -1 | ||
| 26 | |||
| 25 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; | 27 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; |
| 26 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; | 28 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; |
| 27 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; | 29 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; |
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 8a695d3407d2..67b02901ead4 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | * Copyright (C) 1999 VA Linux Systems | 13 | * Copyright (C) 1999 VA Linux Systems |
| 14 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> | 14 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> |
| 15 | * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> | 15 | * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> |
| 16 | * Copyright (C) 2008 Silicon Graphics, Inc. (SGI) | ||
| 16 | * | 17 | * |
| 17 | * 99/10/01 davidm Make sure we pass zero for reserved parameters. | 18 | * 99/10/01 davidm Make sure we pass zero for reserved parameters. |
| 18 | * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. | 19 | * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. |
| @@ -73,6 +74,8 @@ | |||
| 73 | #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ | 74 | #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ |
| 74 | #define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */ | 75 | #define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */ |
| 75 | #define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */ | 76 | #define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */ |
| 77 | #define PAL_VP_INFO 50 /* Information about virtual processor features */ | ||
| 78 | #define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */ | ||
| 76 | 79 | ||
| 77 | #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ | 80 | #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ |
| 78 | #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ | 81 | #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ |
| @@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s { | |||
| 504 | wiv : 1, /* Way field valid */ | 507 | wiv : 1, /* Way field valid */ |
| 505 | reserved2 : 1, | 508 | reserved2 : 1, |
| 506 | dp : 1, /* Data poisoned on MBE */ | 509 | dp : 1, /* Data poisoned on MBE */ |
| 507 | reserved3 : 8, | 510 | reserved3 : 6, |
| 511 | hlth : 2, /* Health indicator */ | ||
| 508 | 512 | ||
| 509 | index : 20, /* Cache line index */ | 513 | index : 20, /* Cache line index */ |
| 510 | reserved4 : 2, | 514 | reserved4 : 2, |
| @@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s { | |||
| 542 | dtc : 1, /* Fail in data TC */ | 546 | dtc : 1, /* Fail in data TC */ |
| 543 | itc : 1, /* Fail in inst. TC */ | 547 | itc : 1, /* Fail in inst. TC */ |
| 544 | op : 4, /* Cache operation */ | 548 | op : 4, /* Cache operation */ |
| 545 | reserved3 : 30, | 549 | reserved3 : 6, |
| 550 | hlth : 2, /* Health indicator */ | ||
| 551 | reserved4 : 22, | ||
| 546 | 552 | ||
| 547 | is : 1, /* instruction set (1 == ia32) */ | 553 | is : 1, /* instruction set (1 == ia32) */ |
| 548 | iv : 1, /* instruction set field valid */ | 554 | iv : 1, /* instruction set field valid */ |
| @@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s { | |||
| 633 | way : 6, /* Way of structure */ | 639 | way : 6, /* Way of structure */ |
| 634 | wv : 1, /* way valid */ | 640 | wv : 1, /* way valid */ |
| 635 | xv : 1, /* index valid */ | 641 | xv : 1, /* index valid */ |
| 636 | reserved1 : 8, | 642 | reserved1 : 6, |
| 643 | hlth : 2, /* Health indicator */ | ||
| 637 | index : 8, /* Index or set of the uarch | 644 | index : 8, /* Index or set of the uarch |
| 638 | * structure that failed. | 645 | * structure that failed. |
| 639 | */ | 646 | */ |
| @@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void) | |||
| 1213 | 1220 | ||
| 1214 | /* Return the machine check dynamic processor state */ | 1221 | /* Return the machine check dynamic processor state */ |
| 1215 | static inline s64 | 1222 | static inline s64 |
| 1216 | ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds) | 1223 | ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size) |
| 1217 | { | 1224 | { |
| 1218 | struct ia64_pal_retval iprv; | 1225 | struct ia64_pal_retval iprv; |
| 1219 | PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0); | 1226 | PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0); |
| 1220 | if (size) | 1227 | if (size) |
| 1221 | *size = iprv.v0; | 1228 | *size = iprv.v0; |
| 1222 | if (pds) | ||
| 1223 | *pds = iprv.v1; | ||
| 1224 | return iprv.status; | 1229 | return iprv.status; |
| 1225 | } | 1230 | } |
| 1226 | 1231 | ||
| @@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous) | |||
| 1281 | return iprv.status; | 1286 | return iprv.status; |
| 1282 | } | 1287 | } |
| 1283 | 1288 | ||
| 1289 | typedef union pal_hw_tracking_u { | ||
| 1290 | u64 pht_data; | ||
| 1291 | struct { | ||
| 1292 | u64 itc :4, /* Instruction cache tracking */ | ||
| 1293 | dct :4, /* Data cache tracking */ | ||
| 1294 | itt :4, /* Instruction TLB tracking */ | ||
| 1295 | ddt :4, /* Data TLB tracking */ | ||
| 1296 | reserved:48; | ||
| 1297 | } pal_hw_tracking_s; | ||
| 1298 | } pal_hw_tracking_u_t; | ||
| 1299 | |||
| 1300 | /* | ||
| 1301 | * Hardware tracking status. | ||
| 1302 | */ | ||
| 1303 | static inline s64 | ||
| 1304 | ia64_pal_mc_hw_tracking (u64 *status) | ||
| 1305 | { | ||
| 1306 | struct ia64_pal_retval iprv; | ||
| 1307 | PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0); | ||
| 1308 | if (status) | ||
| 1309 | *status = iprv.v0; | ||
| 1310 | return iprv.status; | ||
| 1311 | } | ||
| 1312 | |||
| 1284 | /* Register a platform dependent location with PAL to which it can save | 1313 | /* Register a platform dependent location with PAL to which it can save |
| 1285 | * minimal processor state in the event of a machine check or initialization | 1314 | * minimal processor state in the event of a machine check or initialization |
| 1286 | * event. | 1315 | * event. |
| 1287 | */ | 1316 | */ |
| 1288 | static inline s64 | 1317 | static inline s64 |
| 1289 | ia64_pal_mc_register_mem (u64 physical_addr) | 1318 | ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size) |
| 1290 | { | 1319 | { |
| 1291 | struct ia64_pal_retval iprv; | 1320 | struct ia64_pal_retval iprv; |
| 1292 | PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0); | 1321 | PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0); |
| 1322 | if (req_size) | ||
| 1323 | *req_size = iprv.v0; | ||
| 1293 | return iprv.status; | 1324 | return iprv.status; |
| 1294 | } | 1325 | } |
| 1295 | 1326 | ||
| @@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2) | |||
| 1631 | return iprv.status; | 1662 | return iprv.status; |
| 1632 | } | 1663 | } |
| 1633 | 1664 | ||
| 1665 | typedef union pal_vp_info_u { | ||
| 1666 | u64 pvi_val; | ||
| 1667 | struct { | ||
| 1668 | u64 index: 48, /* virtual feature set info */ | ||
| 1669 | vmm_id: 16; /* feature set id */ | ||
| 1670 | } pal_vp_info_s; | ||
| 1671 | } pal_vp_info_u_t; | ||
| 1672 | |||
| 1673 | /* | ||
| 1674 | * Returns information about virtual processor features | ||
| 1675 | */ | ||
| 1676 | static inline s64 | ||
| 1677 | ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id) | ||
| 1678 | { | ||
| 1679 | struct ia64_pal_retval iprv; | ||
| 1680 | PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0); | ||
| 1681 | if (vp_info) | ||
| 1682 | *vp_info = iprv.v0; | ||
| 1683 | if (vmm_id) | ||
| 1684 | *vmm_id = iprv.v1; | ||
| 1685 | return iprv.status; | ||
| 1686 | } | ||
| 1687 | |||
| 1634 | typedef union pal_itr_valid_u { | 1688 | typedef union pal_itr_valid_u { |
| 1635 | u64 piv_val; | 1689 | u64 piv_val; |
| 1636 | struct { | 1690 | struct { |
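ia64_pal_mc_register_mem() gains a size argument and reports the size PAL actually requires through *req_size. A hedged kernel-context sketch of a caller; whether PAL flags a too-small buffer via the status code or only via *req_size is an assumption here, not something this hunk shows:

```c
#include <linux/kernel.h>
#include <asm/pal.h>

static s64 register_mca_mem(u64 phys, u64 size)
{
	u64 req_size = 0;
	s64 status = ia64_pal_mc_register_mem(phys, size, &req_size);

	if (status != 0 && req_size > size)
		printk(KERN_INFO "PAL wants %llu bytes, buffer is %llu\n",
		       (unsigned long long)req_size,
		       (unsigned long long)size);
	return status;
}
```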
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index e6204f14f614..ed70862ea247 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h | |||
| @@ -371,7 +371,7 @@ pgd_index (unsigned long address) | |||
| 371 | /* The offset in the 1-level directory is given by the 3 region bits | 371 | /* The offset in the 1-level directory is given by the 3 region bits |
| 372 | (61..63) and the level-1 bits. */ | 372 | (61..63) and the level-1 bits. */ |
| 373 | static inline pgd_t* | 373 | static inline pgd_t* |
| 374 | pgd_offset (struct mm_struct *mm, unsigned long address) | 374 | pgd_offset (const struct mm_struct *mm, unsigned long address) |
| 375 | { | 375 | { |
| 376 | return mm->pgd + pgd_index(address); | 376 | return mm->pgd + pgd_index(address); |
| 377 | } | 377 | } |
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index f4904db3b057..89594b442f83 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h | |||
| @@ -296,6 +296,9 @@ enum { | |||
| 296 | EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) | 296 | EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) |
| 297 | #define SAL_PLAT_BUS_ERR_SECT_GUID \ | 297 | #define SAL_PLAT_BUS_ERR_SECT_GUID \ |
| 298 | EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) | 298 | EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) |
| 299 | #define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \ | ||
| 300 | EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \ | ||
| 301 | 0xca, 0x4d) | ||
| 299 | 302 | ||
| 300 | #define MAX_CACHE_ERRORS 6 | 303 | #define MAX_CACHE_ERRORS 6 |
| 301 | #define MAX_TLB_ERRORS 6 | 304 | #define MAX_TLB_ERRORS 6 |
| @@ -879,6 +882,24 @@ extern void ia64_jump_to_sal(struct sal_to_os_boot *); | |||
| 879 | 882 | ||
| 880 | extern void ia64_sal_handler_init(void *entry_point, void *gpval); | 883 | extern void ia64_sal_handler_init(void *entry_point, void *gpval); |
| 881 | 884 | ||
| 885 | #define PALO_MAX_TLB_PURGES 0xFFFF | ||
| 886 | #define PALO_SIG "PALO" | ||
| 887 | |||
| 888 | struct palo_table { | ||
| 889 | u8 signature[4]; /* Should be "PALO" */ | ||
| 890 | u32 length; | ||
| 891 | u8 minor_revision; | ||
| 892 | u8 major_revision; | ||
| 893 | u8 checksum; | ||
| 894 | u8 reserved1[5]; | ||
| 895 | u16 max_tlb_purges; | ||
| 896 | u8 reserved2[6]; | ||
| 897 | }; | ||
| 898 | |||
| 899 | #define NPTCG_FROM_PAL 0 | ||
| 900 | #define NPTCG_FROM_PALO 1 | ||
| 901 | #define NPTCG_FROM_KERNEL_PARAMETER 2 | ||
| 902 | |||
| 882 | #endif /* __ASSEMBLY__ */ | 903 | #endif /* __ASSEMBLY__ */ |
| 883 | 904 | ||
| 884 | #endif /* _ASM_IA64_SAL_H */ | 905 | #endif /* _ASM_IA64_SAL_H */ |
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 4fa733dd417a..ec5f355fb7e3 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h | |||
| @@ -38,6 +38,9 @@ ia64_get_lid (void) | |||
| 38 | return lid.f.id << 8 | lid.f.eid; | 38 | return lid.f.id << 8 | lid.f.eid; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
| 42 | void *info, int wait); | ||
| 43 | |||
| 41 | #define hard_smp_processor_id() ia64_get_lid() | 44 | #define hard_smp_processor_id() ia64_get_lid() |
| 42 | 45 | ||
| 43 | #ifdef CONFIG_SMP | 46 | #ifdef CONFIG_SMP |
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 595112bca3cc..dff8128fa58e 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h | |||
| @@ -210,6 +210,13 @@ struct task_struct; | |||
| 210 | extern void ia64_save_extra (struct task_struct *task); | 210 | extern void ia64_save_extra (struct task_struct *task); |
| 211 | extern void ia64_load_extra (struct task_struct *task); | 211 | extern void ia64_load_extra (struct task_struct *task); |
| 212 | 212 | ||
| 213 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 214 | extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next); | ||
| 215 | # define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n) | ||
| 216 | #else | ||
| 217 | # define IA64_ACCOUNT_ON_SWITCH(p,n) | ||
| 218 | #endif | ||
| 219 | |||
| 213 | #ifdef CONFIG_PERFMON | 220 | #ifdef CONFIG_PERFMON |
| 214 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); | 221 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); |
| 215 | # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) | 222 | # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) |
| @@ -222,6 +229,7 @@ extern void ia64_load_extra (struct task_struct *task); | |||
| 222 | || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) | 229 | || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) |
| 223 | 230 | ||
| 224 | #define __switch_to(prev,next,last) do { \ | 231 | #define __switch_to(prev,next,last) do { \ |
| 232 | IA64_ACCOUNT_ON_SWITCH(prev, next); \ | ||
| 225 | if (IA64_HAS_EXTRA_STATE(prev)) \ | 233 | if (IA64_HAS_EXTRA_STATE(prev)) \ |
| 226 | ia64_save_extra(prev); \ | 234 | ia64_save_extra(prev); \ |
| 227 | if (IA64_HAS_EXTRA_STATE(next)) \ | 235 | if (IA64_HAS_EXTRA_STATE(next)) \ |
| @@ -266,6 +274,10 @@ void cpu_idle_wait(void); | |||
| 266 | 274 | ||
| 267 | void default_idle(void); | 275 | void default_idle(void); |
| 268 | 276 | ||
| 277 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 278 | extern void account_system_vtime(struct task_struct *); | ||
| 279 | #endif | ||
| 280 | |||
| 269 | #endif /* __KERNEL__ */ | 281 | #endif /* __KERNEL__ */ |
| 270 | 282 | ||
| 271 | #endif /* __ASSEMBLY__ */ | 283 | #endif /* __ASSEMBLY__ */ |
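__switch_to() now runs IA64_ACCOUNT_ON_SWITCH before saving extra state, and the thread_info hunk below adds the ac_* fields such a hook would maintain. The real ia64_account_on_switch() is not part of this section, so the following user-space model only illustrates the stamp-and-bank idea, not its code:

```c
#include <stdio.h>

/* Model: read a time source at every task switch and bank the delta
 * to the outgoing task; re-stamp the incoming one. */
struct thread_info_model {
	unsigned long long ac_stamp;	/* last accounting timestamp */
	unsigned long long ac_stime;	/* accumulated system time */
};

static unsigned long long now = 1000;	/* stand-in for ar.itc */

static void account_on_switch(struct thread_info_model *prev,
			      struct thread_info_model *next)
{
	prev->ac_stime += now - prev->ac_stamp;
	next->ac_stamp = now;
}

int main(void)
{
	struct thread_info_model a = { .ac_stamp = 400 }, b = { 0 };

	account_on_switch(&a, &b);	/* a ran for 600 units */
	printf("a.ac_stime = %llu\n", a.ac_stime);
	return 0;
}
```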
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 93d83cbe0c8c..6da8069a0f77 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h | |||
| @@ -31,6 +31,12 @@ struct thread_info { | |||
| 31 | mm_segment_t addr_limit; /* user-level address space limit */ | 31 | mm_segment_t addr_limit; /* user-level address space limit */ |
| 32 | int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ | 32 | int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ |
| 33 | struct restart_block restart_block; | 33 | struct restart_block restart_block; |
| 34 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 35 | __u64 ac_stamp; | ||
| 36 | __u64 ac_leave; | ||
| 37 | __u64 ac_stime; | ||
| 38 | __u64 ac_utime; | ||
| 39 | #endif | ||
| 34 | }; | 40 | }; |
| 35 | 41 | ||
| 36 | #define THREAD_SIZE KERNEL_STACK_SIZE | 42 | #define THREAD_SIZE KERNEL_STACK_SIZE |
| @@ -62,9 +68,17 @@ struct thread_info { | |||
| 62 | #define task_stack_page(tsk) ((void *)(tsk)) | 68 | #define task_stack_page(tsk) ((void *)(tsk)) |
| 63 | 69 | ||
| 64 | #define __HAVE_THREAD_FUNCTIONS | 70 | #define __HAVE_THREAD_FUNCTIONS |
| 71 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 72 | #define setup_thread_stack(p, org) \ | ||
| 73 | *task_thread_info(p) = *task_thread_info(org); \ | ||
| 74 | task_thread_info(p)->ac_stime = 0; \ | ||
| 75 | task_thread_info(p)->ac_utime = 0; \ | ||
| 76 | task_thread_info(p)->task = (p); | ||
| 77 | #else | ||
| 65 | #define setup_thread_stack(p, org) \ | 78 | #define setup_thread_stack(p, org) \ |
| 66 | *task_thread_info(p) = *task_thread_info(org); \ | 79 | *task_thread_info(p) = *task_thread_info(org); \ |
| 67 | task_thread_info(p)->task = (p); | 80 | task_thread_info(p)->task = (p); |
| 81 | #endif | ||
| 68 | #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) | 82 | #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) |
| 69 | 83 | ||
| 70 | #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR | 84 | #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR |
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h index 26edcb750f9f..20d8a39680c2 100644 --- a/include/asm-ia64/tlb.h +++ b/include/asm-ia64/tlb.h | |||
| @@ -64,6 +64,32 @@ struct mmu_gather { | |||
| 64 | struct page *pages[FREE_PTE_NR]; | 64 | struct page *pages[FREE_PTE_NR]; |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | struct ia64_tr_entry { | ||
| 68 | u64 ifa; | ||
| 69 | u64 itir; | ||
| 70 | u64 pte; | ||
| 71 | u64 rr; | ||
| 72 | }; /* Record for a TR entry */ | ||
| 73 | |||
| 74 | extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size); | ||
| 75 | extern void ia64_ptr_entry(u64 target_mask, int slot); | ||
| 76 | |||
| 77 | extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]; | ||
| 78 | |||
| 79 | /* | ||
| 80 | region register macros | ||
| 81 | */ | ||
| 82 | #define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001) | ||
| 83 | #define RR_VE(val) (((val) & 0x0000000000000001) << 0) | ||
| 84 | #define RR_VE_MASK 0x0000000000000001L | ||
| 85 | #define RR_VE_SHIFT 0 | ||
| 86 | #define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f) | ||
| 87 | #define RR_PS(val) (((val) & 0x000000000000003f) << 2) | ||
| 88 | #define RR_PS_MASK 0x00000000000000fcL | ||
| 89 | #define RR_PS_SHIFT 2 | ||
| 90 | #define RR_RID_MASK 0x00000000ffffff00L | ||
| 91 | #define RR_TO_RID(val) ((val >> 8) & 0xffffff) | ||
| 92 | |||
| 67 | /* Users of the generic TLB shootdown code must declare this storage space. */ | 93 | /* Users of the generic TLB shootdown code must declare this storage space. */ |
| 68 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | 94 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); |
| 69 | 95 | ||
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index 7774a1cac0cc..3be25dfed164 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | * Now for some TLB flushing routines. This is the kind of stuff that | 17 | * Now for some TLB flushing routines. This is the kind of stuff that |
| 18 | * can be very expensive, so try to avoid them whenever possible. | 18 | * can be very expensive, so try to avoid them whenever possible. |
| 19 | */ | 19 | */ |
| 20 | extern void setup_ptcg_sem(int max_purges, int from_palo); | ||
| 20 | 21 | ||
| 21 | /* | 22 | /* |
| 22 | * Flush everything (kernel mapping may also have changed due to | 23 | * Flush everything (kernel mapping may also have changed due to |
