From 071375bc76ee86e58592f4682030c81d410ddfd9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sat, 26 Jul 2008 14:52:26 +0200
Subject: x86, RDC321x: remove gpio.h complications

Remove the include/asm-x86/gpio.h specials; just use the generic version.

Signed-off-by: Ingo Molnar
---
 include/asm-x86/gpio.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index 116e9147fe66..c4c91b37c104 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -16,10 +16,6 @@
 #ifndef _ASM_I386_GPIO_H
 #define _ASM_I386_GPIO_H
 
-#ifdef CONFIG_X86_RDC321X
-#include <gpio.h>
-#else /* CONFIG_X86_RDC321X */
-
 #include <asm-generic/gpio.h>
 
 #ifdef CONFIG_GPIOLIB
@@ -57,6 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
 
 #endif /* CONFIG_GPIOLIB */
 
-#endif /* CONFIG_X86_RDC321X */
-
 #endif /* _ASM_I386_GPIO_H */
--
cgit v1.2.2

From 1ca9fda4b2f3413e4bc748b983f5585629ca0560 Mon Sep 17 00:00:00 2001
From: Chris McDermott
Date: Thu, 24 Jul 2008 19:06:09 -0700
Subject: x86: fix IBM Summit based systems' phys_cpu_present_map on 32-bit kernels

x86 kernels on IBM Summit based systems will only online 1 CPU because the
phys_cpu_present_map is not set up correctly. Patch below applied to
2.6.26-git10.

Signed-off-by: Chris McDermott
Tested-by: Tim Pepper
Signed-off-by: Ingo Molnar
---
 include/asm-x86/mach-summit/mach_apic.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 75d2c95005d7..c47e2ab5c5ca 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
 
 static inline physid_mask_t apicid_to_cpu_present(int apicid)
 {
-	return physid_mask_of_physid(0);
+	return physid_mask_of_physid(apicid);
 }
 
 static inline void setup_portio_remap(void)
--
cgit v1.2.2

From 8d8bb39b9eba32dd70e87fd5ad5c5dd4ba118e06 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Fri, 25 Jul 2008 19:44:49 -0700
Subject: dma-mapping: add the device argument to dma_mapping_error()

Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER
architecture does:

This enables us to cleanly fix the Calgary IOMMU issue that some devices
are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for
KVM people to support PCI passthrough, but Andi thinks that this makes it
difficult to support PCI passthrough (see the above thread). So I CC'ed
this to the KVM camp. Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata. If the
pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it. If it
is NULL, the system-wide dma_ops pointer is used as before.

If it's useful for KVM people, I plan to implement a mechanism to register
a hook called when a new PCI (or DMA-capable) device is created (it works
with hotplugging). It enables IOMMUs to set up an appropriate
dma_mapping_ops per device.

The major obstacle is that dma_mapping_error doesn't take a pointer to the
device, unlike the other DMA operations. So x86 can't have dma_mapping_ops
per device. Note that all the POWER IOMMUs use the same dma_mapping_error
function, so this is not a problem for POWER, but x86 IOMMUs use different
dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error.
The patch is trivial but large since it touches lots of drivers and
dma-mapping.h in all the architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other
DMA operations, so we can't have dma_mapping_ops per device. Note that
POWER already has dma_mapping_ops per device, but all the POWER IOMMUs use
the same dma_mapping_error function; x86 IOMMUs use different
dma_mapping_error functions, hence the device argument.

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori
Cc: Muli Ben-Yehuda
Cc: Andi Kleen
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Avi Kivity
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-x86/device.h      |  3 ++
 include/asm-x86/dma-mapping.h | 99 +++++++++++++++++++++++++++++--------------
 include/asm-x86/swiotlb.h     |  2 +-
 3 files changed, 72 insertions(+), 32 deletions(-)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
 	void	*acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+	struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..0eaa9bf6011f 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int		(*mapping_error)(dma_addr_t dma_addr);
+	int		(*mapping_error)(struct device *dev,
+					 dma_addr_t dma_addr);
 	void*		(*alloc_coherent)(struct device *dev, size_t size,
 					  dma_addr_t *dma_handle, gfp_t gfp);
 	void		(*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
 	int		is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index c706a7442633..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
			     int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
				  void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
--
cgit v1.2.2
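A note on the driver-visible change above: a minimal sketch of a
driver-side mapping call under the new signature (illustrative only, not
part of the patch; map_tx_buffer is a hypothetical helper):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int map_tx_buffer(struct pci_dev *pdev, void *buf, size_t len,
				 dma_addr_t *addr)
	{
		*addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

		/* The device argument lets the per-device dma_mapping_ops
		 * (dev->archdata.dma_ops on x86-64) decide what counts as a
		 * mapping failure. */
		if (dma_mapping_error(&pdev->dev, *addr))
			return -ENOMEM;
		return 0;
	}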
From 1956a96de488feb05e95c08c9d5e80f63a4be2b1 Mon Sep 17 00:00:00 2001
From: Alexis Bruemmer
Date: Fri, 25 Jul 2008 19:44:51 -0700
Subject: x86 calgary: fix handling of devices that aren't behind the Calgary

The calgary code can give drivers addresses above 4GB, which is very bad
for hardware that is only 32-bit DMA addressable.

With this patch, the calgary code sets the global dma_ops to swiotlb or
nommu properly, and the dma_ops of devices behind the Calgary/CalIOC2 to
calgary_dma_ops. So the calgary code can safely handle devices that aren't
behind the Calgary/CalIOC2.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Alexis Bruemmer
Signed-off-by: FUJITA Tomonori
Cc: Muli Ben-Yehuda
Cc: Ingo Molnar
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-x86/iommu.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index d63166fb3ab7..ecc8061904a9 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -3,6 +3,7 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
+extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
--
cgit v1.2.2
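A minimal sketch of the per-device wiring this fix relies on (it assumes
the archdata.dma_ops hook from the previous patch; is_behind_calgary() is
a hypothetical predicate, not the actual pci-calgary_64.c code):

	#include <linux/pci.h>

	static void calgary_setup_dev_ops(struct pci_dev *dev)
	{
		if (is_behind_calgary(dev))	/* hypothetical predicate */
			dev->dev.archdata.dma_ops = &calgary_dma_ops;
		/* otherwise archdata.dma_ops stays NULL, so get_dma_ops()
		 * falls back to the global swiotlb/nommu dma_ops */
	}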
From 3ab83521378268044a448113c6aa9a9e245f4d2f Mon Sep 17 00:00:00 2001
From: Huang Ying
Date: Fri, 25 Jul 2008 19:45:07 -0700
Subject: kexec jump

This patch provides an enhancement to kexec/kdump. It implements the
following features:

- Backup/restore memory used by the original kernel before/after kexec.
- Save/restore CPU state before/after kexec.

The features of this patch can be used as a general method to call a
program in physical mode (paging turned off). This can be used to call
BIOS code under Linux.

kexec-tools needs to be patched to support kexec jump. The patches and the
precompiled kexec can be downloaded from the following URLs:

source: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-src_git_kh10.tar.bz2
patches: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-patches_git_kh10.tar.bz2
binary: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec_git_kh10

Usage example of calling some physical mode code and returning:

1. Compile and install the patched kernel with the following options
   selected:

   CONFIG_X86_32=y
   CONFIG_KEXEC=y
   CONFIG_PM=y
   CONFIG_KEXEC_JUMP=y

2. Build the patched kexec-tools or download the pre-built one.

3. Build some physical-mode executable, named e.g. "phy_mode".

4. Boot the kernel compiled in step 1.

5. Load the physical-mode executable with /sbin/kexec. The shell command
   line can be as follows:

   /sbin/kexec --load-preserve-context --args-none phy_mode

6. Call the physical-mode executable with the following shell command
   line:

   /sbin/kexec -e

Implementation point:

To support jumping without reserving memory, one shadow backup page
(source page) is allocated for each page used by the kexeced code image
(destination page). At kexec_load time, the image of the kexeced code is
loaded into the source pages; before executing, the destination pages and
the source pages are swapped, so the contents of the destination pages are
backed up. Before jumping to the kexeced code image and after jumping back
to the original kernel, the destination pages and the source pages are
swapped again.

The C ABI (calling convention) is used as the communication protocol
between the kernel and the called code.

A flag named KEXEC_PRESERVE_CONTEXT for sys_kexec_load is added to
indicate that the loaded kernel image is used for jumping back. For now,
only the i386 architecture is supported.

Signed-off-by: Huang Ying
Acked-by: Vivek Goyal
Cc: "Eric W. Biederman"
Cc: Pavel Machek
Cc: Nigel Cunningham
Cc: "Rafael J. Wysocki"
Cc: Ingo Molnar
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-x86/kexec.h | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 8f855a15f64d..c0e52a14fd4d 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -10,14 +10,15 @@
 # define VA_PTE_0	5
 # define PA_PTE_1	6
 # define VA_PTE_1	7
+# define PA_SWAP_PAGE	8
 # ifdef CONFIG_X86_PAE
-# define PA_PMD_0	8
-# define VA_PMD_0	9
-# define PA_PMD_1	10
-# define VA_PMD_1	11
-# define PAGES_NR	12
+# define PA_PMD_0	9
+# define VA_PMD_0	10
+# define PA_PMD_1	11
+# define VA_PMD_1	12
+# define PAGES_NR	13
 # else
-# define PAGES_NR	8
+# define PAGES_NR	9
 # endif
 #else
 # define PA_CONTROL_PAGE	0
@@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage NORET_TYPE void
+asmlinkage unsigned long
 relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
-		unsigned int has_pae) ATTRIB_NORET;
+		unsigned int has_pae,
+		unsigned int preserve_context);
 #else
 NORET_TYPE void
 relocate_kernel(unsigned long indirection_page,
--
cgit v1.2.2
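For reference, loading a preserve-context image from userspace reduces to
passing the new flag to sys_kexec_load. A sketch, under the assumption
that KEXEC_PRESERVE_CONTEXT is 0x00000002 in the patched headers (segment
setup omitted; this is not the kexec-tools implementation):

	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef KEXEC_PRESERVE_CONTEXT
	#define KEXEC_PRESERVE_CONTEXT	0x00000002	/* assumed value */
	#endif

	static long load_preserve_context(unsigned long entry,
					  unsigned long nr_segments,
					  void *segments)
	{
		return syscall(SYS_kexec_load, entry, nr_segments, segments,
			       KEXEC_PRESERVE_CONTEXT);
	}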
From a0a8f5364a5ad248aec6cb705e0092ff563edc2f Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Fri, 25 Jul 2008 19:45:20 -0700
Subject: x86: implement pte_special

Implement the pte_special bit for x86. This is required to support
lockless get_user_pages, because we need to know whether or not we can
refcount a particular page given only its pte (and no vma).

[hugh@veritas.com: fix a BUG]
Signed-off-by: Nick Piggin
Cc: Dave Kleikamp
Cc: Andy Whitcroft
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: Andi Kleen
Cc: Dave Kleikamp
Cc: Badari Pulavarty
Cc: Zach Brown
Cc: Jens Axboe
Reviewed-by: Peter Zijlstra
Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-x86/pgtable.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 3e5dbc4195f4..04caa2f544df 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,6 +18,7 @@
 #define _PAGE_BIT_UNUSED2	10
 #define _PAGE_BIT_UNUSED3	11
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
 #define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -34,6 +35,8 @@
 #define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define __HAVE_ARCH_PTE_SPECIAL
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
@@ -54,7 +57,7 @@
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_ACCESSED | _PAGE_DIRTY)
+			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
@@ -180,7 +183,7 @@ static inline int pte_exec(pte_t pte)
 
 static inline int pte_special(pte_t pte)
 {
-	return 0;
+	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
 static inline int pmd_large(pmd_t pte)
@@ -246,7 +249,7 @@ static inline pte_t pte_clrglobal(pte_t pte)
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
 extern pteval_t __supported_pte_mask;
--
cgit v1.2.2
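The point of the bit, in one sketch: a walker holding only a pte value can
now decide whether refcounting is safe (a simplified illustration of the
idea, not the actual gup code):

	#include <asm/pgtable.h>

	static int can_refcount_fast(pte_t pte)
	{
		/* special ptes map things (or nothing) that must not be
		 * refcounted from a pte-only walk: bail to the slow path */
		if (!pte_present(pte) || pte_special(pte))
			return 0;
		return 1;	/* safe to get_page(pte_page(pte)) */
	}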
From 8174c430e445a93016ef18f717fe570214fa38bf Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Fri, 25 Jul 2008 19:45:24 -0700
Subject: x86: lockless get_user_pages_fast()

Implement get_user_pages_fast without locking in the fastpath on x86.

Do an optimistic lockless pagetable walk, without taking any page table
locks or even mmap_sem. Page table existence is guaranteed by turning
interrupts off (combined with the fact that we're always looking up the
current mm, this means we can do the lockless page table walk within the
constraints of the TLB shootdown design). Basically we can do this
lockless pagetable walk in a similar manner to the way the CPU's pagetable
walker does not have to take any locks to find present ptes.

This patch (combined with the subsequent ones to convert direct IO to use
it) was found to give about a 10% performance improvement on a 2-socket,
8-core Intel Xeon system running an OLTP workload on DB2 v9.5:

"To test the effects of the patch, an OLTP workload was run on an IBM
x3850 M2 server with 2 processors (quad-core Intel Xeon processors at
2.93 GHz) using IBM DB2 v9.5 running the Linux 2.6.24rc7 kernel. Comparing
runs with and without the patch resulted in an overall performance benefit
of ~9.8%. Correspondingly, oprofiles showed that samples from the
__up_read and __down_read routines seen during thread contention for
system resources were reduced from 2.8% down to .05%. Monitoring the
/proc/vmstat output from the patched run showed that the counter for
fast_gup contained a very high number while the fast_gup_slow value was
zero."

(fast_gup is the old name for get_user_pages_fast; fast_gup_slow is a
counter we had for the number of times the slowpath was invoked.)

The main reason for the improvement is that DB2 has multiple threads each
issuing direct-IO. Direct-IO uses get_user_pages, and thus the threads
contend on the mmap_sem cacheline, and can also contend on page table
locks.

I would anticipate larger performance gains on larger systems; however, I
think DB2 uses an adaptive mix of threads and processes, so it could be
that thread contention remains pretty constant as machine size increases.
In which case, we're stuck with "only" a 10% gain.

The downside of using get_user_pages_fast is that if there is not a pte
with the correct permissions for the access, we end up falling back to
get_user_pages, and so get_user_pages_fast is a bit of extra work.
However, this should not be the common case in most performance-critical
code.

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: Kconfig fix]
[akpm@linux-foundation.org: Makefile fix/cleanup]
[akpm@linux-foundation.org: warning fix]
Signed-off-by: Nick Piggin
Cc: Dave Kleikamp
Cc: Andy Whitcroft
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: Andi Kleen
Cc: Dave Kleikamp
Cc: Badari Pulavarty
Cc: Zach Brown
Cc: Jens Axboe
Reviewed-by: Peter Zijlstra
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-x86/uaccess.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/asm-x86')

diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index f6fa4d841bbc..5f702d1d5218 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -451,3 +451,4 @@ extern struct movsl_mask {
 #endif
 
 #endif
+
--
cgit v1.2.2
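Caller-side shape of the new API, as a sketch (pin_user_buffer is a
hypothetical helper; the internal fallback to the slow path is what keeps
the semantics identical to get_user_pages):

	#include <linux/mm.h>

	static int pin_user_buffer(unsigned long start, int nr_pages,
				   struct page **pages)
	{
		int nr;

		/* fast path: irqs off, lockless pte walk, no mmap_sem */
		nr = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
		if (nr < nr_pages) {
			/* drop any partial pin and let the caller retry */
			while (nr > 0)
				put_page(pages[--nr]);
			return -EFAULT;
		}
		return 0;
	}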