-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci | 12
-rw-r--r--  Documentation/feature-removal-schedule.txt | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9g25ek.dts | 2
-rw-r--r--  arch/arm/configs/armadillo800eva_defconfig | 2
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/arm/mach-at91/at91rm9200_time.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9260_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/at91sam9261_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/at91sam9263_devices.c | 10
-rw-r--r--  arch/arm/mach-at91/at91sam9g45_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/at91sam9rl_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/clock.c | 12
-rw-r--r--  arch/arm/mach-gemini/irq.c | 1
-rw-r--r--  arch/arm/mach-kirkwood/common.c | 7
-rw-r--r--  arch/arm/mach-kirkwood/db88f6281-bp-setup.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 13
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 3
-rw-r--r--  arch/arm/mach-shmobile/board-marzen.c | 2
-rw-r--r--  arch/arm/mach-shmobile/intc-sh73a0.c | 4
-rw-r--r--  arch/arm/mach-tegra/pcie.c | 12
-rw-r--r--  arch/arm/mm/dma-mapping.c | 114
-rw-r--r--  arch/mips/pci/pci-octeon.c | 15
-rw-r--r--  arch/powerpc/Kconfig | 8
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 13
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 2
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 9
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 169
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 9
-rw-r--r--  arch/powerpc/include/asm/paca.h | 2
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 10
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 9
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-4k.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 19
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 10
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 3
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 18
-rw-r--r--  arch/powerpc/include/asm/reg.h | 1
-rw-r--r--  arch/powerpc/include/asm/sparsemem.h | 4
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 3
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 7
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 11
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 47
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 35
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 127
-rw-r--r--  arch/powerpc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 20
-rw-r--r--  arch/powerpc/kernel/time.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 17
-rw-r--r--  arch/powerpc/kvm/trace.h | 14
-rw-r--r--  arch/powerpc/lib/sstep.c | 36
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 97
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 164
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 45
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 15
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c | 10
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 12
-rw-r--r--  arch/powerpc/mm/slb_low.S | 62
-rw-r--r--  arch/powerpc/mm/slice.c | 112
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 45
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 733
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 21
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c | 22
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 99
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pe.c | 102
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 76
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 32
-rw-r--r--  arch/powerpc/xmon/xmon.c | 107
-rw-r--r--  arch/tile/kernel/pci.c | 26
-rw-r--r--  arch/um/os-Linux/time.c | 2
-rw-r--r--  arch/x86/xen/mmu.c | 2
-rw-r--r--  arch/x86/xen/p2m.c | 2
-rw-r--r--  drivers/base/dma-contiguous.c | 2
-rw-r--r--  drivers/gpio/Kconfig | 2
-rw-r--r--  drivers/gpio/gpio-em.c | 4
-rw-r--r--  drivers/gpio/gpio-rdc321x.c | 1
-rw-r--r--  drivers/gpio/gpiolib-of.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 10
-rw-r--r--  drivers/hid/hid-core.c | 7
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_reset.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 38
-rw-r--r--  drivers/input/keyboard/imx_keypad.c | 3
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 14
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 6
-rw-r--r--  drivers/input/touchscreen/edt-ft5x06.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 6
-rw-r--r--  drivers/mmc/card/block.c | 26
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 6
-rw-r--r--  drivers/mmc/host/bfin_sdh.c | 7
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 85
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 14
-rw-r--r--  drivers/mmc/host/omap.c | 14
-rw-r--r--  drivers/mmc/host/sdhci-esdhc.h | 6
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 50
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/reset.c | 8
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 31
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 44
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 21
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 8
-rw-r--r--  drivers/pci/access.c | 202
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 6
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 12
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 20
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 2
-rw-r--r--  drivers/pci/iov.c | 6
-rw-r--r--  drivers/pci/pci-driver.c | 6
-rw-r--r--  drivers/pci/pci-sysfs.c | 42
-rw-r--r--  drivers/pci/pci.c | 317
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 24
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 53
-rw-r--r--  drivers/pci/pcie/aspm.c | 119
-rw-r--r--  drivers/pci/pcie/pme.c | 29
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 2
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 19
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 31
-rw-r--r--  drivers/pci/probe.c | 59
-rw-r--r--  drivers/pci/quirks.c | 31
-rw-r--r--  drivers/pci/search.c | 2
-rw-r--r--  drivers/pci/setup-bus.c | 81
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 18
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 8
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 4
-rw-r--r--  drivers/staging/et131x/et131x.c | 19
-rw-r--r--  drivers/staging/rtl8192e/rtl8192e/rtl_pci.c | 8
-rw-r--r--  drivers/video/auo_k190x.c | 2
-rw-r--r--  drivers/video/console/bitblit.c | 2
-rw-r--r--  drivers/video/console/fbcon.c | 2
-rw-r--r--  drivers/video/mb862xx/mb862xxfbdrv.c | 2
-rw-r--r--  drivers/video/omap2/dss/sdi.c | 14
-rw-r--r--  drivers/video/omap2/omapfb/omapfb-main.c | 2
-rw-r--r--  drivers/xen/swiotlb-xen.c | 2
-rw-r--r--  drivers/xen/xen-pciback/pci_stub.c | 8
-rw-r--r--  include/linux/kernel.h | 12
-rw-r--r--  include/linux/mmc/card.h | 1
-rw-r--r--  include/linux/pci.h | 46
-rw-r--r--  include/linux/pci_regs.h | 1
-rw-r--r--  mm/mempolicy.c | 2
-rw-r--r--  net/socket.c | 4
-rw-r--r--  scripts/Makefile.fwinst | 2
-rw-r--r--  scripts/link-vmlinux.sh | 2
-rw-r--r--  sound/pci/hda/hda_codec.c | 10
-rw-r--r--  sound/pci/hda/hda_codec.h | 1
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 4
-rw-r--r--  sound/usb/card.c | 4
-rw-r--r--  sound/usb/endpoint.c | 24
-rw-r--r--  sound/usb/endpoint.h | 3
-rw-r--r--  sound/usb/pcm.c | 64
174 files changed, 2612 insertions, 2016 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 34f51100f029..dff1f48d252d 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -210,3 +210,15 @@ Users:
 		firmware assigned instance number of the PCI
 		device that can help in understanding the firmware
 		intended order of the PCI device.
+
+What:		/sys/bus/pci/devices/.../d3cold_allowed
+Date:		July 2012
+Contact:	Huang Ying <ying.huang@intel.com>
+Description:
+		d3cold_allowed is bit to control whether the corresponding PCI
+		device can be put into D3Cold state. If it is cleared, the
+		device will never be put into D3Cold state. If it is set, the
+		device may be put into D3Cold state if other requirements are
+		satisfied too. Reading this attribute will show the current
+		value of d3cold_allowed bit. Writing this attribute will set
+		the value of d3cold_allowed bit.
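
For context (not part of the patch): like other sysfs booleans, the attribute takes "0" and "1" writes. A minimal user-space sketch, with a hypothetical device address:

	#include <fcntl.h>
	#include <unistd.h>

	/* Disallow D3cold for one device; equivalent to
	 * "echo 0 > /sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed". */
	int main(void)
	{
		int fd = open("/sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed",
			      O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "0", 1) != 1)	/* "0" = never D3cold, "1" = allowed */
			return 1;
		return close(fd);
	}
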
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index afaff312bf41..f4d8c7105fcd 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more
 ----------------------------
 
 What:	at91-mci driver ("CONFIG_MMC_AT91")
-When:	3.7
+When:	3.8
 Why:	There are two mci drivers: at91-mci and atmel-mci. The PDC support
 	was added to atmel-mci as a first step to support more chips.
 	Then at91-mci was kept only for old IP versions (on at91rm9200 and
diff --git a/Makefile b/Makefile
index 371ce8899f5c..0f66f146d57e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c5f9ae5dbd1a..2f88d8d97701 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
-	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 7829a4d0cb22..96514c134e54 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -15,7 +15,7 @@
 	compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
 	};
 
 	ahb {
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index 7d8718468e0d..90610c7030f7 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
 CONFIG_FORCE_MAX_ZONEORDER=13
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
 CONFIG_CMDLINE_FORCE=y
 CONFIG_KEXEC=y
 CONFIG_VFP=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df4551..5c44dcb0987b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 }
 
 /*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
  * memory allocator is initialised, i.e. before any core_initcall.
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d18..aaa443b48c91 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
 	at91_st_read(AT91_ST_SR);
 
 	/* Make IRQs happen for the system timer */
-	setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+	setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
 
 	/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
 	 * directly for the clocksource and all clockevents, after adjusting
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7b9c2ba396ed..bce572a530ef 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	},
 };
 
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9260_rtt_device.num_resources = 2;
+	at91sam9260_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
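
The hunks above (and the matching ones for the other AT91 SoCs below) hand the rtc-at91sam9 driver its interrupt as a third platform resource instead of letting it hard-code AT91_ID_SYS. A driver picks such a resource up with platform_get_irq(); a hedged sketch, not the actual rtc-at91sam9 probe code:

	/* Hypothetical probe: fetch the first IORESOURCE_IRQ entry, wherever
	 * it sits in the resource array, and bind a handler to it. */
	static int __devinit my_rtc_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;	/* no IRQ resource was registered */
		return request_irq(irq, my_rtc_interrupt, IRQF_SHARED,
				   "my_rtc", pdev);
	}
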
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 8df5c1bdff92..bc2590d712d0 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9261_rtt_device.num_resources = 2;
+	at91sam9261_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index eb6bbf86fb9f..9b6ca734f1a9 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
 		 * The second resource is needed only for the chosen RTT:
 		 * GPBR will serve as the storage for RTC time offset
 		 */
-		at91sam9263_rtt0_device.num_resources = 2;
+		at91sam9263_rtt0_device.num_resources = 3;
 		at91sam9263_rtt1_device.num_resources = 1;
 		pdev = &at91sam9263_rtt0_device;
 		r = rtt0_resources;
 		break;
 	case 1:
 		at91sam9263_rtt0_device.num_resources = 1;
-		at91sam9263_rtt1_device.num_resources = 2;
+		at91sam9263_rtt1_device.num_resources = 3;
 		pdev = &at91sam9263_rtt1_device;
 		r = rtt1_resources;
 		break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
 	pdev->name = "rtc-at91sam9";
 	r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	r[1].end = r[1].start + 3;
+	r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 06073996a382..1b47319ca00b 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9g45_rtt_device.num_resources = 2;
+	at91sam9g45_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index f09fff932172..b3d365dadef5 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9rl_rtt_device.num_resources = 2;
+	at91sam9rl_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index de2ec6b8fea7..188c82971ebd 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
 
 #define cpu_has_300M_plla()	(cpu_is_at91sam9g10())
 
+#define cpu_has_240M_plla()	(cpu_is_at91sam9261() \
+				|| cpu_is_at91sam9263() \
+				|| cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla()	(cpu_is_at91sam9260())
+
 #define cpu_has_pllb()		(!(cpu_is_at91sam9rl() \
 				|| cpu_is_at91sam9g45() \
 				|| cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
 	} else if (cpu_has_800M_plla()) {
 		if (plla.rate_hz > 800000000)
 			pll_overclock = true;
+	} else if (cpu_has_240M_plla()) {
+		if (plla.rate_hz > 240000000)
+			pll_overclock = true;
+	} else if (cpu_has_210M_plla()) {
+		if (plla.rate_hz > 210000000)
+			pll_overclock = true;
 	} else {
 		if (plla.rate_hz > 209000000)
 			pll_overclock = true;
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index ca70e5fcc7ac..020852d3bdd8 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
+#include <asm/system_misc.h>
 #include <mach/hardware.h>
 
 #define IRQ_SOURCE(base_addr)	(base_addr + 0x00)
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3226077735b1..1201191d7f1b 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
 	orion_time_set_base(TIMER_VIRT_BASE);
+
+	/*
+	 * Some Kirkwood devices allocate their coherent buffers from atomic
+	 * context. Increase size of atomic coherent pool to make sure such
+	 * the allocations won't fail.
+	 */
+	init_dma_coherent_pool_size(SZ_1M);
 }
 
 int kirkwood_tclk;
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index d93359379598..be90b7d0e10b 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/partitions.h>
 #include <linux/ata_platform.h>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index cf10f92856dc..453a6e50db8b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
 };
 
 /* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+	{ .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
 
 static struct gpio_keys_button gpio_buttons[] = {
-	GPIO_KEY(KEY_POWER,	GPIO_PORT99,	"SW1"),
-	GPIO_KEY(KEY_BACK,	GPIO_PORT100,	"SW2"),
-	GPIO_KEY(KEY_MENU,	GPIO_PORT97,	"SW3"),
-	GPIO_KEY(KEY_HOME,	GPIO_PORT98,	"SW4"),
+	GPIO_KEY(KEY_POWER,	GPIO_PORT99,	"SW3", .wakeup = 1),
+	GPIO_KEY(KEY_BACK,	GPIO_PORT100,	"SW4"),
+	GPIO_KEY(KEY_MENU,	GPIO_PORT97,	"SW5"),
+	GPIO_KEY(KEY_HOME,	GPIO_PORT98,	"SW6"),
 };
 
 static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
 	&camera_device,
 	&ceu0_device,
 	&fsi_device,
-	&fsi_hdmi_device,
 	&fsi_wm8978_device,
+	&fsi_hdmi_device,
 };
 
 static void __init eva_clock_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7ea2b31e3199..c129542f6aed 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
  *  - J30 "open"
  *  - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
  *  - add .get_vbus = usbhs_get_vbus in usbhs1_private
+ *  - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
  */
 #define IRQ8			evt2irq(0x0300)
 #define USB_PHY_MODE		(1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
 	&nor_flash_device,
 	&smc911x_device,
 	&lcdc_device,
-	&usbhs1_device,
 	&usbhs0_device,
+	&usbhs1_device,
 	&leds_device,
 	&fsi_device,
 	&fsi_ak4643_device,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 3a528cf4366c..fcf5a47f4772 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
 
 static struct platform_device eth_device = {
 	.name		= "smsc911x",
-	.id		= 0,
+	.id		= -1,
 	.dev		= {
 		.platform_data	= &smsc911x_platdata,
 	},
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index ee447404c857..588555a67d9c 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
 	return 0; /* always allow wakeup */
 }
 
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
 
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
 #define INTCS_VECT_RELOC(n, vect)	INTCS_VECT((n), (vect) + RELOC_BASE)
 
 INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index d3ad5150d660..c25a2a4f2e3d 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -367,17 +367,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
 /* Tegra PCIE requires relaxed ordering */
 static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
 {
-	u16 val16;
-	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-
-	if (pos <= 0) {
-		dev_err(&dev->dev, "skipping relaxed ordering fixup\n");
-		return;
-	}
-
-	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16);
-	val16 |= PCI_EXP_DEVCTL_RELAX_EN;
-	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16);
+	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 
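
pcie_capability_set_word() belongs to the PCI Express capability accessors this series introduces in drivers/pci/access.c; it folds the find-capability/read/modify/write dance above into one call. Roughly, and glossing over the checks for registers a device does not implement, it behaves like this sketch:

	/* Approximate behaviour of the new helper (illustrative only). */
	static int pcie_capability_set_word_sketch(struct pci_dev *dev,
						   int pos, u16 set)
	{
		u16 val;
		int ret;

		ret = pcie_capability_read_word(dev, pos, &val);
		if (!ret) {
			val |= set;	/* set requested bits, keep the rest */
			ret = pcie_capability_write_word(dev, pos, val);
		}
		return ret;
	}

The same pattern replaces open-coded read-modify-write sequences in the MIPS Octeon fixup and many driver hunks further down this diff.
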
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4e7d1182e8a3..051204fc4617 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 	vunmap(cpu_addr);
 }
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+
 struct dma_pool {
 	size_t size;
 	spinlock_t lock;
 	unsigned long *bitmap;
 	unsigned long nr_pages;
 	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
-	.size = SZ_256K,
+	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };
 
 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+	/*
+	 * Catch any attempt to set the pool size too late.
+	 */
+	BUG_ON(atomic_pool.vaddr);
+
+	/*
+	 * Set architecture specific coherent pool size only if
+	 * it has not been changed by kernel command line parameter.
+	 */
+	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
+	struct page **pages;
 	void *ptr;
 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
 	if (!bitmap)
 		goto no_bitmap;
 
+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
 					   &page, NULL);
 	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
 		spin_lock_init(&pool->lock);
 		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
 		pool->bitmap = bitmap;
 		pool->nr_pages = nr_pages;
 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
 			(unsigned)pool->size / 1024);
 		return 0;
 	}
+no_pages:
 	kfree(bitmap);
 no_bitmap:
 	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
+	} else {
+		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+			    "Please increase it with coherent_pool= kernel parameter!\n",
+			    (unsigned)pool->size / 1024);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	void *end = start + size;
+	void *pool_start = pool->vaddr;
+	void *pool_end = pool->vaddr + pool->size;
+
+	if (start < pool_start || start > pool_end)
+		return false;
+
+	if (end <= pool_end)
+		return true;
+
+	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+	     start, end - 1, pool_start, pool_end - 1);
+
+	return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
 	struct dma_pool *pool = &atomic_pool;
 	unsigned long pageno, count;
 	unsigned long flags;
 
-	if (start < pool->vaddr || start > pool->vaddr + pool->size)
+	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	if (start + size > pool->vaddr + pool->size) {
-		WARN(1, "freeing wrong coherent size from pool\n");
-		return 0;
-	}
-
 	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
 	count = size >> PAGE_SHIFT;
 
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 	return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 	struct vm_struct *area;
 
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		return cpu_addr;
 
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp);
 	if (!pages)
 		return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
 		unmap_kernel_range((unsigned long)cpu_addr, size);
 		vunmap(cpu_addr);
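
Taken together, these hunks let the ARM coherent allocators serve GFP_ATOMIC requests from the preallocated pool, since the vmalloc and CMA paths may sleep. The situation the pool exists for, as a hedged sketch with hypothetical names:

	/* E.g. from an IRQ handler or under a spinlock: must not sleep. */
	static void my_refill_ring(struct my_dev *md)
	{
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(md->dev, SZ_4K, &dma, GFP_ATOMIC);

		if (!buf)
			return;	/* pool exhausted; see the new pr_err_once() hint */
		/* ... post the buffer to hardware ... */
	}

If the default 256KiB pool is too small, a platform can raise it early via init_dma_coherent_pool_size() (as the Kirkwood hunk above does), or the user can boot with coherent_pool=1M.
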
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 52a1ba70b3b6..c5dfb2c87d44 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -117,16 +117,11 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 	}
 
 	/* Enable the PCIe normal error reporting */
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	if (pos) {
-		/* Update Device Control */
-		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
-		config |= PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
-		config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
-		config |= PCI_EXP_DEVCTL_FERE;  /* Fatal Error Reporting */
-		config |= PCI_EXP_DEVCTL_URRE;  /* Unsupported Request */
-		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
-	}
+	config = PCI_EXP_DEVCTL_CERE;	/* Correctable Error Reporting */
+	config |= PCI_EXP_DEVCTL_NFERE;	/* Non-Fatal Error Reporting */
+	config |= PCI_EXP_DEVCTL_FERE;	/* Fatal Error Reporting */
+	config |= PCI_EXP_DEVCTL_URRE;	/* Unsupported Request */
+	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
 
 	/* Find the Advanced Error Reporting capability */
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b8bab10bd0f1..4ce0be32d153 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -562,6 +562,14 @@ config SCHED_SMT
 	  when dealing with POWER5 cpus at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+config PPC_DENORMALISATION
+	bool "PowerPC denormalisation exception handling"
+	depends on PPC_BOOK3S_64
+	default "n"
+	---help---
+	  Add support for handling denormalisation of single precision
+	  values.  Useful for bare metal only.  If unsure say Y here.
+
 config CMDLINE_BOOL
 	bool "Default bootloader kernel arguments"
 
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index db27c82e0542..e263e6a5aca1 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -51,6 +51,7 @@ CONFIG_KEXEC=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_SCHED_SMT=y
+CONFIG_PPC_DENORMALISATION=y
 CONFIG_PCCARD=y
 CONFIG_ELECTRA_CF=y
 CONFIG_HOTPLUG_PCI=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1f65b3c9b59a..c169dfb3e42d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -48,6 +48,7 @@ CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_PPC_64K_PAGES=y
 CONFIG_PPC_SUBPAGE_PROT=y
 CONFIG_SCHED_SMT=y
+CONFIG_PPC_DENORMALISATION=y
 CONFIG_HOTPLUG_PCI=m
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 58c5ee61e700..b0ef73882b38 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -45,9 +45,10 @@ struct device_node;
  * in the corresponding PHB. Therefore, the root PEs should be created
  * against existing PHBs in on-to-one fashion.
  */
-#define EEH_PE_PHB	1	/* PHB PE    */
-#define EEH_PE_DEVICE	2	/* Device PE */
-#define EEH_PE_BUS	3	/* Bus PE    */
+#define EEH_PE_INVALID	(1 << 0)	/* Invalid   */
+#define EEH_PE_PHB	(1 << 1)	/* PHB PE    */
+#define EEH_PE_DEVICE	(1 << 2)	/* Device PE */
+#define EEH_PE_BUS	(1 << 3)	/* Bus PE    */
 
 #define EEH_PE_ISOLATED		(1 << 0)	/* Isolated PE */
 #define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE */
@@ -184,7 +185,7 @@ static inline void eeh_unlock(void)
 typedef void *(*eeh_traverse_func)(void *data, void *flag);
 int __devinit eeh_phb_pe_create(struct pci_controller *phb);
 int eeh_add_to_parent_pe(struct eeh_dev *edev);
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe);
 void *eeh_pe_dev_traverse(struct eeh_pe *root,
 		eeh_traverse_func fn, void *flag);
 void eeh_pe_restore_bars(struct eeh_pe *pe);
@@ -200,7 +201,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev);
 void __init eeh_addr_cache_build(void);
 void eeh_add_device_tree_early(struct device_node *);
 void eeh_add_device_tree_late(struct pci_bus *);
-void eeh_remove_bus_device(struct pci_dev *);
+void eeh_remove_bus_device(struct pci_dev *, int);
 
 /**
  * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -239,7 +240,7 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
 
 static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
 
-static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
 
 static inline void eeh_lock(void) { }
 static inline void eeh_unlock(void) { }
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..7aefdb3e1ce4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -59,7 +59,7 @@ struct hpte_cache {
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
 	struct rcu_head rcu_head;
-	u64 host_va;
+	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 236b4779ec4f..c4231973edd3 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,19 +34,19 @@ struct machdep_calls {
 	char		*name;
 #ifdef CONFIG_PPC64
 	void            (*hpte_invalidate)(unsigned long slot,
-					   unsigned long va,
+					   unsigned long vpn,
 					   int psize, int ssize,
 					   int local);
 	long		(*hpte_updatepp)(unsigned long slot,
 					 unsigned long newpp,
-					 unsigned long va,
+					 unsigned long vpn,
 					 int psize, int ssize,
 					 int local);
 	void            (*hpte_updateboltedpp)(unsigned long newpp,
 					       unsigned long ea,
 					       int psize, int ssize);
 	long		(*hpte_insert)(unsigned long hpte_group,
-				       unsigned long va,
+				       unsigned long vpn,
 				       unsigned long prpn,
 				       unsigned long rflags,
 				       unsigned long vflags,
@@ -215,6 +215,9 @@ struct machdep_calls {
 	/* Called after scan and before resource survey */
 	void (*pcibios_fixup_phb)(struct pci_controller *hose);
 
+	/* Called during PCI resource reassignment */
+	resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type);
+
 	/* Called to shutdown machine specific hardware not already controlled
 	 * by other drivers.
 	 */
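
The new hook lets a platform demand extra alignment when PCI resources are reassigned; the powernv IODA code touched later in this diff is its user. A hypothetical implementation, with an illustrative segment size rather than the real IODA value:

	/* Hypothetical platform hook: pad memory windows to 64K so each
	 * device lands in its own hardware segment. */
	static resource_size_t my_window_alignment(struct pci_bus *bus,
						   unsigned long type)
	{
		if (type & IORESOURCE_MEM)
			return SZ_64K;	/* illustrative, not the real value */
		return 1;		/* no additional alignment */
	}
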
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 1c65a59881ea..9673f73eb8db 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -16,6 +16,13 @@
 #include <asm/page.h>
 
 /*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slices related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/pgtable-ppc64.h>
+
+/*
  * Segment table
  */
 
@@ -154,9 +161,25 @@ struct mmu_psize_def
 #define MMU_SEGSIZE_256M	0
 #define MMU_SEGSIZE_1T		1
 
+/*
+ * encode page number shift.
+ * in order to fit the 78 bit va in a 64 bit variable we shift the va by
+ * 12 bits. This enable us to address upto 76 bit va.
+ * For hpt hash from a va we can ignore the page size bits of va and for
+ * hpte encoding we ignore up to 23 bits of va. So ignoring lower 12 bits ensure
+ * we work in all cases including 4k page size.
+ */
+#define VPN_SHIFT	12
 
 #ifndef __ASSEMBLY__
 
+static inline int segment_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SID_SHIFT;
+	return SID_SHIFT_1T;
+}
+
 /*
  * The current system page and segment sizes
  */
@@ -180,18 +203,39 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
 extern int mmu_ci_restrictions;
 
 /*
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE. The bottom 7 bits
+ * of the returned value are zero.
+ */
+static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
+					     int ssize)
+{
+	unsigned long v;
+	/*
+	 * The AVA field omits the low-order 23 bits of the 78 bits VA.
+	 * These bits are not needed in the PTE, because the
+	 * low-order b of these bits are part of the byte offset
+	 * into the virtual page and, if b < 23, the high-order
+	 * 23-b of these bits are always used in selecting the
+	 * PTEGs to be searched
+	 */
+	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	return v;
+}
+
+/*
  * This function sets the AVPN and L fields of the HPTE appropriately
  * for the page size
  */
-static inline unsigned long hpte_encode_v(unsigned long va, int psize,
-					  int ssize)
+static inline unsigned long hpte_encode_v(unsigned long vpn,
+					  int psize, int ssize)
 {
 	unsigned long v;
-	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
-	v <<= HPTE_V_AVPN_SHIFT;
+	v = hpte_encode_avpn(vpn, psize, ssize);
 	if (psize != MMU_PAGE_4K)
 		v |= HPTE_V_LARGE;
-	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
 	return v;
 }
 
216} 260}
217 261
218/* 262/*
219 * Build a VA given VSID, EA and segment size 263 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
220 */ 264 */
221static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid, 265static inline unsigned long hpt_vpn(unsigned long ea,
222 int ssize) 266 unsigned long vsid, int ssize)
223{ 267{
224 if (ssize == MMU_SEGSIZE_256M) 268 unsigned long mask;
225 return (vsid << 28) | (ea & 0xfffffffUL); 269 int s_shift = segment_shift(ssize);
226 return (vsid << 40) | (ea & 0xffffffffffUL); 270
271 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
272 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
227} 273}
228 274
229/* 275/*
230 * This hashes a virtual address 276 * This hashes a virtual address
231 */ 277 */
232 278static inline unsigned long hpt_hash(unsigned long vpn,
233static inline unsigned long hpt_hash(unsigned long va, unsigned int shift, 279 unsigned int shift, int ssize)
234 int ssize)
235{ 280{
281 int mask;
236 unsigned long hash, vsid; 282 unsigned long hash, vsid;
237 283
284 /* VPN_SHIFT can be atmost 12 */
238 if (ssize == MMU_SEGSIZE_256M) { 285 if (ssize == MMU_SEGSIZE_256M) {
239 hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift); 286 mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
287 hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
288 ((vpn & mask) >> (shift - VPN_SHIFT));
240 } else { 289 } else {
241 vsid = va >> 40; 290 mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
242 hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift); 291 vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
292 hash = vsid ^ (vsid << 25) ^
293 ((vpn & mask) >> (shift - VPN_SHIFT)) ;
243 } 294 }
244 return hash & 0x7fffffffffUL; 295 return hash & 0x7fffffffffUL;
245} 296}
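
A worked example of the new encoding (editor's arithmetic, not from the patch): a 256MB segment has SID_SHIFT = 28, so with VPN_SHIFT = 12 hpt_vpn() keeps s_shift - VPN_SHIFT = 16 effective-address bits below the VSID:

	/* ea = 0x12345678, vsid = 0x42, ssize = MMU_SEGSIZE_256M:
	 *   mask = (1ul << 16) - 1       = 0xffff
	 *   ea >> VPN_SHIFT              = 0x12345
	 *   (ea >> VPN_SHIFT) & mask     = 0x2345
	 *   vpn  = (0x42 << 16) | 0x2345 = 0x422345
	 * i.e. the vpn is the 78-bit virtual address shifted right by 12,
	 * which is why it now fits in an unsigned long.
	 */
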
@@ -280,63 +331,61 @@ extern void slb_set_size(u16 size);
280#endif /* __ASSEMBLY__ */ 331#endif /* __ASSEMBLY__ */
281 332
282/* 333/*
283 * VSID allocation 334 * VSID allocation (256MB segment)
335 *
336 * We first generate a 38-bit "proto-VSID". For kernel addresses this
337 * is equal to the ESID | 1 << 37, for user addresses it is:
338 * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
284 * 339 *
285 * We first generate a 36-bit "proto-VSID". For kernel addresses this 340 * This splits the proto-VSID into the below range
286 * is equal to the ESID, for user addresses it is: 341 * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
287 * (context << 15) | (esid & 0x7fff) 342 * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
288 * 343 *
289 * The two forms are distinguishable because the top bit is 0 for user 344 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
290 * addresses, whereas the top two bits are 1 for kernel addresses. 345 * That is, we assign half of the space to user processes and half
291 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for 346 * to the kernel.
292 * now.
293 * 347 *
294 * The proto-VSIDs are then scrambled into real VSIDs with the 348 * The proto-VSIDs are then scrambled into real VSIDs with the
295 * multiplicative hash: 349 * multiplicative hash:
296 * 350 *
297 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS 351 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
298 * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
299 * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
300 * 352 *
301 * This scramble is only well defined for proto-VSIDs below 353 * VSID_MULTIPLIER is prime, so in particular it is
302 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
303 * reserved. VSID_MULTIPLIER is prime, so in particular it is
304 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. 354 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
305 * Because the modulus is 2^n-1 we can compute it efficiently without 355 * Because the modulus is 2^n-1 we can compute it efficiently without
306 * a divide or extra multiply (see below). 356 * a divide or extra multiply (see below).
307 * 357 *
308 * This scheme has several advantages over older methods: 358 * This scheme has several advantages over older methods:
309 * 359 *
310 * - We have VSIDs allocated for every kernel address 360 * - We have VSIDs allocated for every kernel address
311 * (i.e. everything above 0xC000000000000000), except the very top 361 * (i.e. everything above 0xC000000000000000), except the very top
312 * segment, which simplifies several things. 362 * segment, which simplifies several things.
313 * 363 *
314 * - We allow for 16 significant bits of ESID and 19 bits of 364 * - We allow for USER_ESID_BITS significant bits of ESID and
315 * context for user addresses. i.e. 16T (44 bits) of address space for 365 * CONTEXT_BITS bits of context for user addresses.
316 * up to half a million contexts. 366 * i.e. 64T (46 bits) of address space for up to half a million contexts.
317 * 367 *
318 * - The scramble function gives robust scattering in the hash 368 * - The scramble function gives robust scattering in the hash
319 * table (at least based on some initial results). The previous 369 * table (at least based on some initial results). The previous
320 * method was more susceptible to pathological cases giving excessive 370 * method was more susceptible to pathological cases giving excessive
321 * hash collisions. 371 * hash collisions.
322 */ 372 */
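The divide-free reduction mentioned above ("see below") relies on 2^n being congruent to 1 modulo 2^n - 1, so the high bits of the product can simply be folded back into the low bits. A hedged C sketch of that trick, with an illustrative name:

/* Sketch only: x mod (2^n - 1) via folding, as ASM_VSID_SCRAMBLE does. */
static unsigned long mod_2n_minus_1(unsigned long x, int n)
{
	unsigned long mask = (1UL << n) - 1;

	x = (x >> n) + (x & mask);		/* fold high bits into low bits */
	return (x + ((x + 1) >> n)) & mask;	/* second fold handles x == mask */
}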
373
323/* 374/*
324 * WARNING - If you change these you must make sure the asm 375 * This should be computed such that protovsid * vsid_multiplier
325 * implementations in slb_allocate (slb_low.S), do_stab_bolted 376 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
326 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
327 */ 377 */
328 378#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
329#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */ 379#define VSID_BITS_256M 38
330#define VSID_BITS_256M 36
331#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) 380#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
332 381
333#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ 382#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
334#define VSID_BITS_1T 24 383#define VSID_BITS_1T 26
335#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) 384#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
336 385
337#define CONTEXT_BITS 19 386#define CONTEXT_BITS 19
338#define USER_ESID_BITS 16 387#define USER_ESID_BITS 18
339#define USER_ESID_BITS_1T 4 388#define USER_ESID_BITS_1T 6
340 389
341#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 390#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
342 391
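For reference, the arithmetic behind the new values: CONTEXT_BITS + USER_ESID_BITS = 19 + 18 = 37 = VSID_BITS_256M - 1, so user proto-VSIDs occupy exactly half of the 2^38 proto-VSID space; and each context spans 2^USER_ESID_BITS segments of 2^SID_SHIFT bytes, i.e. 2^18 * 2^28 = 2^46 bytes = 64TB, matching the new TASK_SIZE_USER64 later in this patch.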
@@ -372,6 +421,8 @@ extern void slb_set_size(u16 size);
372 srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \ 421 srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
373 add rt,rt,rx 422 add rt,rt,rx
374 423
424/* 4 bits per slice and we have one slice per 1TB */
425#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
375 426
376#ifndef __ASSEMBLY__ 427#ifndef __ASSEMBLY__
377 428
@@ -416,7 +467,7 @@ typedef struct {
416 467
417#ifdef CONFIG_PPC_MM_SLICES 468#ifdef CONFIG_PPC_MM_SLICES
418 u64 low_slices_psize; /* SLB page size encodings */ 469 u64 low_slices_psize; /* SLB page size encodings */
419 u64 high_slices_psize; /* 4 bits per slice for now */ 470 unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
420#else 471#else
421 u16 sllp; /* SLB page size encoding */ 472 u16 sllp; /* SLB page size encoding */
422#endif 473#endif
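The SLICE_ARRAY_SIZE shift works out as follows: with one 1TB slice per 2^40 bytes and 4 bits (half a byte) per slice, the array needs PGTABLE_RANGE / 2^40 / 2 = PGTABLE_RANGE >> 41 bytes; for the 64TB (2^46) range that is 32 bytes, which is what the new high_slices_psize[] occupies in place of the old u64.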
@@ -452,12 +503,32 @@ typedef struct {
452 }) 503 })
453#endif /* 1 */ 504#endif /* 1 */
454 505
455/* This is only valid for addresses >= PAGE_OFFSET */ 506/*
507 * This is only valid for addresses >= PAGE_OFFSET
508 * The proto-VSID space is divided into two classes:
509 * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1
510 * Kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
511 *
512 * With KERNEL_START at 0xc000000000000000, the proto-VSID for
513 * the kernel ends up as 0xc00000000 (36 bits). With 64TB
514 * support the kernel proto-VSID needs to be in the
515 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
516 */
456static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) 517static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
457{ 518{
458 if (ssize == MMU_SEGSIZE_256M) 519 unsigned long proto_vsid;
459 return vsid_scramble(ea >> SID_SHIFT, 256M); 520 /*
460 return vsid_scramble(ea >> SID_SHIFT_1T, 1T); 521 * We need to make sure proto_vsid for the kernel is
522 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
523 */
524 if (ssize == MMU_SEGSIZE_256M) {
525 proto_vsid = ea >> SID_SHIFT;
526 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
527 return vsid_scramble(proto_vsid, 256M);
528 }
529 proto_vsid = ea >> SID_SHIFT_1T;
530 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
531 return vsid_scramble(proto_vsid, 1T);
461} 532}
462 533
463/* Returns the segment size indicator for a user address */ 534/* Returns the segment size indicator for a user address */
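A worked example of the kernel proto-VSID adjustment above, as a hypothetical standalone program (the constants are hard-coded from the definitions earlier in this patch):

#include <stdio.h>

int main(void)
{
	unsigned long ea = 0xc000000000000000UL;	/* PAGE_OFFSET */
	unsigned long proto_vsid = ea >> 28;		/* SID_SHIFT: 0xc00000000 */

	proto_vsid |= 1UL << (19 + 18);	/* CONTEXT_BITS + USER_ESID_BITS */
	/* prints 2c00000000, which lies in the kernel range [2^37, 2^38) */
	printf("%lx\n", proto_vsid);
	return 0;
}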
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e8a26db2e8f3..5e38eedea218 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
146extern u64 ppc64_rma_size; 146extern u64 ppc64_rma_size;
147#endif /* CONFIG_PPC64 */ 147#endif /* CONFIG_PPC64 */
148 148
149struct mm_struct;
150#ifdef CONFIG_DEBUG_VM
151extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
152#else /* CONFIG_DEBUG_VM */
153static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
154{
155}
156#endif /* !CONFIG_DEBUG_VM */
157
149#endif /* !__ASSEMBLY__ */ 158#endif /* !__ASSEMBLY__ */
150 159
151/* The kernel uses the constants below to index into the page sizes array. 160
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 7796519fd238..e9e7a6999bb8 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -100,7 +100,7 @@ struct paca_struct {
100 /* SLB related definitions */ 100 /* SLB related definitions */
101 u16 vmalloc_sllp; 101 u16 vmalloc_sllp;
102 u16 slb_cache_ptr; 102 u16 slb_cache_ptr;
103 u16 slb_cache[SLB_CACHE_ENTRIES]; 103 u32 slb_cache[SLB_CACHE_ENTRIES];
104#endif /* CONFIG_PPC_STD_MMU_64 */ 104#endif /* CONFIG_PPC_STD_MMU_64 */
105 105
106#ifdef CONFIG_PPC_BOOK3E 106#ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fed85e6290e1..cd915d6b093d 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,11 +78,19 @@ extern u64 ppc64_pft_size;
78#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) 78#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
79#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) 79#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
80 80
81/*
82 * 1 bit per slice, and we have one slice per 1TB.
83 * Right now we support only 64TB.
84 * If we change this we will have to change the type
85 * of high_slices.
86 */
87#define SLICE_MASK_SIZE 8
88
81#ifndef __ASSEMBLY__ 89#ifndef __ASSEMBLY__
82 90
83struct slice_mask { 91struct slice_mask {
84 u16 low_slices; 92 u16 low_slices;
85 u16 high_slices; 93 u64 high_slices;
86}; 94};
87 95
88struct mm_struct; 96struct mm_struct;
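A small sketch of how the widened mask is used, assuming SLICE_HIGH_SHIFT = 40 (one slice per 1TB) per GET_HIGH_SLICE_INDEX above; the _x names are illustrative:

#include <stdint.h>

struct slice_mask_x {
	uint16_t low_slices;
	uint64_t high_slices;	/* 1 bit per 1TB slice, 64 slices = 64TB */
};

static void mark_high_slice_x(struct slice_mask_x *m, unsigned long addr)
{
	m->high_slices |= UINT64_C(1) << (addr >> 40); /* GET_HIGH_SLICE_INDEX */
}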
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 973df4d9d366..025a130729bc 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -182,6 +182,14 @@ static inline int pci_device_from_OF_node(struct device_node *np,
182#if defined(CONFIG_EEH) 182#if defined(CONFIG_EEH)
183static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) 183static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
184{ 184{
185 /*
186 * OF nodes whose parent isn't a PCI bridge don't actually
187 * have a PCI_DN. So we have to skip them for any EEH
188 * operation.
189 */
190 if (!dn || !PCI_DN(dn))
191 return NULL;
192
185 return PCI_DN(dn)->edev; 193 return PCI_DN(dn)->edev;
186} 194}
187#else 195#else
@@ -192,6 +200,7 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
192extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 200extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
193 201
194/** Remove all of the PCI devices under this bus */ 202/** Remove all of the PCI devices under this bus */
203extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
195extern void pcibios_remove_pci_devices(struct pci_bus *bus); 204extern void pcibios_remove_pci_devices(struct pci_bus *bus);
196 205
197/** Discover new pci devices under this bus, and add them */ 206/** Discover new pci devices under this bus, and add them */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 6eefdcffa359..12798c9d4b4b 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -7,7 +7,7 @@
7 */ 7 */
8#define PTE_INDEX_SIZE 9 8#define PTE_INDEX_SIZE 9
9#define PMD_INDEX_SIZE 7 9#define PMD_INDEX_SIZE 7
10#define PUD_INDEX_SIZE 7 10#define PUD_INDEX_SIZE 9
11#define PGD_INDEX_SIZE 9 11#define PGD_INDEX_SIZE 9
12 12
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
@@ -19,7 +19,7 @@
19 19
20#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 20#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
21#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) 21#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
22#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE) 22#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
23#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 23#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
24 24
25/* PMD_SHIFT determines what a second-level page table entry can map */ 25/* PMD_SHIFT determines what a second-level page table entry can map */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 90533ddcd703..be4e2878fbc0 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -7,7 +7,7 @@
7#define PTE_INDEX_SIZE 12 7#define PTE_INDEX_SIZE 12
8#define PMD_INDEX_SIZE 12 8#define PMD_INDEX_SIZE 12
9#define PUD_INDEX_SIZE 0 9#define PUD_INDEX_SIZE 0
10#define PGD_INDEX_SIZE 4 10#define PGD_INDEX_SIZE 6
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) 13#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c4205616dfb5..0182c203e411 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
21#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) 21#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
22 22
23 23
24/* Some sanity checking */
25#if TASK_SIZE_USER64 > PGTABLE_RANGE
26#error TASK_SIZE_USER64 exceeds pagetable range
27#endif
28
29#ifdef CONFIG_PPC_STD_MMU_64
30#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
31#error TASK_SIZE_USER64 exceeds user VSID range
32#endif
33#endif
34
35/* 24/*
36 * Define the address range of the kernel non-linear virtual area 25 * Define the address range of the kernel non-linear virtual area
37 */ 26 */
@@ -41,7 +30,7 @@
41#else 30#else
42#define KERN_VIRT_START ASM_CONST(0xD000000000000000) 31#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
43#endif 32#endif
44#define KERN_VIRT_SIZE PGTABLE_RANGE 33#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
45 34
46/* 35/*
47 * The vmalloc space starts at the beginning of that region, and 36 * The vmalloc space starts at the beginning of that region, and
@@ -117,9 +106,6 @@
117 106
118#ifndef __ASSEMBLY__ 107#ifndef __ASSEMBLY__
119 108
120#include <linux/stddef.h>
121#include <asm/tlbflush.h>
122
123/* 109/*
124 * This is the default implementation of various PTE accessors, it's 110 * This is the default implementation of various PTE accessors, it's
125 * used in all cases except Book3S with 64K pages where we have a 111 * used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
198/* to find an entry in a kernel page-table-directory */ 184/* to find an entry in a kernel page-table-directory */
199/* This now only contains the vmalloc pages */ 185/* This now only contains the vmalloc pages */
200#define pgd_offset_k(address) pgd_offset(&init_mm, address) 186#define pgd_offset_k(address) pgd_offset(&init_mm, address)
201 187extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
188 pte_t *ptep, unsigned long pte, int huge);
202 189
203/* Atomic PTE updates */ 190/* Atomic PTE updates */
204static inline unsigned long pte_update(struct mm_struct *mm, 191static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2e0e4110f7ae..a9cbd3ba5c33 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@
9 9
10struct mm_struct; 10struct mm_struct;
11 11
12#ifdef CONFIG_DEBUG_VM
13extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
14#else /* CONFIG_DEBUG_VM */
15static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
16{
17}
18#endif /* !CONFIG_DEBUG_VM */
19
20#endif /* !__ASSEMBLY__ */ 12#endif /* !__ASSEMBLY__ */
21 13
22#if defined(CONFIG_PPC64) 14#if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
27 19
28#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
29 21
22#include <asm/tlbflush.h>
23
30/* Generic accessors to PTE bits */ 24/* Generic accessors to PTE bits */
31static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } 25static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
32static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 26static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4c25319f2fbc..5f73ce63fcae 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -126,6 +126,7 @@
126#define PPC_INST_TLBIVAX 0x7c000624 126#define PPC_INST_TLBIVAX 0x7c000624
127#define PPC_INST_TLBSRX_DOT 0x7c0006a5 127#define PPC_INST_TLBSRX_DOT 0x7c0006a5
128#define PPC_INST_XXLOR 0xf0000510 128#define PPC_INST_XXLOR 0xf0000510
129#define PPC_INST_XVCPSGNDP 0xf0000780
129 130
130#define PPC_INST_NAP 0x4c000364 131#define PPC_INST_NAP 0x4c000364
131#define PPC_INST_SLEEP 0x4c0003a4 132#define PPC_INST_SLEEP 0x4c0003a4
@@ -277,6 +278,8 @@
277 VSX_XX1((s), a, b)) 278 VSX_XX1((s), a, b))
278#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ 279#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
279 VSX_XX3((t), a, b)) 280 VSX_XX3((t), a, b))
281#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
282 VSX_XX3((t), (a), (b))))
280 283
281#define PPC_NAP stringify_in_c(.long PPC_INST_NAP) 284#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
282#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) 285#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 83efc6e81543..9dc5cd1fde1a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -97,8 +97,8 @@ extern struct task_struct *last_task_used_spe;
97#endif 97#endif
98 98
99#ifdef CONFIG_PPC64 99#ifdef CONFIG_PPC64
100/* 64-bit user address space is 44-bits (16TB user VM) */ 100/* 64-bit user address space is 46-bits (64TB user VM) */
101#define TASK_SIZE_USER64 (0x0000100000000000UL) 101#define TASK_SIZE_USER64 (0x0000400000000000UL)
102 102
103/* 103/*
104 * 32-bit user address space is 4GB - 1 page 104 * 32-bit user address space is 4GB - 1 page
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 59247e816ac5..eedf427c9124 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -58,14 +58,16 @@
58/* Trick: we set __end to va + 64k, which happens to work for 58/* Trick: we set __end to va + 64k, which happens to work for
59 * a 16M page as well, since we want only one iteration 59 * a 16M page as well, since we want only one iteration
60 */ 60 */
61#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ 61#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
62 do { \ 62 do { \
63 unsigned long __end = va + PAGE_SIZE; \ 63 unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
64 unsigned __split = (psize == MMU_PAGE_4K || \ 64 unsigned __split = (psize == MMU_PAGE_4K || \
65 psize == MMU_PAGE_64K_AP); \ 65 psize == MMU_PAGE_64K_AP); \
66 shift = mmu_psize_defs[psize].shift; \ 66 shift = mmu_psize_defs[psize].shift; \
67 for (index = 0; va < __end; index++, va += (1L << shift)) { \ 67 for (index = 0; vpn < __end; index++, \
68 if (!__split || __rpte_sub_valid(rpte, index)) do { \ 68 vpn += (1L << (shift - VPN_SHIFT))) { \
69 if (!__split || __rpte_sub_valid(rpte, index)) \
70 do {
69 71
70#define pte_iterate_hashed_end() } while(0); } } while(0) 72#define pte_iterate_hashed_end() } while(0); } } while(0)
71 73
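To make the reworked stepping concrete, a loop-only C sketch for a 64K base page (PAGE_SHIFT = 16) backed by 4K hash subpages (shift = 12), with VPN_SHIFT = 12; iterate_subpages_sketch() is an illustrative name:

static void iterate_subpages_sketch(unsigned long vpn)
{
	unsigned long __end = vpn + (1UL << (16 - 12));	/* 16 subpage steps */
	long index;

	for (index = 0; vpn < __end; index++, vpn += 1UL << (12 - 12)) {
		/* per-subpage work on hash slot 'index' */
	}
}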
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 121a90bbf778..a1096fb62816 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -524,6 +524,7 @@
524 524
525#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ 525#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
526#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ 526#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
527#define HSRR1_DENORM 0x00100000 /* Denorm exception */
527 528
528#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */ 529#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
529#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */ 530#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 0c5fa3145615..f6fc0ee813d7 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -10,8 +10,8 @@
10 */ 10 */
11#define SECTION_SIZE_BITS 24 11#define SECTION_SIZE_BITS 24
12 12
13#define MAX_PHYSADDR_BITS 44 13#define MAX_PHYSADDR_BITS 46
14#define MAX_PHYSMEM_BITS 44 14#define MAX_PHYSMEM_BITS 46
15 15
16#endif /* CONFIG_SPARSEMEM */ 16#endif /* CONFIG_SPARSEMEM */
17 17
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index e942203cd4a8..8ceea14d6fe4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -104,6 +104,8 @@ static inline struct thread_info *current_thread_info(void)
104#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ 104#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
105#define TIF_UPROBE 14 /* breakpointed or single-stepping */ 105#define TIF_UPROBE 14 /* breakpointed or single-stepping */
106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ 106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
107#define TIF_EMULATE_STACK_STORE 16 /* Emulating an instruction
108 that stores to the stack? */
107 109
108/* as above, but as bit values */ 110/* as above, but as bit values */
109#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -121,6 +123,7 @@ static inline struct thread_info *current_thread_info(void)
121#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 123#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
122#define _TIF_UPROBE (1<<TIF_UPROBE) 124#define _TIF_UPROBE (1<<TIF_UPROBE)
123#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 125#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
126#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
124#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 127#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
125 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) 128 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
126 129
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 81143fcbd113..61a59271665b 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -95,7 +95,7 @@ struct ppc64_tlb_batch {
95 unsigned long index; 95 unsigned long index;
96 struct mm_struct *mm; 96 struct mm_struct *mm;
97 real_pte_t pte[PPC64_TLB_BATCH_NR]; 97 real_pte_t pte[PPC64_TLB_BATCH_NR];
98 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 98 unsigned long vpn[PPC64_TLB_BATCH_NR];
99 unsigned int psize; 99 unsigned int psize;
100 int ssize; 100 int ssize;
101}; 101};
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
103 103
104extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); 104extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
105 105
106extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
107 pte_t *ptep, unsigned long pte, int huge);
108
109#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 106#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
110 107
111static inline void arch_enter_lazy_mmu_mode(void) 108static inline void arch_enter_lazy_mmu_mode(void)
@@ -127,7 +124,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
127#define arch_flush_lazy_mmu_mode() do {} while (0) 124#define arch_flush_lazy_mmu_mode() do {} while (0)
128 125
129 126
130extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, 127extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
131 int ssize, int local); 128 int ssize, int local);
132extern void flush_hash_range(unsigned long number, int local); 129extern void flush_hash_range(unsigned long number, int local);
133 130
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 17bb40cad5bf..4db49590acf5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -98,11 +98,6 @@ struct exception_table_entry {
98 * PowerPC, we can just do these as direct assignments. (Of course, the 98 * PowerPC, we can just do these as direct assignments. (Of course, the
99 * exception handling means that it's no longer "just"...) 99 * exception handling means that it's no longer "just"...)
100 * 100 *
101 * The "user64" versions of the user access functions are versions that
102 * allow access of 64-bit data. The "get_user" functions do not
103 * properly handle 64-bit data because the value gets down cast to a long.
104 * The "put_user" functions already handle 64-bit data properly but we add
105 * "user64" versions for completeness
106 */ 101 */
107#define get_user(x, ptr) \ 102#define get_user(x, ptr) \
108 __get_user_check((x), (ptr), sizeof(*(ptr))) 103 __get_user_check((x), (ptr), sizeof(*(ptr)))
@@ -114,12 +109,6 @@ struct exception_table_entry {
114#define __put_user(x, ptr) \ 109#define __put_user(x, ptr) \
115 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 110 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
116 111
117#ifndef __powerpc64__
118#define __get_user64(x, ptr) \
119 __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
120#define __put_user64(x, ptr) __put_user(x, ptr)
121#endif
122
123#define __get_user_inatomic(x, ptr) \ 112#define __get_user_inatomic(x, ptr) \
124 __get_user_nosleep((x), (ptr), sizeof(*(ptr))) 113 __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
125#define __put_user_inatomic(x, ptr) \ 114#define __put_user_inatomic(x, ptr) \
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ead5016b02d0..af37528da49f 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -831,19 +831,56 @@ restore_user:
831 bnel- load_dbcr0 831 bnel- load_dbcr0
832#endif 832#endif
833 833
834#ifdef CONFIG_PREEMPT
835 b restore 834 b restore
836 835
837/* N.B. the only way to get here is from the beq following ret_from_except. */ 836/* N.B. the only way to get here is from the beq following ret_from_except. */
838resume_kernel: 837resume_kernel:
839 /* check current_thread_info->preempt_count */ 838 /* check current_thread_info->flags for _TIF_EMULATE_STACK_STORE */
840 CURRENT_THREAD_INFO(r9, r1) 839 CURRENT_THREAD_INFO(r9, r1)
840 lwz r8,TI_FLAGS(r9)
841 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
842 beq+ 1f
843
844 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
845
846 lwz r3,GPR1(r1)
847 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
848 mr r4,r1 /* src: current exception frame */
849 mr r1,r3 /* Reroute the trampoline frame to r1 */
850
851 /* Copy from the original to the trampoline. */
852 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
853 li r6,0 /* start offset: 0 */
854 mtctr r5
8552: lwzx r0,r6,r4
856 stwx r0,r6,r3
857 addi r6,r6,4
858 bdnz 2b
859
860 /* Do real store operation to complete stwu */
861 lwz r5,GPR1(r1)
862 stw r8,0(r5)
863
864 /* Clear _TIF_EMULATE_STACK_STORE flag */
865 lis r11,_TIF_EMULATE_STACK_STORE@h
866 addi r5,r9,TI_FLAGS
8670: lwarx r8,0,r5
868 andc r8,r8,r11
869#ifdef CONFIG_IBM405_ERR77
870 dcbt 0,r5
871#endif
872 stwcx. r8,0,r5
873 bne- 0b
8741:
875
876#ifdef CONFIG_PREEMPT
877 /* check current_thread_info->preempt_count */
841 lwz r0,TI_PREEMPT(r9) 878 lwz r0,TI_PREEMPT(r9)
842 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ 879 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
843 bne restore 880 bne restore
844 lwz r0,TI_FLAGS(r9) 881 andi. r8,r8,_TIF_NEED_RESCHED
845 andi. r0,r0,_TIF_NEED_RESCHED
846 beq+ restore 882 beq+ restore
883 lwz r3,_MSR(r1)
847 andi. r0,r3,MSR_EE /* interrupts off? */ 884 andi. r0,r3,MSR_EE /* interrupts off? */
848 beq restore /* don't schedule if so */ 885 beq restore /* don't schedule if so */
849#ifdef CONFIG_TRACE_IRQFLAGS 886#ifdef CONFIG_TRACE_IRQFLAGS
@@ -864,8 +901,6 @@ resume_kernel:
864 */ 901 */
865 bl trace_hardirqs_on 902 bl trace_hardirqs_on
866#endif 903#endif
867#else
868resume_kernel:
869#endif /* CONFIG_PREEMPT */ 904#endif /* CONFIG_PREEMPT */
870 905
871 /* interrupts are hard-disabled at this point */ 906 /* interrupts are hard-disabled at this point */
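In C terms, the trampoline added to resume_kernel above does roughly the following, as a sketch (reroute_trampoline() and its parameters are illustrative: cur_frame is r1, new_sp is the value the deferred stwu left in GPR1, old_sp is r1 + INT_FRAME_SIZE):

#include <string.h>

static unsigned long reroute_trampoline(unsigned long cur_frame,
					unsigned long new_sp,
					unsigned long old_sp,
					unsigned long frame_size)
{
	unsigned long dst = new_sp - frame_size;	/* trampoline frame */

	memcpy((void *)dst, (void *)cur_frame, frame_size);
	*(unsigned long *)new_sp = old_sp;	/* complete the stwu back chain */
	return dst;				/* reroute r1 to the copy */
}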
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b40e0b4815b3..0e931aaffca2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -593,6 +593,41 @@ _GLOBAL(ret_from_except_lite)
593 b .ret_from_except 593 b .ret_from_except
594 594
595resume_kernel: 595resume_kernel:
596 /* check current_thread_info->flags for _TIF_EMULATE_STACK_STORE */
597 CURRENT_THREAD_INFO(r9, r1)
598 ld r8,TI_FLAGS(r9)
599 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
600 beq+ 1f
601
602 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
603
604 ld r3,GPR1(r1)
605 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
606 mr r4,r1 /* src: current exception frame */
607 mr r1,r3 /* Reroute the trampoline frame to r1 */
608
609 /* Copy from the original to the trampoline. */
610 li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
611 li r6,0 /* start offset: 0 */
612 mtctr r5
6132: ldx r0,r6,r4
614 stdx r0,r6,r3
615 addi r6,r6,8
616 bdnz 2b
617
618 /* Do real store operation to complete stwu */
619 ld r5,GPR1(r1)
620 std r8,0(r5)
621
622 /* Clear _TIF_EMULATE_STACK_STORE flag */
623 lis r11,_TIF_EMULATE_STACK_STORE@h
624 addi r5,r9,TI_FLAGS
6250: ldarx r4,0,r5
626 andc r4,r4,r11
627 stdcx. r4,0,r5
628 bne- 0b
6291:
630
596#ifdef CONFIG_PREEMPT 631#ifdef CONFIG_PREEMPT
597 /* Check if we need to preempt */ 632 /* Check if we need to preempt */
598 andi. r0,r4,_TIF_NEED_RESCHED 633 andi. r0,r4,_TIF_NEED_RESCHED
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 39aa97d3ff88..10b658ad65e1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -275,6 +275,31 @@ vsx_unavailable_pSeries_1:
275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) 275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) 276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
277 277
278 . = 0x1500
279 .global denorm_exception_hv
280denorm_exception_hv:
281 HMT_MEDIUM
282 mtspr SPRN_SPRG_HSCRATCH0,r13
283 mfspr r13,SPRN_SPRG_HPACA
284 std r9,PACA_EXGEN+EX_R9(r13)
285 std r10,PACA_EXGEN+EX_R10(r13)
286 std r11,PACA_EXGEN+EX_R11(r13)
287 std r12,PACA_EXGEN+EX_R12(r13)
288 mfspr r9,SPRN_SPRG_HSCRATCH0
289 std r9,PACA_EXGEN+EX_R13(r13)
290 mfcr r9
291
292#ifdef CONFIG_PPC_DENORMALISATION
293 mfspr r10,SPRN_HSRR1
294 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
295 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
296 addi r11,r11,-4 /* HSRR0 is next instruction */
297 bne+ denorm_assist
298#endif
299
300 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
301 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
302
278#ifdef CONFIG_CBE_RAS 303#ifdef CONFIG_CBE_RAS
279 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) 304 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
280 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602) 305 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
@@ -336,6 +361,103 @@ do_stab_bolted_pSeries:
336 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) 361 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
337 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982) 362 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
338 363
364#ifdef CONFIG_PPC_DENORMALISATION
365denorm_assist:
366BEGIN_FTR_SECTION
367/*
368 * To denormalise we need to move a copy of the register to itself.
369 * For POWER6 do that here for all FP regs.
370 */
371 mfmsr r10
372 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
373 xori r10,r10,(MSR_FE0|MSR_FE1)
374 mtmsrd r10
375 sync
376 fmr 0,0
377 fmr 1,1
378 fmr 2,2
379 fmr 3,3
380 fmr 4,4
381 fmr 5,5
382 fmr 6,6
383 fmr 7,7
384 fmr 8,8
385 fmr 9,9
386 fmr 10,10
387 fmr 11,11
388 fmr 12,12
389 fmr 13,13
390 fmr 14,14
391 fmr 15,15
392 fmr 16,16
393 fmr 17,17
394 fmr 18,18
395 fmr 19,19
396 fmr 20,20
397 fmr 21,21
398 fmr 22,22
399 fmr 23,23
400 fmr 24,24
401 fmr 25,25
402 fmr 26,26
403 fmr 27,27
404 fmr 28,28
405 fmr 29,29
406 fmr 30,30
407 fmr 31,31
408FTR_SECTION_ELSE
409/*
410 * To denormalise we need to move a copy of the register to itself.
411 * For POWER7 do that here for the first 32 VSX registers only.
412 */
413 mfmsr r10
414 oris r10,r10,MSR_VSX@h
415 mtmsrd r10
416 sync
417 XVCPSGNDP(0,0,0)
418 XVCPSGNDP(1,1,1)
419 XVCPSGNDP(2,2,2)
420 XVCPSGNDP(3,3,3)
421 XVCPSGNDP(4,4,4)
422 XVCPSGNDP(5,5,5)
423 XVCPSGNDP(6,6,6)
424 XVCPSGNDP(7,7,7)
425 XVCPSGNDP(8,8,8)
426 XVCPSGNDP(9,9,9)
427 XVCPSGNDP(10,10,10)
428 XVCPSGNDP(11,11,11)
429 XVCPSGNDP(12,12,12)
430 XVCPSGNDP(13,13,13)
431 XVCPSGNDP(14,14,14)
432 XVCPSGNDP(15,15,15)
433 XVCPSGNDP(16,16,16)
434 XVCPSGNDP(17,17,17)
435 XVCPSGNDP(18,18,18)
436 XVCPSGNDP(19,19,19)
437 XVCPSGNDP(20,20,20)
438 XVCPSGNDP(21,21,21)
439 XVCPSGNDP(22,22,22)
440 XVCPSGNDP(23,23,23)
441 XVCPSGNDP(24,24,24)
442 XVCPSGNDP(25,25,25)
443 XVCPSGNDP(26,26,26)
444 XVCPSGNDP(27,27,27)
445 XVCPSGNDP(28,28,28)
446 XVCPSGNDP(29,29,29)
447 XVCPSGNDP(30,30,30)
448 XVCPSGNDP(31,31,31)
449ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
450 mtspr SPRN_HSRR0,r11
451 mtcrf 0x80,r9
452 ld r9,PACA_EXGEN+EX_R9(r13)
453 ld r10,PACA_EXGEN+EX_R10(r13)
454 ld r11,PACA_EXGEN+EX_R11(r13)
455 ld r12,PACA_EXGEN+EX_R12(r13)
456 ld r13,PACA_EXGEN+EX_R13(r13)
457 HRFID
458 b .
459#endif
460
339 .align 7 461 .align 7
340 /* moved from 0xe00 */ 462 /* moved from 0xe00 */
341 STD_EXCEPTION_HV(., 0xe02, h_data_storage) 463 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
@@ -495,6 +617,7 @@ machine_check_common:
495 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) 617 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
496 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) 618 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
497 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 619 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
620 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
498#ifdef CONFIG_ALTIVEC 621#ifdef CONFIG_ALTIVEC
499 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 622 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
500#else 623#else
@@ -960,7 +1083,9 @@ _GLOBAL(do_stab_bolted)
960 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1083 rldimi r10,r11,7,52 /* r10 = first ste of the group */
961 1084
962 /* Calculate VSID */ 1085 /* Calculate VSID */
963 /* This is a kernel address, so protovsid = ESID */ 1086 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1087 li r9,0x1
1088 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
964 ASM_VSID_SCRAMBLE(r11, r9, 256M) 1089 ASM_VSID_SCRAMBLE(r11, r9, 256M)
965 rldic r9,r11,12,16 /* r9 = vsid << 12 */ 1090 rldic r9,r11,12,16 /* r9 = vsid << 12 */
966 1091
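The two added instructions amount to the following C, as a sketch (CONTEXT_BITS + USER_ESID_BITS = 37 with the values defined earlier in this patch; the function name is illustrative):

/* Equivalent of "li r9,0x1; rldimi r11,r9,37,0" for a 36-bit kernel ESID. */
static unsigned long kernel_proto_vsid(unsigned long esid)
{
	return esid | (1UL << 37);	/* CONTEXT_BITS + USER_ESID_BITS */
}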
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1f017bb7a7ce..71413f41278f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -489,10 +489,10 @@ void do_IRQ(struct pt_regs *regs)
489 struct pt_regs *old_regs = set_irq_regs(regs); 489 struct pt_regs *old_regs = set_irq_regs(regs);
490 unsigned int irq; 490 unsigned int irq;
491 491
492 trace_irq_entry(regs);
493
494 irq_enter(); 492 irq_enter();
495 493
494 trace_irq_entry(regs);
495
496 check_stack_overflow(); 496 check_stack_overflow();
497 497
498 /* 498 /*
@@ -511,10 +511,10 @@ void do_IRQ(struct pt_regs *regs)
511 else 511 else
512 __get_cpu_var(irq_stat).spurious_irqs++; 512 __get_cpu_var(irq_stat).spurious_irqs++;
513 513
514 trace_irq_exit(regs);
515
514 irq_exit(); 516 irq_exit();
515 set_irq_regs(old_regs); 517 set_irq_regs(old_regs);
516
517 trace_irq_exit(regs);
518} 518}
519 519
520void __init init_IRQ(void) 520void __init init_IRQ(void)
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 4cb714792bea..7f94f760dd0c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -99,6 +99,26 @@ void pcibios_free_controller(struct pci_controller *phb)
99 kfree(phb); 99 kfree(phb);
100} 100}
101 101
102/*
103 * This function returns the minimal alignment for memory or
104 * I/O windows of the associated P2P bridge. By default, the
105 * alignment is 4KiB for I/O windows and 1MiB for memory
106 * windows.
107 */
108resource_size_t pcibios_window_alignment(struct pci_bus *bus,
109 unsigned long type)
110{
111 if (ppc_md.pcibios_window_alignment)
112 return ppc_md.pcibios_window_alignment(bus, type);
113
114 /*
115 * The PCI core will figure out the default
116 * alignment: 4KiB for I/O and 1MiB for
117 * memory windows.
118 */
119 return 1;
120}
121
102static resource_size_t pcibios_io_size(const struct pci_controller *hose) 122static resource_size_t pcibios_io_size(const struct pci_controller *hose)
103{ 123{
104#ifdef CONFIG_PPC64 124#ifdef CONFIG_PPC64
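A hypothetical platform hook, to show how the ppc_md callback consulted above is meant to be filled in; foo_window_alignment() and the chosen values are illustrative, not part of this patch:

#include <linux/pci.h>

static resource_size_t foo_window_alignment(struct pci_bus *bus,
					    unsigned long type)
{
	/* e.g. a PHB that segments its windows: 64KiB I/O, 16MiB memory */
	return (type & IORESOURCE_IO) ? 0x10000 : 0x1000000;
}

/* wired up at platform init, e.g.: */
/* ppc_md.pcibios_window_alignment = foo_window_alignment; */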
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e49e93191b69..bd693a11d86e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -493,8 +493,6 @@ void timer_interrupt(struct pt_regs * regs)
493 */ 493 */
494 may_hard_irq_enable(); 494 may_hard_irq_enable();
495 495
496 trace_timer_interrupt_entry(regs);
497
498 __get_cpu_var(irq_stat).timer_irqs++; 496 __get_cpu_var(irq_stat).timer_irqs++;
499 497
500#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 498#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
@@ -505,6 +503,8 @@ void timer_interrupt(struct pt_regs * regs)
505 old_regs = set_irq_regs(regs); 503 old_regs = set_irq_regs(regs);
506 irq_enter(); 504 irq_enter();
507 505
506 trace_timer_interrupt_entry(regs);
507
508 if (test_irq_work_pending()) { 508 if (test_irq_work_pending()) {
509 clear_irq_work_pending(); 509 clear_irq_work_pending();
510 irq_work_run(); 510 irq_work_run();
@@ -529,10 +529,10 @@ void timer_interrupt(struct pt_regs * regs)
529 } 529 }
530#endif 530#endif
531 531
532 trace_timer_interrupt_exit(regs);
533
532 irq_exit(); 534 irq_exit();
533 set_irq_regs(old_regs); 535 set_irq_regs(old_regs);
534
535 trace_timer_interrupt_exit(regs);
536} 536}
537 537
538/* 538/*
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 837f13e7b6bf..00aa61268e0d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -141,7 +141,7 @@ extern char etext[];
141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
142{ 142{
143 pfn_t hpaddr; 143 pfn_t hpaddr;
144 u64 va; 144 u64 vpn;
145 u64 vsid; 145 u64 vsid;
146 struct kvmppc_sid_map *map; 146 struct kvmppc_sid_map *map;
147 volatile u32 *pteg; 147 volatile u32 *pteg;
@@ -173,7 +173,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
173 BUG_ON(!map); 173 BUG_ON(!map);
174 174
175 vsid = map->host_vsid; 175 vsid = map->host_vsid;
176 va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); 176 vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
177 177
178next_pteg: 178next_pteg:
179 if (rr == 16) { 179 if (rr == 16) {
@@ -244,11 +244,11 @@ next_pteg:
244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", 244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
245 orig_pte->may_write ? 'w' : '-', 245 orig_pte->may_write ? 'w' : '-',
246 orig_pte->may_execute ? 'x' : '-', 246 orig_pte->may_execute ? 'x' : '-',
247 orig_pte->eaddr, (ulong)pteg, va, 247 orig_pte->eaddr, (ulong)pteg, vpn,
248 orig_pte->vpage, hpaddr); 248 orig_pte->vpage, hpaddr);
249 249
250 pte->slot = (ulong)&pteg[rr]; 250 pte->slot = (ulong)&pteg[rr];
251 pte->host_va = va; 251 pte->host_vpn = vpn;
252 pte->pte = *orig_pte; 252 pte->pte = *orig_pte;
253 pte->pfn = hpaddr >> PAGE_SHIFT; 253 pte->pfn = hpaddr >> PAGE_SHIFT;
254 254
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0688b6b39585..4d72f9ebc554 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -33,7 +33,7 @@
33 33
34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
35{ 35{
36 ppc_md.hpte_invalidate(pte->slot, pte->host_va, 36 ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
37 MMU_PAGE_4K, MMU_SEGSIZE_256M, 37 MMU_PAGE_4K, MMU_SEGSIZE_256M,
38 false); 38 false);
39} 39}
@@ -80,8 +80,9 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
80 80
81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
82{ 82{
83 unsigned long vpn;
83 pfn_t hpaddr; 84 pfn_t hpaddr;
84 ulong hash, hpteg, va; 85 ulong hash, hpteg;
85 u64 vsid; 86 u64 vsid;
86 int ret; 87 int ret;
87 int rflags = 0x192; 88 int rflags = 0x192;
@@ -117,7 +118,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
117 } 118 }
118 119
119 vsid = map->host_vsid; 120 vsid = map->host_vsid;
120 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 121 vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
121 122
122 if (!orig_pte->may_write) 123 if (!orig_pte->may_write)
123 rflags |= HPTE_R_PP; 124 rflags |= HPTE_R_PP;
@@ -129,7 +130,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
129 else 130 else
130 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); 131 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
131 132
132 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); 133 hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
133 134
134map_again: 135map_again:
135 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 136 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -141,7 +142,8 @@ map_again:
141 goto out; 142 goto out;
142 } 143 }
143 144
144 ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); 145 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
146 MMU_PAGE_4K, MMU_SEGSIZE_256M);
145 147
146 if (ret < 0) { 148 if (ret < 0) {
147 /* If we couldn't map a primary PTE, try a secondary */ 149 /* If we couldn't map a primary PTE, try a secondary */
@@ -152,7 +154,8 @@ map_again:
152 } else { 154 } else {
153 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); 155 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
154 156
155 trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); 157 trace_kvm_book3s_64_mmu_map(rflags, hpteg,
158 vpn, hpaddr, orig_pte);
156 159
157 /* The ppc_md code may give us a secondary entry even though we 160 /* The ppc_md code may give us a secondary entry even though we
158 asked for a primary. Fix up. */ 161 asked for a primary. Fix up. */
@@ -162,7 +165,7 @@ map_again:
162 } 165 }
163 166
164 pte->slot = hpteg + (ret & 7); 167 pte->slot = hpteg + (ret & 7);
165 pte->host_va = va; 168 pte->host_vpn = vpn;
166 pte->pte = *orig_pte; 169 pte->pte = *orig_pte;
167 pte->pfn = hpaddr >> PAGE_SHIFT; 170 pte->pfn = hpaddr >> PAGE_SHIFT;
168 171
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 877186b7b1c3..ddb6a2149d44 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -189,7 +189,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
189 TP_ARGS(pte), 189 TP_ARGS(pte),
190 190
191 TP_STRUCT__entry( 191 TP_STRUCT__entry(
192 __field( u64, host_va ) 192 __field( u64, host_vpn )
193 __field( u64, pfn ) 193 __field( u64, pfn )
194 __field( ulong, eaddr ) 194 __field( ulong, eaddr )
195 __field( u64, vpage ) 195 __field( u64, vpage )
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
198 ), 198 ),
199 199
200 TP_fast_assign( 200 TP_fast_assign(
201 __entry->host_va = pte->host_va; 201 __entry->host_vpn = pte->host_vpn;
202 __entry->pfn = pte->pfn; 202 __entry->pfn = pte->pfn;
203 __entry->eaddr = pte->pte.eaddr; 203 __entry->eaddr = pte->pte.eaddr;
204 __entry->vpage = pte->pte.vpage; 204 __entry->vpage = pte->pte.vpage;
@@ -208,8 +208,8 @@ TRACE_EVENT(kvm_book3s_mmu_map,
208 (pte->pte.may_execute ? 0x1 : 0); 208 (pte->pte.may_execute ? 0x1 : 0);
209 ), 209 ),
210 210
211 TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 211 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
212 __entry->host_va, __entry->pfn, __entry->eaddr, 212 __entry->host_vpn, __entry->pfn, __entry->eaddr,
213 __entry->vpage, __entry->raddr, __entry->flags) 213 __entry->vpage, __entry->raddr, __entry->flags)
214); 214);
215 215
@@ -218,7 +218,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
218 TP_ARGS(pte), 218 TP_ARGS(pte),
219 219
220 TP_STRUCT__entry( 220 TP_STRUCT__entry(
221 __field( u64, host_va ) 221 __field( u64, host_vpn )
222 __field( u64, pfn ) 222 __field( u64, pfn )
223 __field( ulong, eaddr ) 223 __field( ulong, eaddr )
224 __field( u64, vpage ) 224 __field( u64, vpage )
@@ -227,7 +227,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
227 ), 227 ),
228 228
229 TP_fast_assign( 229 TP_fast_assign(
230 __entry->host_va = pte->host_va; 230 __entry->host_vpn = pte->host_vpn;
231 __entry->pfn = pte->pfn; 231 __entry->pfn = pte->pfn;
232 __entry->eaddr = pte->pte.eaddr; 232 __entry->eaddr = pte->pte.eaddr;
233 __entry->vpage = pte->pte.vpage; 233 __entry->vpage = pte->pte.vpage;
@@ -238,7 +238,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
238 ), 238 ),
239 239
240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
241 __entry->host_va, __entry->pfn, __entry->eaddr, 241 __entry->host_vpn, __entry->pfn, __entry->eaddr,
242 __entry->vpage, __entry->raddr, __entry->flags) 242 __entry->vpage, __entry->raddr, __entry->flags)
243); 243);
244 244
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9a52349874ee..e15c521846ca 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -566,7 +566,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
566 unsigned long int ea; 566 unsigned long int ea;
567 unsigned int cr, mb, me, sh; 567 unsigned int cr, mb, me, sh;
568 int err; 568 int err;
569 unsigned long old_ra; 569 unsigned long old_ra, val3;
570 long ival; 570 long ival;
571 571
572 opcode = instr >> 26; 572 opcode = instr >> 26;
@@ -1486,11 +1486,43 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1486 goto ldst_done; 1486 goto ldst_done;
1487 1487
1488 case 36: /* stw */ 1488 case 36: /* stw */
1489 case 37: /* stwu */
1490 val = regs->gpr[rd]; 1489 val = regs->gpr[rd];
1491 err = write_mem(val, dform_ea(instr, regs), 4, regs); 1490 err = write_mem(val, dform_ea(instr, regs), 4, regs);
1492 goto ldst_done; 1491 goto ldst_done;
1493 1492
1493 case 37: /* stwu */
1494 val = regs->gpr[rd];
1495 val3 = dform_ea(instr, regs);
1496 /*
1497 * On PPC32 we always use stwu with r1 to change the stack pointer.
1498 * So this emulated store may corrupt the exception frame; we
1499 * therefore provide an exception frame trampoline, which is pushed
1500 * below the kprobed function's stack. We only update gpr[1] here and
1501 * don't emulate the real store operation; the real store is done
1502 * safely in the exception return code, which checks this flag.
1503 */
1504 if ((ra == 1) && !(regs->msr & MSR_PR)
1505 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
1506 /*
1507 * Check if we would overflow the kernel stack
1508 */
1509 if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
1510 printk(KERN_CRIT "Can't kprobe this since the kernel stack would overflow.\n");
1511 err = -EINVAL;
1512 break;
1513 }
1514
1515 /*
1516 * Check if the flag is already set, since that means we'd
1517 * lose the previous value.
1518 */
1519 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
1520 set_thread_flag(TIF_EMULATE_STACK_STORE);
1521 err = 0;
1522 } else
1523 err = write_mem(val, val3, 4, regs);
1524 goto ldst_done;
1525
1494 case 38: /* stb */ 1526 case 38: /* stb */
1495 case 39: /* stbu */ 1527 case 39: /* stbu */
1496 val = regs->gpr[rd]; 1528 val = regs->gpr[rd];
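The deferral test above can be read as the following predicate, as a sketch (names are illustrative): a store through r1, in kernel mode, whose target lies at or above r1 - STACK_INT_FRAME_SIZE would scribble on the live exception frame, so it is postponed.

static int must_defer_stack_store(unsigned long r1, unsigned long ea,
				  unsigned long frame_size,
				  int kernel_mode, int ra_is_r1)
{
	/* e.g. a kprobed prologue "stwu r1,-16(r1)" gives ea = r1 - 16 */
	return ra_is_r1 && kernel_mode && ea >= r1 - frame_size;
}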
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 602aeb06d298..56585086413a 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
63 /* Save non-volatile registers. 63 /* Save non-volatile registers.
64 * r31 will hold "old PTE" 64 * r31 will hold "old PTE"
65 * r30 is "new PTE" 65 * r30 is "new PTE"
66 * r29 is "va" 66 * r29 is vpn
67 * r28 is a hash value 67 * r28 is a hash value
68 * r27 is hashtab mask (maybe dynamic patched instead ?) 68 * r27 is hashtab mask (maybe dynamic patched instead ?)
69 */ 69 */
@@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
111 cmpdi r9,0 /* check segment size */ 111 cmpdi r9,0 /* check segment size */
112 bne 3f 112 bne 3f
113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
114 /* Calc va and put it in r29 */ 114 /* Calc vpn and put it in r29 */
115 rldicr r29,r5,28,63-28 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT
116 rldicl r3,r3,0,36 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
117 or r29,r3,r29 117 or r29,r28,r29
118 118
119 /* Calculate hash value for primary slot and store it in r28 */ 119 /* Calculate hash value for primary slot and store it in r28 */
120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
122 xor r28,r5,r0 122 xor r28,r5,r0
123 b 4f 123 b 4f
124 124
1253: /* Calc VA and hash in r29 and r28 for 1T segment */ 1253: /* Calc vpn and put it in r29 */
126 sldi r29,r5,40 /* vsid << 40 */ 126 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
127 clrldi r3,r3,24 /* ea & 0xffffffffff */ 127 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
128 or r29,r28,r29
129
130 /*
131 * calculate hash value for primary slot and
132 * store it in r28 for 1T segment
133 */
128 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 134 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
129 clrldi r5,r5,40 /* vsid & 0xffffff */ 135 clrldi r5,r5,40 /* vsid & 0xffffff */
130 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 136 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
131 xor r28,r28,r5 137 xor r28,r28,r5
132 or r29,r3,r29 /* VA */
133 xor r28,r28,r0 /* hash */ 138 xor r28,r28,r0 /* hash */
134 139
135 /* Convert linux PTE bits into HW equivalents */ 140 /* Convert linux PTE bits into HW equivalents */
@@ -185,7 +190,7 @@ htab_insert_pte:
185 190
186 /* Call ppc_md.hpte_insert */ 191 /* Call ppc_md.hpte_insert */
187 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 192 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
188 mr r4,r29 /* Retrieve va */ 193 mr r4,r29 /* Retrieve vpn */
189 li r7,0 /* !bolted, !secondary */ 194 li r7,0 /* !bolted, !secondary */
190 li r8,MMU_PAGE_4K /* page size */ 195 li r8,MMU_PAGE_4K /* page size */
191 ld r9,STK_PARAM(R9)(r1) /* segment size */ 196 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
208 213
209 /* Call ppc_md.hpte_insert */ 214 /* Call ppc_md.hpte_insert */
210 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 215 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
211 mr r4,r29 /* Retrieve va */ 216 mr r4,r29 /* Retrieve vpn */
212 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 217 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
213 li r8,MMU_PAGE_4K /* page size */ 218 li r8,MMU_PAGE_4K /* page size */
214 ld r9,STK_PARAM(R9)(r1) /* segment size */ 219 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -278,7 +283,7 @@ htab_modify_pte:
278 add r3,r0,r3 /* add slot idx */ 283 add r3,r0,r3 /* add slot idx */
279 284
280 /* Call ppc_md.hpte_updatepp */ 285 /* Call ppc_md.hpte_updatepp */
281 mr r5,r29 /* va */ 286 mr r5,r29 /* vpn */
282 li r6,MMU_PAGE_4K /* page size */ 287 li r6,MMU_PAGE_4K /* page size */
283 ld r7,STK_PARAM(R9)(r1) /* segment size */ 288 ld r7,STK_PARAM(R9)(r1) /* segment size */
284 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 289 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
339 /* Save non-volatile registers. 344 /* Save non-volatile registers.
340 * r31 will hold "old PTE" 345 * r31 will hold "old PTE"
341 * r30 is "new PTE" 346 * r30 is "new PTE"
342 * r29 is "va" 347 * r29 is vpn
343 * r28 is a hash value 348 * r28 is a hash value
344 * r27 is hashtab mask (maybe dynamic patched instead ?) 349 * r27 is hashtab mask (maybe dynamic patched instead ?)
345 * r26 is the hidx mask 350 * r26 is the hidx mask
@@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
394 cmpdi r9,0 /* check segment size */ 399 cmpdi r9,0 /* check segment size */
395 bne 3f 400 bne 3f
396END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 401END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
397 /* Calc va and put it in r29 */ 402 /* Calc vpn and put it in r29 */
398 rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ 403 sldi r29,r5,SID_SHIFT - VPN_SHIFT
399 rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ 404 /*
400 or r29,r3,r29 /* r29 = va */ 405 * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
406 * srdi r28,r3,VPN_SHIFT
407 */
408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
409 or r29,r28,r29
401 410
402 /* Calculate hash value for primary slot and store it in r28 */ 411 /* Calculate hash value for primary slot and store it in r28 */
403 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 412 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
405 xor r28,r5,r0 414 xor r28,r5,r0
406 b 4f 415 b 4f
407 416
4083: /* Calc VA and hash in r29 and r28 for 1T segment */ 4173: /* Calc vpn and put it in r29 */
409 sldi r29,r5,40 /* vsid << 40 */ 418 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
410 clrldi r3,r3,24 /* ea & 0xffffffffff */ 419 /*
420 * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
421 * srdi r28,r3,VPN_SHIFT
422 */
423 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
424 or r29,r28,r29
425
426 /*
427 * Calculate hash value for primary slot and
428 * store it in r28 for 1T segment
429 */
411 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 430 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
412 clrldi r5,r5,40 /* vsid & 0xffffff */ 431 clrldi r5,r5,40 /* vsid & 0xffffff */
413 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 432 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
414 xor r28,r28,r5 433 xor r28,r28,r5
415 or r29,r3,r29 /* VA */
416 xor r28,r28,r0 /* hash */ 434 xor r28,r28,r0 /* hash */
417 435
418 /* Convert linux PTE bits into HW equivalents */ 436 /* Convert linux PTE bits into HW equivalents */
@@ -488,7 +506,7 @@ htab_special_pfn:
488 506
489 /* Call ppc_md.hpte_insert */ 507 /* Call ppc_md.hpte_insert */
490 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 508 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
491 mr r4,r29 /* Retrieve va */ 509 mr r4,r29 /* Retrieve vpn */
492 li r7,0 /* !bolted, !secondary */ 510 li r7,0 /* !bolted, !secondary */
493 li r8,MMU_PAGE_4K /* page size */ 511 li r8,MMU_PAGE_4K /* page size */
494 ld r9,STK_PARAM(R9)(r1) /* segment size */ 512 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
515 533
516 /* Call ppc_md.hpte_insert */ 534 /* Call ppc_md.hpte_insert */
517 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 535 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
518 mr r4,r29 /* Retrieve va */ 536 mr r4,r29 /* Retrieve vpn */
519 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 537 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
520 li r8,MMU_PAGE_4K /* page size */ 538 li r8,MMU_PAGE_4K /* page size */
521 ld r9,STK_PARAM(R9)(r1) /* segment size */ 539 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
547 * useless now that the segment has been switched to 4k pages. 565 * useless now that the segment has been switched to 4k pages.
548 */ 566 */
549htab_inval_old_hpte: 567htab_inval_old_hpte:
550 mr r3,r29 /* virtual addr */ 568 mr r3,r29 /* vpn */
551 mr r4,r31 /* PTE.pte */ 569 mr r4,r31 /* PTE.pte */
552 li r5,0 /* PTE.hidx */ 570 li r5,0 /* PTE.hidx */
553 li r6,MMU_PAGE_64K /* psize */ 571 li r6,MMU_PAGE_64K /* psize */
@@ -620,7 +638,7 @@ htab_modify_pte:
620 add r3,r0,r3 /* add slot idx */ 638 add r3,r0,r3 /* add slot idx */
621 639
622 /* Call ppc_md.hpte_updatepp */ 640 /* Call ppc_md.hpte_updatepp */
623 mr r5,r29 /* va */ 641 mr r5,r29 /* vpn */
624 li r6,MMU_PAGE_4K /* page size */ 642 li r6,MMU_PAGE_4K /* page size */
625 ld r7,STK_PARAM(R9)(r1) /* segment size */ 643 ld r7,STK_PARAM(R9)(r1) /* segment size */
626 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 644 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
676 /* Save non-volatile registers. 694 /* Save non-volatile registers.
677 * r31 will hold "old PTE" 695 * r31 will hold "old PTE"
678 * r30 is "new PTE" 696 * r30 is "new PTE"
679 * r29 is "va" 697 * r29 is vpn
680 * r28 is a hash value 698 * r28 is a hash value
681 * r27 is hashtab mask (maybe dynamic patched instead ?) 699 * r27 is hashtab mask (maybe dynamic patched instead ?)
682 */ 700 */
@@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
729 cmpdi r9,0 /* check segment size */ 747 cmpdi r9,0 /* check segment size */
730 bne 3f 748 bne 3f
731END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 749END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
732 /* Calc va and put it in r29 */ 750 /* Calc vpn and put it in r29 */
733 rldicr r29,r5,28,63-28 751 sldi r29,r5,SID_SHIFT - VPN_SHIFT
734 rldicl r3,r3,0,36 752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
735 or r29,r3,r29 753 or r29,r28,r29
736 754
737 /* Calculate hash value for primary slot and store it in r28 */ 755 /* Calculate hash value for primary slot and store it in r28 */
738 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 756 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
740 xor r28,r5,r0 758 xor r28,r5,r0
741 b 4f 759 b 4f
742 760
7433: /* Calc VA and hash in r29 and r28 for 1T segment */ 7613: /* Calc vpn and put it in r29 */
744 sldi r29,r5,40 /* vsid << 40 */ 762 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
745 clrldi r3,r3,24 /* ea & 0xffffffffff */ 763 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
764 or r29,r28,r29
765
766 /*
767 * calculate hash value for primary slot and
768 * store it in r28 for 1T segment
769 */
746 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 770 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
747 clrldi r5,r5,40 /* vsid & 0xffffff */ 771 clrldi r5,r5,40 /* vsid & 0xffffff */
748 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 772 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
749 xor r28,r28,r5 773 xor r28,r28,r5
750 or r29,r3,r29 /* VA */
751 xor r28,r28,r0 /* hash */ 774 xor r28,r28,r0 /* hash */
752 775
753 /* Convert linux PTE bits into HW equivalents */ 776 /* Convert linux PTE bits into HW equivalents */
@@ -806,7 +829,7 @@ ht64_insert_pte:
806 829
807 /* Call ppc_md.hpte_insert */ 830 /* Call ppc_md.hpte_insert */
808 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
809 mr r4,r29 /* Retrieve va */ 832 mr r4,r29 /* Retrieve vpn */
810 li r7,0 /* !bolted, !secondary */ 833 li r7,0 /* !bolted, !secondary */
811 li r8,MMU_PAGE_64K 834 li r8,MMU_PAGE_64K
812 ld r9,STK_PARAM(R9)(r1) /* segment size */ 835 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
829 852
830 /* Call ppc_md.hpte_insert */ 853 /* Call ppc_md.hpte_insert */
831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 854 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
832 mr r4,r29 /* Retrieve va */ 855 mr r4,r29 /* Retrieve vpn */
833 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 856 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
834 li r8,MMU_PAGE_64K 857 li r8,MMU_PAGE_64K
835 ld r9,STK_PARAM(R9)(r1) /* segment size */ 858 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -899,7 +922,7 @@ ht64_modify_pte:
899 add r3,r0,r3 /* add slot idx */ 922 add r3,r0,r3 /* add slot idx */
900 923
901 /* Call ppc_md.hpte_updatepp */ 924 /* Call ppc_md.hpte_updatepp */
902 mr r5,r29 /* va */ 925 mr r5,r29 /* vpn */
903 li r6,MMU_PAGE_64K 926 li r6,MMU_PAGE_64K
904 ld r7,STK_PARAM(R9)(r1) /* segment size */ 927 ld r7,STK_PARAM(R9)(r1) /* segment size */
905 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 928 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
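Throughout hash_low_64.S, r29 now carries a virtual page number (vpn) rather than the full virtual address. A rough C equivalent of what the rewritten blocks compute, as a sketch assuming this series' constants (VPN_SHIFT = 12, SID_SHIFT = 28, SID_SHIFT_1T = 40; the canonical helper is hpt_vpn() in mmu-hash64.h):

#define VPN_SHIFT	12
#define SID_SHIFT	28	/* 256M segments */
#define SID_SHIFT_1T	40	/* 1T segments */

/* vpn = vsid in the high bits, page index within the segment below */
static unsigned long vpn_256M(unsigned long vsid, unsigned long ea)
{
	return (vsid << (SID_SHIFT - VPN_SHIFT)) |
	       ((ea & ((1ul << SID_SHIFT) - 1)) >> VPN_SHIFT);
}

static unsigned long vpn_1T(unsigned long vsid, unsigned long ea)
{
	return (vsid << (SID_SHIFT_1T - VPN_SHIFT)) |
	       ((ea & ((1ul << SID_SHIFT_1T) - 1)) >> VPN_SHIFT);
}

The sldi/rldicl/or triple in the hunks above is exactly this shift-mask-or, with the effective address in r3 and the vsid in r5.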
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index f21e8ce8db33..a4a1c728f269 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,22 +39,35 @@
39 39
40DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40DEFINE_RAW_SPINLOCK(native_tlbie_lock);
41 41
42static inline void __tlbie(unsigned long va, int psize, int ssize) 42static inline void __tlbie(unsigned long vpn, int psize, int ssize)
43{ 43{
44 unsigned long va;
44 unsigned int penc; 45 unsigned int penc;
45 46
46 /* clear top 16 bits, non SLS segment */ 47 /*
 48 * We need bits 14 to 65 of the va for a tlbie of a 4K page.
 49 * With vpn the lower VPN_SHIFT bits are already dropped,
 50 * and the top two bits are already ignored because we can
 51 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
 52 * of 12.
53 */
54 va = vpn << VPN_SHIFT;
55 /*
 56 * clear top 16 bits of 64-bit va, non SLS segment
 57 * Older versions of the architecture (2.02 and earlier) require the
58 * masking of the top 16 bits.
59 */
47 va &= ~(0xffffULL << 48); 60 va &= ~(0xffffULL << 48);
48 61
49 switch (psize) { 62 switch (psize) {
50 case MMU_PAGE_4K: 63 case MMU_PAGE_4K:
51 va &= ~0xffful;
52 va |= ssize << 8; 64 va |= ssize << 8;
53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 65 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
54 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 66 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
55 : "memory"); 67 : "memory");
56 break; 68 break;
57 default: 69 default:
70 /* We need 14 to 14 + i bits of va */
58 penc = mmu_psize_defs[psize].penc; 71 penc = mmu_psize_defs[psize].penc;
59 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 72 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
60 va |= penc << 12; 73 va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
67 } 80 }
68} 81}
69 82
70static inline void __tlbiel(unsigned long va, int psize, int ssize) 83static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
71{ 84{
85 unsigned long va;
72 unsigned int penc; 86 unsigned int penc;
73 87
 74 /* clear top 16 bits, non SLS segment */ 88 /* VPN_SHIFT can be at most 12 */
89 va = vpn << VPN_SHIFT;
90 /*
 91 * clear top 16 bits of 64-bit va, non SLS segment
 92 * Older versions of the architecture (2.02 and earlier) require the
93 * masking of the top 16 bits.
94 */
75 va &= ~(0xffffULL << 48); 95 va &= ~(0xffffULL << 48);
76 96
77 switch (psize) { 97 switch (psize) {
78 case MMU_PAGE_4K: 98 case MMU_PAGE_4K:
79 va &= ~0xffful;
80 va |= ssize << 8; 99 va |= ssize << 8;
81 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 100 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
82 : : "r"(va) : "memory"); 101 : : "r"(va) : "memory");
83 break; 102 break;
84 default: 103 default:
104 /* We need 14 to 14 + i bits of va */
85 penc = mmu_psize_defs[psize].penc; 105 penc = mmu_psize_defs[psize].penc;
86 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 106 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
87 va |= penc << 12; 107 va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
94 114
95} 115}
96 116
97static inline void tlbie(unsigned long va, int psize, int ssize, int local) 117static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
98{ 118{
99 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); 119 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
100 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 120 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
105 raw_spin_lock(&native_tlbie_lock); 125 raw_spin_lock(&native_tlbie_lock);
106 asm volatile("ptesync": : :"memory"); 126 asm volatile("ptesync": : :"memory");
107 if (use_local) { 127 if (use_local) {
108 __tlbiel(va, psize, ssize); 128 __tlbiel(vpn, psize, ssize);
109 asm volatile("ptesync": : :"memory"); 129 asm volatile("ptesync": : :"memory");
110 } else { 130 } else {
111 __tlbie(va, psize, ssize); 131 __tlbie(vpn, psize, ssize);
112 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 132 asm volatile("eieio; tlbsync; ptesync": : :"memory");
113 } 133 }
114 if (lock_tlbie && !use_local) 134 if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
134 clear_bit_unlock(HPTE_LOCK_BIT, word); 154 clear_bit_unlock(HPTE_LOCK_BIT, word);
135} 155}
136 156
137static long native_hpte_insert(unsigned long hpte_group, unsigned long va, 157static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
138 unsigned long pa, unsigned long rflags, 158 unsigned long pa, unsigned long rflags,
139 unsigned long vflags, int psize, int ssize) 159 unsigned long vflags, int psize, int ssize)
140{ 160{
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
143 int i; 163 int i;
144 164
145 if (!(vflags & HPTE_V_BOLTED)) { 165 if (!(vflags & HPTE_V_BOLTED)) {
146 DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx," 166 DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
147 " rflags=%lx, vflags=%lx, psize=%d)\n", 167 " rflags=%lx, vflags=%lx, psize=%d)\n",
148 hpte_group, va, pa, rflags, vflags, psize); 168 hpte_group, vpn, pa, rflags, vflags, psize);
149 } 169 }
150 170
151 for (i = 0; i < HPTES_PER_GROUP; i++) { 171 for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
163 if (i == HPTES_PER_GROUP) 183 if (i == HPTES_PER_GROUP)
164 return -1; 184 return -1;
165 185
166 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 186 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
167 hpte_r = hpte_encode_r(pa, psize) | rflags; 187 hpte_r = hpte_encode_r(pa, psize) | rflags;
168 188
169 if (!(vflags & HPTE_V_BOLTED)) { 189 if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
225} 245}
226 246
227static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 247static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
228 unsigned long va, int psize, int ssize, 248 unsigned long vpn, int psize, int ssize,
229 int local) 249 int local)
230{ 250{
231 struct hash_pte *hptep = htab_address + slot; 251 struct hash_pte *hptep = htab_address + slot;
232 unsigned long hpte_v, want_v; 252 unsigned long hpte_v, want_v;
233 int ret = 0; 253 int ret = 0;
234 254
235 want_v = hpte_encode_v(va, psize, ssize); 255 want_v = hpte_encode_v(vpn, psize, ssize);
236 256
237 DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)", 257 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
238 va, want_v & HPTE_V_AVPN, slot, newpp); 258 vpn, want_v & HPTE_V_AVPN, slot, newpp);
239 259
240 native_lock_hpte(hptep); 260 native_lock_hpte(hptep);
241 261
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
254 native_unlock_hpte(hptep); 274 native_unlock_hpte(hptep);
255 275
256 /* Ensure it is out of the tlb too. */ 276 /* Ensure it is out of the tlb too. */
257 tlbie(va, psize, ssize, local); 277 tlbie(vpn, psize, ssize, local);
258 278
259 return ret; 279 return ret;
260} 280}
261 281
262static long native_hpte_find(unsigned long va, int psize, int ssize) 282static long native_hpte_find(unsigned long vpn, int psize, int ssize)
263{ 283{
264 struct hash_pte *hptep; 284 struct hash_pte *hptep;
265 unsigned long hash; 285 unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
267 long slot; 287 long slot;
268 unsigned long want_v, hpte_v; 288 unsigned long want_v, hpte_v;
269 289
270 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 290 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
271 want_v = hpte_encode_v(va, psize, ssize); 291 want_v = hpte_encode_v(vpn, psize, ssize);
272 292
273 /* Bolted mappings are only ever in the primary group */ 293 /* Bolted mappings are only ever in the primary group */
274 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 294 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
295static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 315static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
296 int psize, int ssize) 316 int psize, int ssize)
297{ 317{
298 unsigned long vsid, va; 318 unsigned long vpn;
319 unsigned long vsid;
299 long slot; 320 long slot;
300 struct hash_pte *hptep; 321 struct hash_pte *hptep;
301 322
302 vsid = get_kernel_vsid(ea, ssize); 323 vsid = get_kernel_vsid(ea, ssize);
303 va = hpt_va(ea, vsid, ssize); 324 vpn = hpt_vpn(ea, vsid, ssize);
304 325
305 slot = native_hpte_find(va, psize, ssize); 326 slot = native_hpte_find(vpn, psize, ssize);
306 if (slot == -1) 327 if (slot == -1)
307 panic("could not find page to bolt\n"); 328 panic("could not find page to bolt\n");
308 hptep = htab_address + slot; 329 hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
312 (newpp & (HPTE_R_PP | HPTE_R_N)); 333 (newpp & (HPTE_R_PP | HPTE_R_N));
313 334
314 /* Ensure it is out of the tlb too. */ 335 /* Ensure it is out of the tlb too. */
315 tlbie(va, psize, ssize, 0); 336 tlbie(vpn, psize, ssize, 0);
316} 337}
317 338
318static void native_hpte_invalidate(unsigned long slot, unsigned long va, 339static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
319 int psize, int ssize, int local) 340 int psize, int ssize, int local)
320{ 341{
321 struct hash_pte *hptep = htab_address + slot; 342 struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
325 346
326 local_irq_save(flags); 347 local_irq_save(flags);
327 348
328 DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot); 349 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
329 350
330 want_v = hpte_encode_v(va, psize, ssize); 351 want_v = hpte_encode_v(vpn, psize, ssize);
331 native_lock_hpte(hptep); 352 native_lock_hpte(hptep);
332 hpte_v = hptep->v; 353 hpte_v = hptep->v;
333 354
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
339 hptep->v = 0; 360 hptep->v = 0;
340 361
341 /* Invalidate the TLB */ 362 /* Invalidate the TLB */
342 tlbie(va, psize, ssize, local); 363 tlbie(vpn, psize, ssize, local);
343 364
344 local_irq_restore(flags); 365 local_irq_restore(flags);
345} 366}
@@ -349,11 +370,12 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
349#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) 370#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
350 371
351static void hpte_decode(struct hash_pte *hpte, unsigned long slot, 372static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
352 int *psize, int *ssize, unsigned long *va) 373 int *psize, int *ssize, unsigned long *vpn)
353{ 374{
375 unsigned long avpn, pteg, vpi;
354 unsigned long hpte_r = hpte->r; 376 unsigned long hpte_r = hpte->r;
355 unsigned long hpte_v = hpte->v; 377 unsigned long hpte_v = hpte->v;
356 unsigned long avpn; 378 unsigned long vsid, seg_off;
357 int i, size, shift, penc; 379 int i, size, shift, penc;
358 380
359 if (!(hpte_v & HPTE_V_LARGE)) 381 if (!(hpte_v & HPTE_V_LARGE))
@@ -380,32 +402,38 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
380 } 402 }
381 403
382 /* This works for all page sizes, and for 256M and 1T segments */ 404 /* This works for all page sizes, and for 256M and 1T segments */
405 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
383 shift = mmu_psize_defs[size].shift; 406 shift = mmu_psize_defs[size].shift;
384 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
385
386 if (shift < 23) {
387 unsigned long vpi, vsid, pteg;
388 407
389 pteg = slot / HPTES_PER_GROUP; 408 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
390 if (hpte_v & HPTE_V_SECONDARY) 409 pteg = slot / HPTES_PER_GROUP;
391 pteg = ~pteg; 410 if (hpte_v & HPTE_V_SECONDARY)
392 switch (hpte_v >> HPTE_V_SSIZE_SHIFT) { 411 pteg = ~pteg;
393 case MMU_SEGSIZE_256M: 412
394 vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask; 413 switch (*ssize) {
395 break; 414 case MMU_SEGSIZE_256M:
396 case MMU_SEGSIZE_1T: 415 /* We only have 28 - 23 bits of seg_off in avpn */
397 vsid = avpn >> 40; 416 seg_off = (avpn & 0x1f) << 23;
417 vsid = avpn >> 5;
418 /* We can find more bits from the pteg value */
419 if (shift < 23) {
420 vpi = (vsid ^ pteg) & htab_hash_mask;
421 seg_off |= vpi << shift;
422 }
 423 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
 break;
424 case MMU_SEGSIZE_1T:
425 /* We only have 40 - 23 bits of seg_off in avpn */
426 seg_off = (avpn & 0x1ffff) << 23;
427 vsid = avpn >> 17;
428 if (shift < 23) {
398 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; 429 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
399 break; 430 seg_off |= vpi << shift;
400 default:
401 avpn = vpi = size = 0;
402 } 431 }
 403 avpn |= (vpi << mmu_psize_defs[size].shift); 432 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
 break;
433 default:
434 *vpn = size = 0;
404 } 435 }
405
406 *va = avpn;
407 *psize = size; 436 *psize = size;
408 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
409} 437}
410 438
411/* 439/*
@@ -418,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
418 */ 446 */
419static void native_hpte_clear(void) 447static void native_hpte_clear(void)
420{ 448{
449 unsigned long vpn = 0;
421 unsigned long slot, slots, flags; 450 unsigned long slot, slots, flags;
422 struct hash_pte *hptep = htab_address; 451 struct hash_pte *hptep = htab_address;
423 unsigned long hpte_v, va; 452 unsigned long hpte_v;
424 unsigned long pteg_count; 453 unsigned long pteg_count;
425 int psize, ssize; 454 int psize, ssize;
426 455
@@ -448,9 +477,9 @@ static void native_hpte_clear(void)
448 * already hold the native_tlbie_lock. 477 * already hold the native_tlbie_lock.
449 */ 478 */
450 if (hpte_v & HPTE_V_VALID) { 479 if (hpte_v & HPTE_V_VALID) {
451 hpte_decode(hptep, slot, &psize, &ssize, &va); 480 hpte_decode(hptep, slot, &psize, &ssize, &vpn);
452 hptep->v = 0; 481 hptep->v = 0;
453 __tlbie(va, psize, ssize); 482 __tlbie(vpn, psize, ssize);
454 } 483 }
455 } 484 }
456 485
@@ -465,7 +494,8 @@ static void native_hpte_clear(void)
465 */ 494 */
466static void native_flush_hash_range(unsigned long number, int local) 495static void native_flush_hash_range(unsigned long number, int local)
467{ 496{
468 unsigned long va, hash, index, hidx, shift, slot; 497 unsigned long vpn;
498 unsigned long hash, index, hidx, shift, slot;
469 struct hash_pte *hptep; 499 struct hash_pte *hptep;
470 unsigned long hpte_v; 500 unsigned long hpte_v;
471 unsigned long want_v; 501 unsigned long want_v;
@@ -479,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
479 local_irq_save(flags); 509 local_irq_save(flags);
480 510
481 for (i = 0; i < number; i++) { 511 for (i = 0; i < number; i++) {
482 va = batch->vaddr[i]; 512 vpn = batch->vpn[i];
483 pte = batch->pte[i]; 513 pte = batch->pte[i];
484 514
485 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 515 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
486 hash = hpt_hash(va, shift, ssize); 516 hash = hpt_hash(vpn, shift, ssize);
487 hidx = __rpte_to_hidx(pte, index); 517 hidx = __rpte_to_hidx(pte, index);
488 if (hidx & _PTEIDX_SECONDARY) 518 if (hidx & _PTEIDX_SECONDARY)
489 hash = ~hash; 519 hash = ~hash;
490 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 520 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
491 slot += hidx & _PTEIDX_GROUP_IX; 521 slot += hidx & _PTEIDX_GROUP_IX;
492 hptep = htab_address + slot; 522 hptep = htab_address + slot;
493 want_v = hpte_encode_v(va, psize, ssize); 523 want_v = hpte_encode_v(vpn, psize, ssize);
494 native_lock_hpte(hptep); 524 native_lock_hpte(hptep);
495 hpte_v = hptep->v; 525 hpte_v = hptep->v;
496 if (!HPTE_V_COMPARE(hpte_v, want_v) || 526 if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -505,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
505 mmu_psize_defs[psize].tlbiel && local) { 535 mmu_psize_defs[psize].tlbiel && local) {
506 asm volatile("ptesync":::"memory"); 536 asm volatile("ptesync":::"memory");
507 for (i = 0; i < number; i++) { 537 for (i = 0; i < number; i++) {
508 va = batch->vaddr[i]; 538 vpn = batch->vpn[i];
509 pte = batch->pte[i]; 539 pte = batch->pte[i];
510 540
511 pte_iterate_hashed_subpages(pte, psize, va, index, 541 pte_iterate_hashed_subpages(pte, psize,
512 shift) { 542 vpn, index, shift) {
513 __tlbiel(va, psize, ssize); 543 __tlbiel(vpn, psize, ssize);
514 } pte_iterate_hashed_end(); 544 } pte_iterate_hashed_end();
515 } 545 }
516 asm volatile("ptesync":::"memory"); 546 asm volatile("ptesync":::"memory");
@@ -522,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
522 552
523 asm volatile("ptesync":::"memory"); 553 asm volatile("ptesync":::"memory");
524 for (i = 0; i < number; i++) { 554 for (i = 0; i < number; i++) {
525 va = batch->vaddr[i]; 555 vpn = batch->vpn[i];
526 pte = batch->pte[i]; 556 pte = batch->pte[i];
527 557
528 pte_iterate_hashed_subpages(pte, psize, va, index, 558 pte_iterate_hashed_subpages(pte, psize,
529 shift) { 559 vpn, index, shift) {
530 __tlbie(va, psize, ssize); 560 __tlbie(vpn, psize, ssize);
531 } pte_iterate_hashed_end(); 561 } pte_iterate_hashed_end();
532 } 562 }
533 asm volatile("eieio; tlbsync; ptesync":::"memory"); 563 asm volatile("eieio; tlbsync; ptesync":::"memory");
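Most of this file is the mechanical va-to-vpn rename in the ppc_md callbacks; the substantive change is that __tlbie() and __tlbiel() now rebuild the va locally from the vpn. A minimal sketch of the 4K-page path, assuming VPN_SHIFT = 12 (vpn << 12 has zero low bits by construction, which is why the old "va &= ~0xffful" masking is dropped):

#define VPN_SHIFT 12

static unsigned long tlbie_va_4k(unsigned long vpn, int ssize)
{
	unsigned long va = vpn << VPN_SHIFT;	/* low 12 bits already zero */

	va &= ~(0xffffULL << 48);		/* ISA 2.02 and earlier: clear top 16 bits */
	va |= (unsigned long)ssize << 8;	/* segment size field, as in the asm above */
	return va;
}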
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ba45739bdfe8..3a292be2e079 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -191,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
191 vaddr += step, paddr += step) { 191 vaddr += step, paddr += step) {
192 unsigned long hash, hpteg; 192 unsigned long hash, hpteg;
193 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 193 unsigned long vsid = get_kernel_vsid(vaddr, ssize);
194 unsigned long va = hpt_va(vaddr, vsid, ssize); 194 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
195 unsigned long tprot = prot; 195 unsigned long tprot = prot;
196 196
197 /* Make kernel text executable */ 197 /* Make kernel text executable */
198 if (overlaps_kernel_text(vaddr, vaddr + step)) 198 if (overlaps_kernel_text(vaddr, vaddr + step))
199 tprot &= ~HPTE_R_N; 199 tprot &= ~HPTE_R_N;
200 200
201 hash = hpt_hash(va, shift, ssize); 201 hash = hpt_hash(vpn, shift, ssize);
202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
203 203
204 BUG_ON(!ppc_md.hpte_insert); 204 BUG_ON(!ppc_md.hpte_insert);
205 ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, 205 ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
206 HPTE_V_BOLTED, psize, ssize); 206 HPTE_V_BOLTED, psize, ssize);
207 207
208 if (ret < 0) 208 if (ret < 0)
@@ -803,16 +803,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
803#ifdef CONFIG_PPC_MM_SLICES 803#ifdef CONFIG_PPC_MM_SLICES
804unsigned int get_paca_psize(unsigned long addr) 804unsigned int get_paca_psize(unsigned long addr)
805{ 805{
806 unsigned long index, slices; 806 u64 lpsizes;
807 unsigned char *hpsizes;
808 unsigned long index, mask_index;
807 809
808 if (addr < SLICE_LOW_TOP) { 810 if (addr < SLICE_LOW_TOP) {
809 slices = get_paca()->context.low_slices_psize; 811 lpsizes = get_paca()->context.low_slices_psize;
810 index = GET_LOW_SLICE_INDEX(addr); 812 index = GET_LOW_SLICE_INDEX(addr);
811 } else { 813 return (lpsizes >> (index * 4)) & 0xF;
812 slices = get_paca()->context.high_slices_psize;
813 index = GET_HIGH_SLICE_INDEX(addr);
814 } 814 }
815 return (slices >> (index * 4)) & 0xF; 815 hpsizes = get_paca()->context.high_slices_psize;
816 index = GET_HIGH_SLICE_INDEX(addr);
817 mask_index = index & 0x1;
818 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
816} 819}
817 820
818#else 821#else
@@ -1152,21 +1155,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1152/* WARNING: This is called from hash_low_64.S, if you change this prototype, 1155/* WARNING: This is called from hash_low_64.S, if you change this prototype,
1153 * do not forget to update the assembly call site ! 1156 * do not forget to update the assembly call site !
1154 */ 1157 */
1155void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, 1158void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1156 int local) 1159 int local)
1157{ 1160{
1158 unsigned long hash, index, shift, hidx, slot; 1161 unsigned long hash, index, shift, hidx, slot;
1159 1162
1160 DBG_LOW("flush_hash_page(va=%016lx)\n", va); 1163 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1161 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 1164 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1162 hash = hpt_hash(va, shift, ssize); 1165 hash = hpt_hash(vpn, shift, ssize);
1163 hidx = __rpte_to_hidx(pte, index); 1166 hidx = __rpte_to_hidx(pte, index);
1164 if (hidx & _PTEIDX_SECONDARY) 1167 if (hidx & _PTEIDX_SECONDARY)
1165 hash = ~hash; 1168 hash = ~hash;
1166 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1169 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1167 slot += hidx & _PTEIDX_GROUP_IX; 1170 slot += hidx & _PTEIDX_GROUP_IX;
1168 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); 1171 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
1169 ppc_md.hpte_invalidate(slot, va, psize, ssize, local); 1172 ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
1170 } pte_iterate_hashed_end(); 1173 } pte_iterate_hashed_end();
1171} 1174}
1172 1175
@@ -1180,7 +1183,7 @@ void flush_hash_range(unsigned long number, int local)
1180 &__get_cpu_var(ppc64_tlb_batch); 1183 &__get_cpu_var(ppc64_tlb_batch);
1181 1184
1182 for (i = 0; i < number; i++) 1185 for (i = 0; i < number; i++)
1183 flush_hash_page(batch->vaddr[i], batch->pte[i], 1186 flush_hash_page(batch->vpn[i], batch->pte[i],
1184 batch->psize, batch->ssize, local); 1187 batch->psize, batch->ssize, local);
1185 } 1188 }
1186} 1189}
@@ -1207,14 +1210,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1207{ 1210{
1208 unsigned long hash, hpteg; 1211 unsigned long hash, hpteg;
1209 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1212 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1210 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1213 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1211 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); 1214 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
1212 int ret; 1215 int ret;
1213 1216
1214 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1217 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1215 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1218 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
1216 1219
1217 ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), 1220 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
1218 mode, HPTE_V_BOLTED, 1221 mode, HPTE_V_BOLTED,
1219 mmu_linear_psize, mmu_kernel_ssize); 1222 mmu_linear_psize, mmu_kernel_ssize);
1220 BUG_ON (ret < 0); 1223 BUG_ON (ret < 0);
@@ -1228,9 +1231,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1228{ 1231{
1229 unsigned long hash, hidx, slot; 1232 unsigned long hash, hidx, slot;
1230 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1233 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1231 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1234 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1232 1235
1233 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1236 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1234 spin_lock(&linear_map_hash_lock); 1237 spin_lock(&linear_map_hash_lock);
1235 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); 1238 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
1236 hidx = linear_map_hash_slots[lmi] & 0x7f; 1239 hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1240,7 +1243,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1240 hash = ~hash; 1243 hash = ~hash;
1241 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1244 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1242 slot += hidx & _PTEIDX_GROUP_IX; 1245 slot += hidx & _PTEIDX_GROUP_IX;
1243 ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); 1246 ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
1244} 1247}
1245 1248
1246void kernel_map_pages(struct page *page, int numpages, int enable) 1249void kernel_map_pages(struct page *page, int numpages, int enable)
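get_paca_psize() above now reads the high-slice page size the same way get_slice_psize() does in slice.c: high_slices_psize has become an array of bytes carrying two 4-bit psizes each. The shared lookup, as a sketch (the helper name is hypothetical):

/* slice 2n lives in the low nibble of byte n, slice 2n+1 in the high one */
static unsigned int high_slice_psize(const unsigned char *hpsizes,
				     unsigned long index)
{
	unsigned long mask_index = index & 0x1;	/* which nibble */

	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}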
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cc5c273086cf..cecad348f604 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
18 pte_t *ptep, unsigned long trap, int local, int ssize, 18 pte_t *ptep, unsigned long trap, int local, int ssize,
19 unsigned int shift, unsigned int mmu_psize) 19 unsigned int shift, unsigned int mmu_psize)
20{ 20{
21 unsigned long vpn;
21 unsigned long old_pte, new_pte; 22 unsigned long old_pte, new_pte;
22 unsigned long va, rflags, pa, sz; 23 unsigned long rflags, pa, sz;
23 long slot; 24 long slot;
24 25
25 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); 26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
26 27
27 /* Search the Linux page table for a match with va */ 28 /* Search the Linux page table for a match with va */
28 va = hpt_va(ea, vsid, ssize); 29 vpn = hpt_vpn(ea, vsid, ssize);
29 30
30 /* At this point, we have a pte (old_pte) which can be used to build 31 /* At this point, we have a pte (old_pte) which can be used to build
31 * or update an HPTE. There are 2 cases: 32 * or update an HPTE. There are 2 cases:
@@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
69 /* There MIGHT be an HPTE for this pte */ 70 /* There MIGHT be an HPTE for this pte */
70 unsigned long hash, slot; 71 unsigned long hash, slot;
71 72
72 hash = hpt_hash(va, shift, ssize); 73 hash = hpt_hash(vpn, shift, ssize);
73 if (old_pte & _PAGE_F_SECOND) 74 if (old_pte & _PAGE_F_SECOND)
74 hash = ~hash; 75 hash = ~hash;
75 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 76 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
76 slot += (old_pte & _PAGE_F_GIX) >> 12; 77 slot += (old_pte & _PAGE_F_GIX) >> 12;
77 78
78 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, 79 if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
79 ssize, local) == -1) 80 ssize, local) == -1)
80 old_pte &= ~_PAGE_HPTEFLAGS; 81 old_pte &= ~_PAGE_HPTEFLAGS;
81 } 82 }
82 83
83 if (likely(!(old_pte & _PAGE_HASHPTE))) { 84 if (likely(!(old_pte & _PAGE_HASHPTE))) {
84 unsigned long hash = hpt_hash(va, shift, ssize); 85 unsigned long hash = hpt_hash(vpn, shift, ssize);
85 unsigned long hpte_group; 86 unsigned long hpte_group;
86 87
87 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; 88 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -101,14 +102,14 @@ repeat:
101 _PAGE_COHERENT | _PAGE_GUARDED)); 102 _PAGE_COHERENT | _PAGE_GUARDED));
102 103
103 /* Insert into the hash table, primary slot */ 104 /* Insert into the hash table, primary slot */
104 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, 105 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
105 mmu_psize, ssize); 106 mmu_psize, ssize);
106 107
107 /* Primary is full, try the secondary */ 108 /* Primary is full, try the secondary */
108 if (unlikely(slot == -1)) { 109 if (unlikely(slot == -1)) {
109 hpte_group = ((~hash & htab_hash_mask) * 110 hpte_group = ((~hash & htab_hash_mask) *
110 HPTES_PER_GROUP) & ~0x7UL; 111 HPTES_PER_GROUP) & ~0x7UL;
111 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 112 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
112 HPTE_V_SECONDARY, 113 HPTE_V_SECONDARY,
113 mmu_psize, ssize); 114 mmu_psize, ssize);
114 if (slot == -1) { 115 if (slot == -1) {
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40677aa0190e..40bc5b0ace54 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -30,11 +30,13 @@ static DEFINE_SPINLOCK(mmu_context_lock);
30static DEFINE_IDA(mmu_context_ida); 30static DEFINE_IDA(mmu_context_ida);
31 31
32/* 32/*
33 * The proto-VSID space has 2^35 - 1 segments available for user mappings. 33 * 256MB segment
34 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, 34 * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
35 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 35 * available for user mappings. Each segment contains 2^28 bytes. Each
36 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
37 * (19 == 37 + 28 - 46).
36 */ 38 */
37#define MAX_CONTEXT ((1UL << 19) - 1) 39#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
38 40
39int __init_new_context(void) 41int __init_new_context(void)
40{ 42{
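The rewritten MAX_CONTEXT comment compresses a bit of arithmetic; spelled out, assuming CONTEXT_BITS = 19 and USER_ESID_BITS = 18 as this series defines them:

/*
 * proto-VSID space: 2^(19 + 18) = 2^37 segments of 2^28 bytes each
 * one context maps: 2^46 bytes  = 2^18 segments
 * contexts:         2^(37 + 28 - 46) = 2^19, minus one reserved value
 */
#define CONTEXT_BITS	19
#define MAX_CONTEXT	((1UL << CONTEXT_BITS) - 1)	/* 0x7ffff */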
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 297d49547ea8..e212a271c7a4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -55,8 +55,18 @@
55 55
56#include "mmu_decl.h" 56#include "mmu_decl.h"
57 57
58unsigned long ioremap_bot = IOREMAP_BASE; 58/* Some sanity checking */
59#if TASK_SIZE_USER64 > PGTABLE_RANGE
60#error TASK_SIZE_USER64 exceeds pagetable range
61#endif
62
63#ifdef CONFIG_PPC_STD_MMU_64
64#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
65#error TASK_SIZE_USER64 exceeds user VSID range
66#endif
67#endif
59 68
69unsigned long ioremap_bot = IOREMAP_BASE;
60 70
61#ifdef CONFIG_PPC_MMU_NOHASH 71#ifdef CONFIG_PPC_MMU_NOHASH
62static void *early_alloc_pgtable(unsigned long size) 72static void *early_alloc_pgtable(unsigned long size)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index b9ee79ce2200..1a16ca227757 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -56,6 +56,12 @@ _GLOBAL(slb_allocate_realmode)
56 */ 56 */
57_GLOBAL(slb_miss_kernel_load_linear) 57_GLOBAL(slb_miss_kernel_load_linear)
58 li r11,0 58 li r11,0
59 li r9,0x1
60 /*
61 * for 1T we shift 12 bits more. slb_finish_load_1T will do
62 * the necessary adjustment
63 */
64 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
59BEGIN_FTR_SECTION 65BEGIN_FTR_SECTION
60 b slb_finish_load 66 b slb_finish_load
61END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 67END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -85,6 +91,12 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
85 _GLOBAL(slb_miss_kernel_load_io) 91 _GLOBAL(slb_miss_kernel_load_io)
86 li r11,0 92 li r11,0
876: 936:
94 li r9,0x1
95 /*
96 * for 1T we shift 12 bits more. slb_finish_load_1T will do
97 * the necessary adjustment
98 */
99 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
88BEGIN_FTR_SECTION 100BEGIN_FTR_SECTION
89 b slb_finish_load 101 b slb_finish_load
90END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 102END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -108,17 +120,31 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
108 * between 4k and 64k standard page size 120 * between 4k and 64k standard page size
109 */ 121 */
110#ifdef CONFIG_PPC_MM_SLICES 122#ifdef CONFIG_PPC_MM_SLICES
 123 /* r10 has esid */
111 cmpldi r10,16 124 cmpldi r10,16
112 125 /* below SLICE_LOW_TOP */
113 /* Get the slice index * 4 in r11 and matching slice size mask in r9 */
114 ld r9,PACALOWSLICESPSIZE(r13)
115 sldi r11,r10,2
116 blt 5f 126 blt 5f
117 ld r9,PACAHIGHSLICEPSIZE(r13) 127 /*
118 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2) 128 * Handle hpsizes,
119 andi. r11,r11,0x3c 129 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
130 */
131 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
132 addi r9,r11,PACAHIGHSLICEPSIZE
133 lbzx r9,r13,r9 /* r9 is hpsizes[r11] */
134 /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
135 rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
136 b 6f
120 137
1215: /* Extract the psize and multiply to get an array offset */ 1385:
139 /*
140 * Handle lpsizes
141 * r9 is get_paca()->context.low_slices_psize, r11 is index
142 */
143 ld r9,PACALOWSLICESPSIZE(r13)
144 mr r11,r10
1456:
146 sldi r11,r11,2 /* index * 4 */
147 /* Extract the psize and multiply to get an array offset */
122 srd r9,r9,r11 148 srd r9,r9,r11
123 andi. r9,r9,0xf 149 andi. r9,r9,0xf
124 mulli r9,r9,MMUPSIZEDEFSIZE 150 mulli r9,r9,MMUPSIZEDEFSIZE
@@ -209,7 +235,11 @@ _GLOBAL(slb_allocate_user)
209 */ 235 */
210slb_finish_load: 236slb_finish_load:
211 ASM_VSID_SCRAMBLE(r10,r9,256M) 237 ASM_VSID_SCRAMBLE(r10,r9,256M)
212 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */ 238 /*
239 * bits above VSID_BITS_256M need to be ignored from r10
240 * also combine VSID and flags
241 */
242 rldimi r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
213 243
214 /* r3 = EA, r11 = VSID data */ 244 /* r3 = EA, r11 = VSID data */
215 /* 245 /*
@@ -252,10 +282,10 @@ _GLOBAL(slb_compare_rr_to_size)
252 bge 1f 282 bge 1f
253 283
254 /* still room in the slb cache */ 284 /* still room in the slb cache */
255 sldi r11,r3,1 /* r11 = offset * sizeof(u16) */ 285 sldi r11,r3,2 /* r11 = offset * sizeof(u32) */
256 rldicl r10,r10,36,28 /* get low 16 bits of the ESID */ 286 srdi r10,r10,28 /* get the 36 bits of the ESID */
257 add r11,r11,r13 /* r11 = (u16 *)paca + offset */ 287 add r11,r11,r13 /* r11 = (u32 *)paca + offset */
258 sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ 288 stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
259 addi r3,r3,1 /* offset++ */ 289 addi r3,r3,1 /* offset++ */
260 b 2f 290 b 2f
2611: /* offset >= SLB_CACHE_ENTRIES */ 2911: /* offset >= SLB_CACHE_ENTRIES */
@@ -273,7 +303,11 @@ _GLOBAL(slb_compare_rr_to_size)
273slb_finish_load_1T: 303slb_finish_load_1T:
274 srdi r10,r10,40-28 /* get 1T ESID */ 304 srdi r10,r10,40-28 /* get 1T ESID */
275 ASM_VSID_SCRAMBLE(r10,r9,1T) 305 ASM_VSID_SCRAMBLE(r10,r9,1T)
276 rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */ 306 /*
307 * bits above VSID_BITS_1T need to be ignored from r10
308 * also combine VSID and flags
309 */
310 rldimi r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
277 li r10,MMU_SEGSIZE_1T 311 li r10,MMU_SEGSIZE_1T
278 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */ 312 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
279 313
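The SLB-cache hunk above widens the cache entries because the ESID no longer fits 16 bits. In C terms, a sketch only (the real array is paca->slb_cache; SID_SHIFT = 28 assumed):

#define SID_SHIFT 28

/* With a 64TB (2^46) user address space a 256M ESID is 46 - 28 = 18
 * bits, which no longer fits a u16 entry, hence the move to u32.
 */
static void slb_cache_store(unsigned int *slb_cache, unsigned long offset,
			    unsigned long ea)
{
	slb_cache[offset] = ea >> SID_SHIFT;	/* the stw in the asm above */
}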
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 73709f7ce92c..5829d2a950d4 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -34,6 +34,11 @@
34#include <asm/mmu.h> 34#include <asm/mmu.h>
35#include <asm/spu.h> 35#include <asm/spu.h>
36 36
37/* some sanity checks */
38#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
39#error PGTABLE_RANGE exceeds slice_mask high_slices size
40#endif
41
37static DEFINE_SPINLOCK(slice_convert_lock); 42static DEFINE_SPINLOCK(slice_convert_lock);
38 43
39 44
@@ -42,7 +47,7 @@ int _slice_debug = 1;
42 47
43static void slice_print_mask(const char *label, struct slice_mask mask) 48static void slice_print_mask(const char *label, struct slice_mask mask)
44{ 49{
45 char *p, buf[16 + 3 + 16 + 1]; 50 char *p, buf[16 + 3 + 64 + 1];
46 int i; 51 int i;
47 52
48 if (!_slice_debug) 53 if (!_slice_debug)
@@ -54,7 +59,7 @@ static void slice_print_mask(const char *label, struct slice_mask mask)
54 *(p++) = '-'; 59 *(p++) = '-';
55 *(p++) = ' '; 60 *(p++) = ' ';
56 for (i = 0; i < SLICE_NUM_HIGH; i++) 61 for (i = 0; i < SLICE_NUM_HIGH; i++)
57 *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0'; 62 *(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
58 *(p++) = 0; 63 *(p++) = 0;
59 64
60 printk(KERN_DEBUG "%s:%s\n", label, buf); 65 printk(KERN_DEBUG "%s:%s\n", label, buf);
@@ -84,8 +89,8 @@ static struct slice_mask slice_range_to_mask(unsigned long start,
84 } 89 }
85 90
86 if ((start + len) > SLICE_LOW_TOP) 91 if ((start + len) > SLICE_LOW_TOP)
87 ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1)) 92 ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
88 - (1u << GET_HIGH_SLICE_INDEX(start)); 93 - (1ul << GET_HIGH_SLICE_INDEX(start));
89 94
90 return ret; 95 return ret;
91} 96}
@@ -135,26 +140,31 @@ static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
135 140
136 for (i = 0; i < SLICE_NUM_HIGH; i++) 141 for (i = 0; i < SLICE_NUM_HIGH; i++)
137 if (!slice_high_has_vma(mm, i)) 142 if (!slice_high_has_vma(mm, i))
138 ret.high_slices |= 1u << i; 143 ret.high_slices |= 1ul << i;
139 144
140 return ret; 145 return ret;
141} 146}
142 147
143static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) 148static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
144{ 149{
150 unsigned char *hpsizes;
151 int index, mask_index;
145 struct slice_mask ret = { 0, 0 }; 152 struct slice_mask ret = { 0, 0 };
146 unsigned long i; 153 unsigned long i;
147 u64 psizes; 154 u64 lpsizes;
148 155
149 psizes = mm->context.low_slices_psize; 156 lpsizes = mm->context.low_slices_psize;
150 for (i = 0; i < SLICE_NUM_LOW; i++) 157 for (i = 0; i < SLICE_NUM_LOW; i++)
151 if (((psizes >> (i * 4)) & 0xf) == psize) 158 if (((lpsizes >> (i * 4)) & 0xf) == psize)
152 ret.low_slices |= 1u << i; 159 ret.low_slices |= 1u << i;
153 160
154 psizes = mm->context.high_slices_psize; 161 hpsizes = mm->context.high_slices_psize;
155 for (i = 0; i < SLICE_NUM_HIGH; i++) 162 for (i = 0; i < SLICE_NUM_HIGH; i++) {
156 if (((psizes >> (i * 4)) & 0xf) == psize) 163 mask_index = i & 0x1;
157 ret.high_slices |= 1u << i; 164 index = i >> 1;
165 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
166 ret.high_slices |= 1ul << i;
167 }
158 168
159 return ret; 169 return ret;
160} 170}
@@ -183,8 +193,10 @@ static void slice_flush_segments(void *parm)
183 193
184static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) 194static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
185{ 195{
196 int index, mask_index;
186 /* Write the new slice psize bits */ 197 /* Write the new slice psize bits */
187 u64 lpsizes, hpsizes; 198 unsigned char *hpsizes;
199 u64 lpsizes;
188 unsigned long i, flags; 200 unsigned long i, flags;
189 201
190 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); 202 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
@@ -201,14 +213,18 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
201 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 213 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
202 (((unsigned long)psize) << (i * 4)); 214 (((unsigned long)psize) << (i * 4));
203 215
204 hpsizes = mm->context.high_slices_psize; 216 /* Assign the value back */
205 for (i = 0; i < SLICE_NUM_HIGH; i++)
206 if (mask.high_slices & (1u << i))
207 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
208 (((unsigned long)psize) << (i * 4));
209
210 mm->context.low_slices_psize = lpsizes; 217 mm->context.low_slices_psize = lpsizes;
211 mm->context.high_slices_psize = hpsizes; 218
219 hpsizes = mm->context.high_slices_psize;
220 for (i = 0; i < SLICE_NUM_HIGH; i++) {
221 mask_index = i & 0x1;
222 index = i >> 1;
223 if (mask.high_slices & (1ul << i))
224 hpsizes[index] = (hpsizes[index] &
225 ~(0xf << (mask_index * 4))) |
226 (((unsigned long)psize) << (mask_index * 4));
227 }
212 228
213 slice_dbg(" lsps=%lx, hsps=%lx\n", 229 slice_dbg(" lsps=%lx, hsps=%lx\n",
214 mm->context.low_slices_psize, 230 mm->context.low_slices_psize,
@@ -587,18 +603,19 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
587 603
588unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) 604unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
589{ 605{
590 u64 psizes; 606 unsigned char *hpsizes;
591 int index; 607 int index, mask_index;
592 608
593 if (addr < SLICE_LOW_TOP) { 609 if (addr < SLICE_LOW_TOP) {
594 psizes = mm->context.low_slices_psize; 610 u64 lpsizes;
611 lpsizes = mm->context.low_slices_psize;
595 index = GET_LOW_SLICE_INDEX(addr); 612 index = GET_LOW_SLICE_INDEX(addr);
596 } else { 613 return (lpsizes >> (index * 4)) & 0xf;
597 psizes = mm->context.high_slices_psize;
598 index = GET_HIGH_SLICE_INDEX(addr);
599 } 614 }
600 615 hpsizes = mm->context.high_slices_psize;
601 return (psizes >> (index * 4)) & 0xf; 616 index = GET_HIGH_SLICE_INDEX(addr);
617 mask_index = index & 0x1;
618 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
602} 619}
603EXPORT_SYMBOL_GPL(get_slice_psize); 620EXPORT_SYMBOL_GPL(get_slice_psize);
604 621
@@ -618,7 +635,9 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
618 */ 635 */
619void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) 636void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
620{ 637{
621 unsigned long flags, lpsizes, hpsizes; 638 int index, mask_index;
639 unsigned char *hpsizes;
640 unsigned long flags, lpsizes;
622 unsigned int old_psize; 641 unsigned int old_psize;
623 int i; 642 int i;
624 643
@@ -639,15 +658,21 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
639 if (((lpsizes >> (i * 4)) & 0xf) == old_psize) 658 if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
640 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 659 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
641 (((unsigned long)psize) << (i * 4)); 660 (((unsigned long)psize) << (i * 4));
661 /* Assign the value back */
662 mm->context.low_slices_psize = lpsizes;
642 663
643 hpsizes = mm->context.high_slices_psize; 664 hpsizes = mm->context.high_slices_psize;
644 for (i = 0; i < SLICE_NUM_HIGH; i++) 665 for (i = 0; i < SLICE_NUM_HIGH; i++) {
645 if (((hpsizes >> (i * 4)) & 0xf) == old_psize) 666 mask_index = i & 0x1;
646 hpsizes = (hpsizes & ~(0xful << (i * 4))) | 667 index = i >> 1;
647 (((unsigned long)psize) << (i * 4)); 668 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
669 hpsizes[index] = (hpsizes[index] &
670 ~(0xf << (mask_index * 4))) |
671 (((unsigned long)psize) << (mask_index * 4));
672 }
673
674
648 675
649 mm->context.low_slices_psize = lpsizes;
650 mm->context.high_slices_psize = hpsizes;
651 676
652 slice_dbg(" lsps=%lx, hsps=%lx\n", 677 slice_dbg(" lsps=%lx, hsps=%lx\n",
653 mm->context.low_slices_psize, 678 mm->context.low_slices_psize,
@@ -660,18 +685,27 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
660void slice_set_psize(struct mm_struct *mm, unsigned long address, 685void slice_set_psize(struct mm_struct *mm, unsigned long address,
661 unsigned int psize) 686 unsigned int psize)
662{ 687{
688 unsigned char *hpsizes;
663 unsigned long i, flags; 689 unsigned long i, flags;
664 u64 *p; 690 u64 *lpsizes;
665 691
666 spin_lock_irqsave(&slice_convert_lock, flags); 692 spin_lock_irqsave(&slice_convert_lock, flags);
667 if (address < SLICE_LOW_TOP) { 693 if (address < SLICE_LOW_TOP) {
668 i = GET_LOW_SLICE_INDEX(address); 694 i = GET_LOW_SLICE_INDEX(address);
669 p = &mm->context.low_slices_psize; 695 lpsizes = &mm->context.low_slices_psize;
696 *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
697 ((unsigned long) psize << (i * 4));
670 } else { 698 } else {
699 int index, mask_index;
671 i = GET_HIGH_SLICE_INDEX(address); 700 i = GET_HIGH_SLICE_INDEX(address);
672 p = &mm->context.high_slices_psize; 701 hpsizes = mm->context.high_slices_psize;
702 mask_index = i & 0x1;
703 index = i >> 1;
704 hpsizes[index] = (hpsizes[index] &
705 ~(0xf << (mask_index * 4))) |
706 (((unsigned long)psize) << (mask_index * 4));
673 } 707 }
674 *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4)); 708
675 spin_unlock_irqrestore(&slice_convert_lock, flags); 709 spin_unlock_irqrestore(&slice_convert_lock, flags);
676 710
677#ifdef CONFIG_SPU_BASE 711#ifdef CONFIG_SPU_BASE
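slice.c now treats high_slices as a full 64-bit mask (hence the 1ul << i shifts and the buffer growing from 16 to 64 characters) and stores high-slice page sizes through the same two-nibbles-per-byte layout used for the lookups. The store side that slice_convert() and slice_set_psize() share, as a sketch:

static void high_slice_set_psize(unsigned char *hpsizes,
				 unsigned long i, unsigned int psize)
{
	unsigned long mask_index = i & 0x1;	/* which nibble of the byte */

	hpsizes[i >> 1] = (hpsizes[i >> 1] & ~(0xf << (mask_index * 4))) |
			  (psize << (mask_index * 4));
}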
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 31f18207970b..ae758b3ff72c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
42void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
43 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
44{ 44{
45 unsigned long vpn;
45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); 46 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
46 unsigned long vsid, vaddr; 47 unsigned long vsid;
47 unsigned int psize; 48 unsigned int psize;
48 int ssize; 49 int ssize;
49 real_pte_t rpte; 50 real_pte_t rpte;
@@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 87 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
87 ssize = mmu_kernel_ssize; 88 ssize = mmu_kernel_ssize;
88 } 89 }
89 vaddr = hpt_va(addr, vsid, ssize); 90 vpn = hpt_vpn(addr, vsid, ssize);
90 rpte = __real_pte(__pte(pte), ptep); 91 rpte = __real_pte(__pte(pte), ptep);
91 92
92 /* 93 /*
@@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
96 * and decide to use local invalidates instead... 97 * and decide to use local invalidates instead...
97 */ 98 */
98 if (!batch->active) { 99 if (!batch->active) {
99 flush_hash_page(vaddr, rpte, psize, ssize, 0); 100 flush_hash_page(vpn, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch); 101 put_cpu_var(ppc64_tlb_batch);
101 return; 102 return;
102 } 103 }
@@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
122 batch->ssize = ssize; 123 batch->ssize = ssize;
123 } 124 }
124 batch->pte[i] = rpte; 125 batch->pte[i] = rpte;
125 batch->vaddr[i] = vaddr; 126 batch->vpn[i] = vpn;
126 batch->index = ++i; 127 batch->index = ++i;
127 if (i >= PPC64_TLB_BATCH_NR) 128 if (i >= PPC64_TLB_BATCH_NR)
128 __flush_tlb_pending(batch); 129 __flush_tlb_pending(batch);
@@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
146 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) 147 if (cpumask_equal(mm_cpumask(batch->mm), tmp))
147 local = 1; 148 local = 1;
148 if (i == 1) 149 if (i == 1)
149 flush_hash_page(batch->vaddr[0], batch->pte[0], 150 flush_hash_page(batch->vpn[0], batch->pte[0],
150 batch->psize, batch->ssize, local); 151 batch->psize, batch->ssize, local);
151 else 152 else
152 flush_hash_range(i, local); 153 flush_hash_range(i, local);
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 943c9d39aa16..0f6f83988b3d 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
88} 88}
89 89
90static long beat_lpar_hpte_insert(unsigned long hpte_group, 90static long beat_lpar_hpte_insert(unsigned long hpte_group,
91 unsigned long va, unsigned long pa, 91 unsigned long vpn, unsigned long pa,
92 unsigned long rflags, unsigned long vflags, 92 unsigned long rflags, unsigned long vflags,
93 int psize, int ssize) 93 int psize, int ssize)
94{ 94{
@@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
103 "rflags=%lx, vflags=%lx, psize=%d)\n", 103 "rflags=%lx, vflags=%lx, psize=%d)\n",
104 hpte_group, va, pa, rflags, vflags, psize); 104 hpte_group, va, pa, rflags, vflags, psize);
105 105
106 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 106 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
107 vflags | HPTE_V_VALID; 107 vflags | HPTE_V_VALID;
108 hpte_r = hpte_encode_r(pa, psize) | rflags; 108 hpte_r = hpte_encode_r(pa, psize) | rflags;
109 109
@@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void)
184 */ 184 */
185static long beat_lpar_hpte_updatepp(unsigned long slot, 185static long beat_lpar_hpte_updatepp(unsigned long slot,
186 unsigned long newpp, 186 unsigned long newpp,
187 unsigned long va, 187 unsigned long vpn,
188 int psize, int ssize, int local) 188 int psize, int ssize, int local)
189{ 189{
190 unsigned long lpar_rc; 190 unsigned long lpar_rc;
191 u64 dummy0, dummy1; 191 u64 dummy0, dummy1;
192 unsigned long want_v; 192 unsigned long want_v;
193 193
194 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 194 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
195 195
196 DBG_LOW(" update: " 196 DBG_LOW(" update: "
197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", 197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
220 return 0; 220 return 0;
221} 221}
222 222
223static long beat_lpar_hpte_find(unsigned long va, int psize) 223static long beat_lpar_hpte_find(unsigned long vpn, int psize)
224{ 224{
225 unsigned long hash; 225 unsigned long hash;
226 unsigned long i, j; 226 unsigned long i, j;
227 long slot; 227 long slot;
228 unsigned long want_v, hpte_v; 228 unsigned long want_v, hpte_v;
229 229
230 hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); 230 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
231 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 231 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
232 232
233 for (j = 0; j < 2; j++) { 233 for (j = 0; j < 2; j++) {
234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
255 unsigned long ea, 255 unsigned long ea,
256 int psize, int ssize) 256 int psize, int ssize)
257{ 257{
258 unsigned long lpar_rc, slot, vsid, va; 258 unsigned long vpn;
259 unsigned long lpar_rc, slot, vsid;
259 u64 dummy0, dummy1; 260 u64 dummy0, dummy1;
260 261
261 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
262 va = (vsid << 28) | (ea & 0x0fffffff); 263 vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
263 264
264 raw_spin_lock(&beat_htab_lock); 265 raw_spin_lock(&beat_htab_lock);
265 slot = beat_lpar_hpte_find(va, psize); 266 slot = beat_lpar_hpte_find(vpn, psize);
266 BUG_ON(slot == -1); 267 BUG_ON(slot == -1);
267 268
268 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, 269 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
272 BUG_ON(lpar_rc != 0); 273 BUG_ON(lpar_rc != 0);
273} 274}
274 275
275static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 276static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
276 int psize, int ssize, int local) 277 int psize, int ssize, int local)
277{ 278{
278 unsigned long want_v; 279 unsigned long want_v;
@@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
282 283
283 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 284 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
284 slot, va, psize, local); 285 slot, va, psize, local);
285 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 286 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
286 287
287 raw_spin_lock_irqsave(&beat_htab_lock, flags); 288 raw_spin_lock_irqsave(&beat_htab_lock, flags);
288 dummy1 = beat_lpar_hpte_getword0(slot); 289 dummy1 = beat_lpar_hpte_getword0(slot);
@@ -311,7 +312,7 @@ void __init hpte_init_beat(void)
311} 312}
312 313
313static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, 314static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
314 unsigned long va, unsigned long pa, 315 unsigned long vpn, unsigned long pa,
315 unsigned long rflags, unsigned long vflags, 316 unsigned long rflags, unsigned long vflags,
316 int psize, int ssize) 317 int psize, int ssize)
317{ 318{
@@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
322 return -1; 323 return -1;
323 324
324 if (!(vflags & HPTE_V_BOLTED)) 325 if (!(vflags & HPTE_V_BOLTED))
325 DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 326 DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
326 "rflags=%lx, vflags=%lx, psize=%d)\n", 327 "rflags=%lx, vflags=%lx, psize=%d)\n",
327 hpte_group, va, pa, rflags, vflags, psize); 328 hpte_group, vpn, pa, rflags, vflags, psize);
328 329
329 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 330 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
330 vflags | HPTE_V_VALID; 331 vflags | HPTE_V_VALID;
331 hpte_r = hpte_encode_r(pa, psize) | rflags; 332 hpte_r = hpte_encode_r(pa, psize) | rflags;
332 333
@@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
364 */ 365 */
365static long beat_lpar_hpte_updatepp_v3(unsigned long slot, 366static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
366 unsigned long newpp, 367 unsigned long newpp,
367 unsigned long va, 368 unsigned long vpn,
368 int psize, int ssize, int local) 369 int psize, int ssize, int local)
369{ 370{
370 unsigned long lpar_rc; 371 unsigned long lpar_rc;
371 unsigned long want_v; 372 unsigned long want_v;
372 unsigned long pss; 373 unsigned long pss;
373 374
374 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 375 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
375 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 376 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
376 377
377 DBG_LOW(" update: " 378 DBG_LOW(" update: "
@@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
392 return 0; 393 return 0;
393} 394}
394 395
395static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, 396static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
396 int psize, int ssize, int local) 397 int psize, int ssize, int local)
397{ 398{
398 unsigned long want_v; 399 unsigned long want_v;
399 unsigned long lpar_rc; 400 unsigned long lpar_rc;
400 unsigned long pss; 401 unsigned long pss;
401 402
402 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 403 DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
403 slot, va, psize, local); 404 slot, vpn, psize, local);
404 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 405 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
405 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 406 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
406 407
407 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); 408 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5e75dcfe51b9..471aa3ccd9fd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -34,14 +34,6 @@
34#include "powernv.h" 34#include "powernv.h"
35#include "pci.h" 35#include "pci.h"
36 36
37struct resource_wrap {
38 struct list_head link;
39 resource_size_t size;
40 resource_size_t align;
41 struct pci_dev *dev; /* Set if it's a device */
42 struct pci_bus *bus; /* Set if it's a bridge */
43};
44
45static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe, 37static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
46 struct va_format *vaf) 38 struct va_format *vaf)
47{ 39{
@@ -77,273 +69,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
77define_pe_printk_level(pe_warn, KERN_WARNING); 69define_pe_printk_level(pe_warn, KERN_WARNING);
78define_pe_printk_level(pe_info, KERN_INFO); 70define_pe_printk_level(pe_info, KERN_INFO);
79 71
80
81/* Calculate resource usage & alignment requirement of a single
82 * device. This will also assign all resources within the device
83 * for a given type starting at 0 for the biggest one and then
84 * assigning in decreasing order of size.
85 */
86static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
87 resource_size_t *size,
88 resource_size_t *align)
89{
90 resource_size_t start;
91 struct resource *r;
92 int i;
93
94 pr_devel(" -> CDR %s\n", pci_name(dev));
95
96 *size = *align = 0;
97
98 /* Clear the resources out and mark them all unset */
99 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
100 r = &dev->resource[i];
101 if (!(r->flags & flags))
102 continue;
103 if (r->start) {
104 r->end -= r->start;
105 r->start = 0;
106 }
107 r->flags |= IORESOURCE_UNSET;
108 }
109
110 /* We currently keep all memory resources together, we
111 * will handle prefetch & 64-bit separately in the future
112 * but for now we stick everybody in M32
113 */
114 start = 0;
115 for (;;) {
116 resource_size_t max_size = 0;
117 int max_no = -1;
118
119 /* Find next biggest resource */
120 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
121 r = &dev->resource[i];
122 if (!(r->flags & IORESOURCE_UNSET) ||
123 !(r->flags & flags))
124 continue;
125 if (resource_size(r) > max_size) {
126 max_size = resource_size(r);
127 max_no = i;
128 }
129 }
130 if (max_no < 0)
131 break;
132 r = &dev->resource[max_no];
133 if (max_size > *align)
134 *align = max_size;
135 *size += max_size;
136 r->start = start;
137 start += max_size;
138 r->end = r->start + max_size - 1;
139 r->flags &= ~IORESOURCE_UNSET;
140 pr_devel(" -> R%d %016llx..%016llx\n",
141 max_no, r->start, r->end);
142 }
143 pr_devel(" <- CDR %s size=%llx align=%llx\n",
144 pci_name(dev), *size, *align);
145}
146
147/* Allocate a resource "wrap" for a given device or bridge and
148 * insert it at the right position in the sorted list
149 */
150static void __devinit pnv_ioda_add_wrap(struct list_head *list,
151 struct pci_bus *bus,
152 struct pci_dev *dev,
153 resource_size_t size,
154 resource_size_t align)
155{
156 struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
157
158 w->size = size;
159 w->align = align;
160 w->dev = dev;
161 w->bus = bus;
162
163 list_for_each_entry(w1, list, link) {
164 if (w1->align < align) {
165 list_add_tail(&w->link, &w1->link);
166 return;
167 }
168 }
169 list_add_tail(&w->link, list);
170}
171
172/* Offset device resources of a given type */
173static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
174 unsigned int flags,
175 resource_size_t offset)
176{
177 struct resource *r;
178 int i;
179
180 pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
181
182 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
183 r = &dev->resource[i];
184 if (r->flags & flags) {
185 dev->resource[i].start += offset;
186 dev->resource[i].end += offset;
187 }
188 }
189
190 pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
191}
192
193/* Offset bus resources (& all children) of a given type */
194static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
195 unsigned int flags,
196 resource_size_t offset)
197{
198 struct resource *r;
199 struct pci_dev *dev;
200 struct pci_bus *cbus;
201 int i;
202
203 pr_devel(" -> OBR %s [%x] +%016llx\n",
204 bus->self ? pci_name(bus->self) : "root", flags, offset);
205
206 pci_bus_for_each_resource(bus, r, i) {
207 if (r && (r->flags & flags)) {
208 r->start += offset;
209 r->end += offset;
210 }
211 }
212 list_for_each_entry(dev, &bus->devices, bus_list)
213 pnv_ioda_offset_dev(dev, flags, offset);
214 list_for_each_entry(cbus, &bus->children, node)
215 pnv_ioda_offset_bus(cbus, flags, offset);
216
217 pr_devel(" <- OBR %s [%x]\n",
218 bus->self ? pci_name(bus->self) : "root", flags);
219}
220
221/* This is the guts of our IODA resource allocation. This is called
222 * recursively for each bus in the system. It calculates all the
223 * necessary size and requirements for children and assign them
224 * resources such that:
225 *
226 * - Each function fits in it's own contiguous set of IO/M32
227 * segment
228 *
229 * - All segments behind a P2P bridge are contiguous and obey
230 * alignment constraints of those bridges
231 */
232static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
233 resource_size_t *size,
234 resource_size_t *align)
235{
236 struct pci_controller *hose = pci_bus_to_host(bus);
237 struct pnv_phb *phb = hose->private_data;
238 resource_size_t dev_size, dev_align, start;
239 resource_size_t min_align, min_balign;
240 struct pci_dev *cdev;
241 struct pci_bus *cbus;
242 struct list_head head;
243 struct resource_wrap *w;
244 unsigned int bres;
245
246 *size = *align = 0;
247
248 pr_devel("-> CBR %s [%x]\n",
249 bus->self ? pci_name(bus->self) : "root", flags);
250
251 /* Calculate alignment requirements based on the type
252 * of resource we are working on
253 */
254 if (flags & IORESOURCE_IO) {
255 bres = 0;
256 min_align = phb->ioda.io_segsize;
257 min_balign = 0x1000;
258 } else {
259 bres = 1;
260 min_align = phb->ioda.m32_segsize;
261 min_balign = 0x100000;
262 }
263
264 /* Gather all our children resources ordered by alignment */
265 INIT_LIST_HEAD(&head);
266
267 /* - Busses */
268 list_for_each_entry(cbus, &bus->children, node) {
269 pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
270 pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
271 }
272
273 /* - Devices */
274 list_for_each_entry(cdev, &bus->devices, bus_list) {
275 pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
276 /* Align them to segment size */
277 if (dev_align < min_align)
278 dev_align = min_align;
279 pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
280 }
281 if (list_empty(&head))
282 goto empty;
283
284 /* Now we can do two things: assign offsets to them within that
285 * level and get our total alignment & size requirements. The
286 * assignment algorithm is going to be uber-trivial for now, we
287 * can try to be smarter later at filling out holes.
288 */
289 if (bus->self) {
290 /* No offset for downstream bridges */
291 start = 0;
292 } else {
293 /* Offset from the root */
294 if (flags & IORESOURCE_IO)
295 /* Don't hand out IO 0 */
296 start = hose->io_resource.start + 0x1000;
297 else
298 start = hose->mem_resources[0].start;
299 }
300 while(!list_empty(&head)) {
301 w = list_first_entry(&head, struct resource_wrap, link);
302 list_del(&w->link);
303 if (w->size) {
304 if (start) {
305 start = ALIGN(start, w->align);
306 if (w->dev)
307 pnv_ioda_offset_dev(w->dev,flags,start);
308 else if (w->bus)
309 pnv_ioda_offset_bus(w->bus,flags,start);
310 }
311 if (w->align > *align)
312 *align = w->align;
313 }
314 start += w->size;
315 kfree(w);
316 }
317 *size = start;
318
319 /* Align and setup bridge resources */
320 *align = max_t(resource_size_t, *align,
321 max_t(resource_size_t, min_align, min_balign));
322 *size = ALIGN(*size,
323 max_t(resource_size_t, min_align, min_balign));
324 empty:
325 /* Only setup P2P's, not the PHB itself */
326 if (bus->self) {
327 struct resource *res = bus->resource[bres];
328
329 if (WARN_ON(res == NULL))
330 return;
331
332 /*
333 * FIXME: We should probably export and call
334 * pci_bridge_check_ranges() to properly re-initialize
335 * the PCI portion of the flags here, and to detect
336 * what the bridge actually supports.
337 */
338 res->start = 0;
339 res->flags = (*size) ? flags : 0;
340 res->end = (*size) ? (*size - 1) : 0;
341 }
342
343 pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
344 bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
345}
346
347static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) 72static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
348{ 73{
349 struct device_node *np; 74 struct device_node *np;
@@ -354,172 +79,6 @@ static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
354 return PCI_DN(np); 79 return PCI_DN(np);
355} 80}
356 81
357static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
358{
359 struct pci_controller *hose = pci_bus_to_host(dev->bus);
360 struct pnv_phb *phb = hose->private_data;
361 struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
362 unsigned int pe, i;
363 resource_size_t pos;
364 struct resource io_res;
365 struct resource m32_res;
366 struct pci_bus_region region;
367 int rc;
368
369 /* Anything not referenced in the device-tree gets PE#0 */
370 pe = pdn ? pdn->pe_number : 0;
371
372 /* Calculate the device min/max */
373 io_res.start = m32_res.start = (resource_size_t)-1;
374 io_res.end = m32_res.end = 0;
375 io_res.flags = IORESOURCE_IO;
376 m32_res.flags = IORESOURCE_MEM;
377
378 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
379 struct resource *r = NULL;
380 if (dev->resource[i].flags & IORESOURCE_IO)
381 r = &io_res;
382 if (dev->resource[i].flags & IORESOURCE_MEM)
383 r = &m32_res;
384 if (!r)
385 continue;
386 if (dev->resource[i].start < r->start)
387 r->start = dev->resource[i].start;
388 if (dev->resource[i].end > r->end)
389 r->end = dev->resource[i].end;
390 }
391
392 /* Setup IO segments */
393 if (io_res.start < io_res.end) {
394 pcibios_resource_to_bus(dev, &region, &io_res);
395 pos = region.start;
396 i = pos / phb->ioda.io_segsize;
397 while(i < phb->ioda.total_pe && pos <= region.end) {
398 if (phb->ioda.io_segmap[i]) {
399 pr_err("%s: Trying to use IO seg #%d which is"
400 " already used by PE# %d\n",
401 pci_name(dev), i,
402 phb->ioda.io_segmap[i]);
403 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
404 break;
405 }
406 phb->ioda.io_segmap[i] = pe;
407 rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
408 OPAL_IO_WINDOW_TYPE,
409 0, i);
410 if (rc != OPAL_SUCCESS) {
411 pr_err("%s: OPAL error %d setting up mapping"
412 " for IO seg# %d\n",
413 pci_name(dev), rc, i);
414 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
415 break;
416 }
417 pos += phb->ioda.io_segsize;
418 i++;
419 };
420 }
421
422 /* Setup M32 segments */
423 if (m32_res.start < m32_res.end) {
424 pcibios_resource_to_bus(dev, &region, &m32_res);
425 pos = region.start;
426 i = pos / phb->ioda.m32_segsize;
427 while(i < phb->ioda.total_pe && pos <= region.end) {
428 if (phb->ioda.m32_segmap[i]) {
429 pr_err("%s: Trying to use M32 seg #%d which is"
430 " already used by PE# %d\n",
431 pci_name(dev), i,
432 phb->ioda.m32_segmap[i]);
433 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
434 break;
435 }
436 phb->ioda.m32_segmap[i] = pe;
437 rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
438 OPAL_M32_WINDOW_TYPE,
439 0, i);
440 if (rc != OPAL_SUCCESS) {
441 pr_err("%s: OPAL error %d setting up mapping"
442 " for M32 seg# %d\n",
443 pci_name(dev), rc, i);
444 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
445 break;
446 }
447 pos += phb->ioda.m32_segsize;
448 i++;
449 }
450 }
451}
452
453/* Check if a resource still fits in the total IO or M32 range
454 * for a given PHB
455 */
456static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
457 struct resource *r)
458{
459 struct resource *bounds;
460
461 if (r->flags & IORESOURCE_IO)
462 bounds = &hose->io_resource;
463 else if (r->flags & IORESOURCE_MEM)
464 bounds = &hose->mem_resources[0];
465 else
466 return 1;
467
468 if (r->start >= bounds->start && r->end <= bounds->end)
469 return 1;
470 r->flags = 0;
471 return 0;
472}
473
474static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
475{
476 struct pci_controller *hose = pci_bus_to_host(bus);
477 struct pci_bus *cbus;
478 struct pci_dev *cdev;
479 unsigned int i;
480
481 /* We used to clear all device enables here. However it looks like
482 * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
483 * and shoot fatal errors to the PHB which in turns fences itself
484 * and we can't recover from that ... yet. So for now, let's leave
485 * the enables as-is and hope for the best.
486 */
487
488 /* Check if bus resources fit in our IO or M32 range */
489 for (i = 0; bus->self && (i < 2); i++) {
490 struct resource *r = bus->resource[i];
491 if (r && !pnv_ioda_resource_fit(hose, r))
492 pr_err("%s: Bus %d resource %d disabled, no room\n",
493 pci_name(bus->self), bus->number, i);
494 }
495
496 /* Update self if it's not a PHB */
497 if (bus->self)
498 pci_setup_bridge(bus);
499
500 /* Update child devices */
501 list_for_each_entry(cdev, &bus->devices, bus_list) {
502 /* Check if resource fits, if not, disabled it */
503 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
504 struct resource *r = &cdev->resource[i];
505 if (!pnv_ioda_resource_fit(hose, r))
506 pr_err("%s: Resource %d disabled, no room\n",
507 pci_name(cdev), i);
508 }
509
510 /* Assign segments */
511 pnv_ioda_setup_pe_segments(cdev);
512
513 /* Update HW BARs */
514 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
515 pci_update_resource(cdev, i);
516 }
517
518 /* Update child busses */
519 list_for_each_entry(cbus, &bus->children, node)
520 pnv_ioda_update_resources(cbus);
521}
522
523static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb) 82static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
524{ 83{
525 unsigned long pe; 84 unsigned long pe;
@@ -547,7 +106,7 @@ static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
547 * but in the meantime, we need to protect them to avoid warnings 106 * but in the meantime, we need to protect them to avoid warnings
548 */ 107 */
549#ifdef CONFIG_PCI_MSI 108#ifdef CONFIG_PCI_MSI
550static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev) 109static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
551{ 110{
552 struct pci_controller *hose = pci_bus_to_host(dev->bus); 111 struct pci_controller *hose = pci_bus_to_host(dev->bus);
553 struct pnv_phb *phb = hose->private_data; 112 struct pnv_phb *phb = hose->private_data;
@@ -559,19 +118,6 @@ static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
559 return NULL; 118 return NULL;
560 return &phb->ioda.pe_array[pdn->pe_number]; 119 return &phb->ioda.pe_array[pdn->pe_number];
561} 120}
562
563static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
564{
565 struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);
566
567 while (!pe && dev->bus->self) {
568 dev = dev->bus->self;
569 pe = __pnv_ioda_get_one_pe(dev);
570 if (pe)
571 pe = pe->bus_pe;
572 }
573 return pe;
574}
575#endif /* CONFIG_PCI_MSI */ 121#endif /* CONFIG_PCI_MSI */
576 122
577static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb, 123static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
@@ -588,7 +134,11 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
588 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; 134 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
589 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; 135 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
590 parent = pe->pbus->self; 136 parent = pe->pbus->self;
591 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; 137 if (pe->flags & PNV_IODA_PE_BUS_ALL)
138 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
139 else
140 count = 1;
141
592 switch(count) { 142 switch(count) {
593 case 1: bcomp = OpalPciBusAll; break; 143 case 1: bcomp = OpalPciBusAll; break;
594 case 2: bcomp = OpalPciBus7Bits; break; 144 case 2: bcomp = OpalPciBus7Bits; break;
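
The switch above selects the OPAL bus-compare mode from the size of the PE's bus range: a range of one bus compares all eight RID bus bits, a range of two compares seven, and so on. A standalone sketch of that mapping; the power-of-two cases beyond the two visible in this hunk are an assumption extrapolated from the pattern:

/* Sketch: bus-range size -> number of RID bus bits compared.
 * Assumes count is a power of two, mirroring the OpalPciBus*
 * cases (only count == 1 and count == 2 appear in this hunk). */
#include <stdio.h>

static int bus_compare_bits(unsigned int count)
{
	int bits = 8;			/* count == 1: compare all bus bits */

	while (count > 1) {		/* each doubling masks one more bit */
		count >>= 1;
		bits--;
	}
	return bits;
}

int main(void)
{
	printf("count=1 -> %d bits\n", bus_compare_bits(1));	/* 8 */
	printf("count=2 -> %d bits\n", bus_compare_bits(2));	/* 7 */
	printf("count=8 -> %d bits\n", bus_compare_bits(8));	/* 5 */
	return 0;
}
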
@@ -665,13 +215,13 @@ static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
665{ 215{
666 struct pnv_ioda_pe *lpe; 216 struct pnv_ioda_pe *lpe;
667 217
668 list_for_each_entry(lpe, &phb->ioda.pe_list, link) { 218 list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
669 if (lpe->dma_weight < pe->dma_weight) { 219 if (lpe->dma_weight < pe->dma_weight) {
670 list_add_tail(&pe->link, &lpe->link); 220 list_add_tail(&pe->dma_link, &lpe->dma_link);
671 return; 221 return;
672 } 222 }
673 } 223 }
674 list_add_tail(&pe->link, &phb->ioda.pe_list); 224 list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
675} 225}
676 226
677static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev) 227static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
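
pnv_ioda_link_pe_by_weight() above keeps pe_dma_list ordered by descending dma_weight, so the DMA setup later hands TCE segments to the heaviest PEs first. A standalone model of the insertion order, with stand-in types:

/* Standalone model of weight-ordered insertion; struct pe is a
 * stand-in for pnv_ioda_pe and a singly linked list replaces
 * list_head, but the resulting order matches the code above. */
#include <stdio.h>

struct pe {
	unsigned int dma_weight;
	struct pe *next;
};

static void link_by_weight(struct pe **head, struct pe *pe)
{
	struct pe **p = head;

	/* insert before the first entry lighter than us */
	while (*p && (*p)->dma_weight >= pe->dma_weight)
		p = &(*p)->next;
	pe->next = *p;
	*p = pe;
}

int main(void)
{
	struct pe a = { 3, NULL }, b = { 10, NULL }, c = { 5, NULL };
	struct pe *head = NULL, *p;

	link_by_weight(&head, &a);
	link_by_weight(&head, &b);
	link_by_weight(&head, &c);
	for (p = head; p; p = p->next)
		printf("%u ", p->dma_weight);	/* prints: 10 5 3 */
	printf("\n");
	return 0;
}
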
@@ -698,6 +248,7 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
698 return 10; 248 return 10;
699} 249}
700 250
251#if 0
701static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev) 252static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
702{ 253{
703 struct pci_controller *hose = pci_bus_to_host(dev->bus); 254 struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -766,6 +317,7 @@ static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
766 317
767 return pe; 318 return pe;
768} 319}
320#endif /* Useful for SRIOV case */
769 321
770static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) 322static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
771{ 323{
@@ -783,34 +335,33 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
783 pdn->pcidev = dev; 335 pdn->pcidev = dev;
784 pdn->pe_number = pe->pe_number; 336 pdn->pe_number = pe->pe_number;
785 pe->dma_weight += pnv_ioda_dma_weight(dev); 337 pe->dma_weight += pnv_ioda_dma_weight(dev);
786 if (dev->subordinate) 338 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
787 pnv_ioda_setup_same_PE(dev->subordinate, pe); 339 pnv_ioda_setup_same_PE(dev->subordinate, pe);
788 } 340 }
789} 341}
790 342
791static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev, 343/*
792 struct pnv_ioda_pe *ppe) 344 * There are two types of PCI-bus-sensitive PEs: one comprising a
 345 * single PCI bus, and one containing the primary PCI bus and its
 346 * subordinate PCI devices and buses. The second type is normally
 347 * created behind a PCIe-to-PCI bridge or a PLX switch downstream port.
348 */
349static void __devinit pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
793{ 350{
794 struct pci_controller *hose = pci_bus_to_host(dev->bus); 351 struct pci_controller *hose = pci_bus_to_host(bus);
795 struct pnv_phb *phb = hose->private_data; 352 struct pnv_phb *phb = hose->private_data;
796 struct pci_bus *bus = dev->subordinate;
797 struct pnv_ioda_pe *pe; 353 struct pnv_ioda_pe *pe;
798 int pe_num; 354 int pe_num;
799 355
800 if (!bus) {
801 pr_warning("%s: Bridge without a subordinate bus !\n",
802 pci_name(dev));
803 return;
804 }
805 pe_num = pnv_ioda_alloc_pe(phb); 356 pe_num = pnv_ioda_alloc_pe(phb);
806 if (pe_num == IODA_INVALID_PE) { 357 if (pe_num == IODA_INVALID_PE) {
807 pr_warning("%s: Not enough PE# available, disabling bus\n", 358 pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
808 pci_name(dev)); 359 __func__, pci_domain_nr(bus), bus->number);
809 return; 360 return;
810 } 361 }
811 362
812 pe = &phb->ioda.pe_array[pe_num]; 363 pe = &phb->ioda.pe_array[pe_num];
813 ppe->bus_pe = pe; 364 pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
814 pe->pbus = bus; 365 pe->pbus = bus;
815 pe->pdev = NULL; 366 pe->pdev = NULL;
816 pe->tce32_seg = -1; 367 pe->tce32_seg = -1;
@@ -818,8 +369,12 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
818 pe->rid = bus->busn_res.start << 8; 369 pe->rid = bus->busn_res.start << 8;
819 pe->dma_weight = 0; 370 pe->dma_weight = 0;
820 371
821 pe_info(pe, "Secondary busses %pR associated with PE\n", 372 if (all)
822 &bus->busn_res); 373 pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
374 bus->busn_res.start, bus->busn_res.end, pe_num);
375 else
376 pe_info(pe, "Secondary bus %d associated with PE#%d\n",
377 bus->busn_res.start, pe_num);
823 378
824 if (pnv_ioda_configure_pe(phb, pe)) { 379 if (pnv_ioda_configure_pe(phb, pe)) {
825 /* XXX What do we do here ? */ 380 /* XXX What do we do here ? */
@@ -832,6 +387,9 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
832 /* Associate it with all child devices */ 387 /* Associate it with all child devices */
833 pnv_ioda_setup_same_PE(bus, pe); 388 pnv_ioda_setup_same_PE(bus, pe);
834 389
390 /* Put the PE on the list */
391 list_add_tail(&pe->list, &phb->ioda.pe_list);
392
835 /* Account for one DMA PE if at least one DMA capable device exist 393 /* Account for one DMA PE if at least one DMA capable device exist
836 * below the bridge 394 * below the bridge
837 */ 395 */
@@ -847,17 +405,33 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
847static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) 405static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
848{ 406{
849 struct pci_dev *dev; 407 struct pci_dev *dev;
850 struct pnv_ioda_pe *pe; 408
409 pnv_ioda_setup_bus_PE(bus, 0);
851 410
852 list_for_each_entry(dev, &bus->devices, bus_list) { 411 list_for_each_entry(dev, &bus->devices, bus_list) {
853 pe = pnv_ioda_setup_dev_PE(dev); 412 if (dev->subordinate) {
854 if (pe == NULL) 413 if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
855 continue; 414 pnv_ioda_setup_bus_PE(dev->subordinate, 1);
856 /* Leaving the PCIe domain ... single PE# */ 415 else
857 if (dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 416 pnv_ioda_setup_PEs(dev->subordinate);
858 pnv_ioda_setup_bus_PE(dev, pe); 417 }
859 else if (dev->subordinate) 418 }
860 pnv_ioda_setup_PEs(dev->subordinate); 419}
420
421/*
422 * Configure PEs so that the downstream PCI buses and devices
423 * get their associated PE#. Unfortunately, we haven't figured
424 * out a way to identify PLX bridges yet, so we simply put the
425 * PCI bus and the subordinates behind the root port into one
426 * PE# here. This scheme is expected to change as soon as we
427 * can detect PLX bridges correctly.
428 */
429static void __devinit pnv_pci_ioda_setup_PEs(void)
430{
431 struct pci_controller *hose, *tmp;
432
433 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
434 pnv_ioda_setup_PEs(hose->bus);
861 } 435 }
862} 436}
863 437
@@ -999,7 +573,7 @@ static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
999 remaining = phb->ioda.tce32_count; 573 remaining = phb->ioda.tce32_count;
1000 tw = phb->ioda.dma_weight; 574 tw = phb->ioda.dma_weight;
1001 base = 0; 575 base = 0;
1002 list_for_each_entry(pe, &phb->ioda.pe_list, link) { 576 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
1003 if (!pe->dma_weight) 577 if (!pe->dma_weight)
1004 continue; 578 continue;
1005 if (!remaining) { 579 if (!remaining) {
@@ -1108,34 +682,151 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
1108static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } 682static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
1109#endif /* CONFIG_PCI_MSI */ 683#endif /* CONFIG_PCI_MSI */
1110 684
1111/* This is the starting point of our IODA specific resource 685/*
1112 * allocation process 686 * This function is supposed to be called on PEs from top to
 687 * bottom, so the I/O or MMIO segment assigned to a parent
 688 * PE can be overridden by its child PEs if necessary.
1113 */ 689 */
1114static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose) 690static void __devinit pnv_ioda_setup_pe_seg(struct pci_controller *hose,
691 struct pnv_ioda_pe *pe)
1115{ 692{
1116 resource_size_t size, align; 693 struct pnv_phb *phb = hose->private_data;
1117 struct pci_bus *child; 694 struct pci_bus_region region;
695 struct resource *res;
696 int i, index;
697 int rc;
1118 698
1119 /* Associate PEs per functions */ 699 /*
1120 pnv_ioda_setup_PEs(hose->bus); 700 * NOTE: We only care PCI bus based PE for now. For PCI
701 * device based PE, for example SRIOV sensitive VF should
702 * be figured out later.
703 */
704 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
705
706 pci_bus_for_each_resource(pe->pbus, res, i) {
707 if (!res || !res->flags ||
708 res->start > res->end)
709 continue;
710
711 if (res->flags & IORESOURCE_IO) {
712 region.start = res->start - phb->ioda.io_pci_base;
713 region.end = res->end - phb->ioda.io_pci_base;
714 index = region.start / phb->ioda.io_segsize;
715
716 while (index < phb->ioda.total_pe &&
717 region.start <= region.end) {
718 phb->ioda.io_segmap[index] = pe->pe_number;
719 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
720 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
721 if (rc != OPAL_SUCCESS) {
722 pr_err("%s: OPAL error %d when mapping IO "
723 "segment #%d to PE#%d\n",
724 __func__, rc, index, pe->pe_number);
725 break;
726 }
727
728 region.start += phb->ioda.io_segsize;
729 index++;
730 }
731 } else if (res->flags & IORESOURCE_MEM) {
732 region.start = res->start -
733 hose->pci_mem_offset -
734 phb->ioda.m32_pci_base;
735 region.end = res->end -
736 hose->pci_mem_offset -
737 phb->ioda.m32_pci_base;
738 index = region.start / phb->ioda.m32_segsize;
739
740 while (index < phb->ioda.total_pe &&
741 region.start <= region.end) {
742 phb->ioda.m32_segmap[index] = pe->pe_number;
743 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
744 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
745 if (rc != OPAL_SUCCESS) {
746 pr_err("%s: OPAL error %d when mapping M32 "
747 "segment#%d to PE#%d",
748 __func__, rc, index, pe->pe_number);
749 break;
750 }
751
752 region.start += phb->ioda.m32_segsize;
753 index++;
754 }
755 }
756 }
757}
1121 758
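
pnv_ioda_setup_pe_seg() above translates a PE's window into PHB-relative space and maps every segment-sized slice the window covers to that PE#. A standalone sketch of the index arithmetic; the base, segment size and window values are made up:

/* Sketch of the segment walk: which segment indices does a window
 * [start, end] cover?  Values below are illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned long io_pci_base = 0;		/* assumed PHB I/O base */
	unsigned long io_segsize = 0x1000;	/* assumed 4K segments */
	unsigned long start = 0x2000, end = 0x4fff;	/* a PE's window */
	unsigned long pos = start - io_pci_base;
	int index = pos / io_segsize;

	while (pos <= end - io_pci_base) {
		printf("map I/O segment #%d to this PE\n", index);
		pos += io_segsize;	/* segments 2, 3 and 4 here */
		index++;
	}
	return 0;
}
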
1122 /* Calculate all resources */ 759static void __devinit pnv_pci_ioda_setup_seg(void)
1123 pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align); 760{
1124 pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align); 761 struct pci_controller *tmp, *hose;
762 struct pnv_phb *phb;
763 struct pnv_ioda_pe *pe;
1125 764
1126 /* Apply then to HW */ 765 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1127 pnv_ioda_update_resources(hose->bus); 766 phb = hose->private_data;
767 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
768 pnv_ioda_setup_pe_seg(hose, pe);
769 }
770 }
771}
1128 772
1129 /* Setup DMA */ 773static void __devinit pnv_pci_ioda_setup_DMA(void)
1130 pnv_ioda_setup_dma(hose->private_data); 774{
775 struct pci_controller *hose, *tmp;
776 struct pnv_phb *phb;
1131 777
1132 /* Configure PCI Express settings */ 778 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1133 list_for_each_entry(child, &hose->bus->children, node) { 779 pnv_ioda_setup_dma(hose->private_data);
1134 struct pci_dev *self = child->self; 780
1135 if (!self) 781 /* Mark the PHB initialization done */
1136 continue; 782 phb = hose->private_data;
1137 pcie_bus_configure_settings(child, self->pcie_mpss); 783 phb->initialized = 1;
784 }
785}
786
787static void __devinit pnv_pci_ioda_fixup(void)
788{
789 pnv_pci_ioda_setup_PEs();
790 pnv_pci_ioda_setup_seg();
791 pnv_pci_ioda_setup_DMA();
792}
793
794/*
795 * Returns the alignment for I/O or memory windows for P2P
796 * bridges. That actually depends on how PEs are segmented.
797 * For now, we return I/O or M32 segment size for PE sensitive
798 * P2P bridges. Otherwise, the default values (4KiB for I/O,
799 * 1MiB for memory) will be returned.
800 *
801 * The current PCI bus might be put into one PE, which was
802 * create against the parent PCI bridge. For that case, we
803 * needn't enlarge the alignment so that we can save some
804 * resources.
805 */
806static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
807 unsigned long type)
808{
809 struct pci_dev *bridge;
810 struct pci_controller *hose = pci_bus_to_host(bus);
811 struct pnv_phb *phb = hose->private_data;
812 int num_pci_bridges = 0;
813
814 bridge = bus->self;
815 while (bridge) {
816 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
817 num_pci_bridges++;
818 if (num_pci_bridges >= 2)
819 return 1;
820 }
821
822 bridge = bridge->bus->self;
1138 } 823 }
824
825 /* We need support prefetchable memory window later */
826 if (type & IORESOURCE_MEM)
827 return phb->ioda.m32_segsize;
828
829 return phb->ioda.io_segsize;
1139} 830}
1140 831
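
A small illustration of why pnv_pci_window_alignment() returns the segment size: when every P2P bridge window starts on a segment boundary, no two PEs can share a segment, which is what makes per-bus PEs possible. The 1M M32 segment size is an assumed example value:

/* Sketch: aligning a bridge window to the (assumed) M32 segment
 * size guarantees it starts on a fresh segment. */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long m32_segsize = 0x100000;	/* assumed 1M segments */
	unsigned long next_free = 0x00180000;	/* mid-segment address */

	printf("window start %#lx -> %#lx\n",
	       next_free, ALIGN_UP(next_free, m32_segsize));
	return 0;
}
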
1141/* Prevent enabling devices for which we couldn't properly 832/* Prevent enabling devices for which we couldn't properly
@@ -1143,10 +834,22 @@ static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose)
1143 */ 834 */
1144static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev) 835static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
1145{ 836{
1146 struct pci_dn *pdn = pnv_ioda_get_pdn(dev); 837 struct pci_controller *hose = pci_bus_to_host(dev->bus);
838 struct pnv_phb *phb = hose->private_data;
839 struct pci_dn *pdn;
840
841 /* The function is probably called while the PEs have
 842 * not been created yet, for example during resource
 843 * reassignment in the PCI probe period. Just skip the
 844 * check if the PEs aren't ready.
845 */
846 if (!phb->initialized)
847 return 0;
1147 848
849 pdn = pnv_ioda_get_pdn(dev);
1148 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 850 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1149 return -EINVAL; 851 return -EINVAL;
852
1150 return 0; 853 return 0;
1151} 854}
1152 855
@@ -1237,9 +940,9 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1237 /* Allocate aux data & arrays */ 940 /* Allocate aux data & arrays */
1238 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); 941 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
1239 m32map_off = size; 942 m32map_off = size;
1240 size += phb->ioda.total_pe; 943 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
1241 iomap_off = size; 944 iomap_off = size;
1242 size += phb->ioda.total_pe; 945 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
1243 pemap_off = size; 946 pemap_off = size;
1244 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe); 947 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
1245 aux = alloc_bootmem(size); 948 aux = alloc_bootmem(size);
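
The two size lines fixed above carve a single bootmem allocation into the PE-allocation bitmap, the M32 and I/O segment maps, and the PE array; the old code reserved one byte per PE for each map instead of one array element. A standalone model of the layout with stand-in types:

/* Standalone model of the aux buffer carving.  struct pe and the
 * unsigned int maps are stand-ins; the offset arithmetic mirrors
 * the fixed code, including the per-element (not per-byte) sizing. */
#include <stdio.h>
#include <stdlib.h>

struct pe { long dummy[8]; };

int main(void)
{
	unsigned int total_pe = 128;
	size_t size, m32map_off, iomap_off, pemap_off;
	unsigned int *m32_segmap, *io_segmap;
	struct pe *pe_array;
	char *aux;

	size = (total_pe / 8 + sizeof(long) - 1) & ~(sizeof(long) - 1);
	m32map_off = size;
	size += total_pe * sizeof(*m32_segmap);	/* was: total_pe bytes */
	iomap_off = size;
	size += total_pe * sizeof(*io_segmap);	/* was: total_pe bytes */
	pemap_off = size;
	size += total_pe * sizeof(struct pe);

	aux = calloc(1, size);
	m32_segmap = (unsigned int *)(aux + m32map_off);
	io_segmap = (unsigned int *)(aux + iomap_off);
	pe_array = (struct pe *)(aux + pemap_off);

	/* with correct sizing the last elements stay in bounds */
	m32_segmap[total_pe - 1] = 1;
	io_segmap[total_pe - 1] = 1;
	pe_array[total_pe - 1].dummy[0] = 1;
	printf("aux size = %zu bytes\n", size);
	free(aux);
	return 0;
}
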
@@ -1250,6 +953,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1250 phb->ioda.pe_array = aux + pemap_off; 953 phb->ioda.pe_array = aux + pemap_off;
1251 set_bit(0, phb->ioda.pe_alloc); 954 set_bit(0, phb->ioda.pe_alloc);
1252 955
956 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
1253 INIT_LIST_HEAD(&phb->ioda.pe_list); 957 INIT_LIST_HEAD(&phb->ioda.pe_list);
1254 958
1255 /* Calculate how many 32-bit TCE segments we have */ 959 /* Calculate how many 32-bit TCE segments we have */
@@ -1298,14 +1002,17 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1298 /* Setup MSI support */ 1002 /* Setup MSI support */
1299 pnv_pci_init_ioda_msis(phb); 1003 pnv_pci_init_ioda_msis(phb);
1300 1004
1301 /* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an 1005 /*
1302 * odd combination which essentially means that we skip all resource 1006 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
1303 * fixups and assignments in the generic code, and do it all 1007 * to let the PCI core do resource assignment. It's expected
1304 * ourselves here 1008 * that the PCI core will do correct I/O and MMIO alignment
 1009 * for the P2P bridge BARs so that each PCI bus (excluding
 1010 * the child P2P bridges) can form an individual PE.
1305 */ 1011 */
1306 ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; 1012 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
1307 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; 1013 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
1308 pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC); 1014 ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
1015 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
1309 1016
1310 /* Reset IODA tables to a clean state */ 1017 /* Reset IODA tables to a clean state */
1311 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); 1018 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8bc479634643..7cfb7c883deb 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -17,9 +17,14 @@ enum pnv_phb_model {
17}; 17};
18 18
19#define PNV_PCI_DIAG_BUF_SIZE 4096 19#define PNV_PCI_DIAG_BUF_SIZE 4096
20#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
21#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
22#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
20 23
21/* Data associated with a PE, including IOMMU tracking etc.. */ 24/* Data associated with a PE, including IOMMU tracking etc.. */
22struct pnv_ioda_pe { 25struct pnv_ioda_pe {
26 unsigned long flags;
27
23 /* A PE can be associated with a single device or an 28 /* A PE can be associated with a single device or an
24 * entire bus (& children). In the former case, pdev 29 * entire bus (& children). In the former case, pdev
25 * is populated, in the later case, pbus is. 30 * is populated, in the later case, pbus is.
@@ -40,11 +45,6 @@ struct pnv_ioda_pe {
40 */ 45 */
41 unsigned int dma_weight; 46 unsigned int dma_weight;
42 47
43 /* This is a PCI-E -> PCI-X bridge, this points to the
44 * corresponding bus PE
45 */
46 struct pnv_ioda_pe *bus_pe;
47
48 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ 48 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
49 int tce32_seg; 49 int tce32_seg;
50 int tce32_segcount; 50 int tce32_segcount;
@@ -59,7 +59,8 @@ struct pnv_ioda_pe {
59 int mve_number; 59 int mve_number;
60 60
61 /* Link in list of PE#s */ 61 /* Link in list of PE#s */
62 struct list_head link; 62 struct list_head dma_link;
63 struct list_head list;
63}; 64};
64 65
65struct pnv_phb { 66struct pnv_phb {
@@ -68,6 +69,7 @@ struct pnv_phb {
68 enum pnv_phb_model model; 69 enum pnv_phb_model model;
69 u64 opal_id; 70 u64 opal_id;
70 void __iomem *regs; 71 void __iomem *regs;
72 int initialized;
71 spinlock_t lock; 73 spinlock_t lock;
72 74
73#ifdef CONFIG_PCI_MSI 75#ifdef CONFIG_PCI_MSI
@@ -107,6 +109,11 @@ struct pnv_phb {
107 unsigned int *io_segmap; 109 unsigned int *io_segmap;
108 struct pnv_ioda_pe *pe_array; 110 struct pnv_ioda_pe *pe_array;
109 111
 112 /* Sorted list of used PEs based
113 * on the sequence of creation
114 */
115 struct list_head pe_list;
116
110 /* Reverse map of PEs, will have to extend if 117 /* Reverse map of PEs, will have to extend if
111 * we are to support more than 256 PEs, indexed 118 * we are to support more than 256 PEs, indexed
112 * bus { bus, devfn } 119 * bus { bus, devfn }
@@ -125,7 +132,7 @@ struct pnv_phb {
125 /* Sorted list of used PEs, sorted at 132 /* Sorted list of used PEs, sorted at
126 * boot for resource allocation purposes 133 * boot for resource allocation purposes
127 */ 134 */
128 struct list_head pe_list; 135 struct list_head pe_dma_list;
129 } ioda; 136 } ioda;
130 }; 137 };
131 138
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3124cf791ebb..d00d7b0a3bda 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -43,7 +43,7 @@ enum ps3_lpar_vas_id {
43 43
44static DEFINE_SPINLOCK(ps3_htab_lock); 44static DEFINE_SPINLOCK(ps3_htab_lock);
45 45
46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, 46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
47 unsigned long pa, unsigned long rflags, unsigned long vflags, 47 unsigned long pa, unsigned long rflags, unsigned long vflags,
48 int psize, int ssize) 48 int psize, int ssize)
49{ 49{
@@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
61 */ 61 */
62 vflags &= ~HPTE_V_SECONDARY; 62 vflags &= ~HPTE_V_SECONDARY;
63 63
64 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 64 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; 65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
66 66
67 spin_lock_irqsave(&ps3_htab_lock, flags); 67 spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
75 75
76 if (result) { 76 if (result) {
77 /* all entries bolted !*/ 77 /* all entries bolted !*/
78 pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n", 78 pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
79 __func__, result, va, pa, hpte_group, hpte_v, hpte_r); 79 __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
80 BUG(); 80 BUG();
81 } 81 }
82 82
@@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
107} 107}
108 108
109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, 109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
110 unsigned long va, int psize, int ssize, int local) 110 unsigned long vpn, int psize, int ssize, int local)
111{ 111{
112 int result; 112 int result;
113 u64 hpte_v, want_v, hpte_rs; 113 u64 hpte_v, want_v, hpte_rs;
@@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
115 unsigned long flags; 115 unsigned long flags;
116 long ret; 116 long ret;
117 117
118 want_v = hpte_encode_v(va, psize, ssize); 118 want_v = hpte_encode_v(vpn, psize, ssize);
119 119
120 spin_lock_irqsave(&ps3_htab_lock, flags); 120 spin_lock_irqsave(&ps3_htab_lock, flags);
121 121
@@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
125 &hpte_rs); 125 &hpte_rs);
126 126
127 if (result) { 127 if (result) {
128 pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n", 128 pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
129 __func__, result, va, slot, psize); 129 __func__, result, vpn, slot, psize);
130 BUG(); 130 BUG();
131 } 131 }
132 132
@@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
159 panic("ps3_hpte_updateboltedpp() not implemented"); 159 panic("ps3_hpte_updateboltedpp() not implemented");
160} 160}
161 161
162static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, 162static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
163 int psize, int ssize, int local) 163 int psize, int ssize, int local)
164{ 164{
165 unsigned long flags; 165 unsigned long flags;
@@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); 170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
171 171
172 if (result) { 172 if (result) {
173 pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n", 173 pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
174 __func__, result, va, slot, psize); 174 __func__, result, vpn, slot, psize);
175 BUG(); 175 BUG();
176 } 176 }
177 177
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 18c168b752da..9a04322b1736 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -728,7 +728,7 @@ static void eeh_add_device_early(struct device_node *dn)
728{ 728{
729 struct pci_controller *phb; 729 struct pci_controller *phb;
730 730
731 if (!dn || !of_node_to_eeh_dev(dn)) 731 if (!of_node_to_eeh_dev(dn))
732 return; 732 return;
733 phb = of_node_to_eeh_dev(dn)->phb; 733 phb = of_node_to_eeh_dev(dn)->phb;
734 734
@@ -817,6 +817,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
817/** 817/**
818 * eeh_remove_device - Undo EEH setup for the indicated pci device 818 * eeh_remove_device - Undo EEH setup for the indicated pci device
819 * @dev: pci device to be removed 819 * @dev: pci device to be removed
820 * @purge_pe: remove the PE or not
820 * 821 *
821 * This routine should be called when a device is removed from 822 * This routine should be called when a device is removed from
822 * a running system (e.g. by hotplug or dlpar). It unregisters 823 * a running system (e.g. by hotplug or dlpar). It unregisters
@@ -824,7 +825,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
824 * this device will no longer be detected after this call; thus, 825 * this device will no longer be detected after this call; thus,
825 * i/o errors affecting this slot may leave this device unusable. 826 * i/o errors affecting this slot may leave this device unusable.
826 */ 827 */
827static void eeh_remove_device(struct pci_dev *dev) 828static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
828{ 829{
829 struct eeh_dev *edev; 830 struct eeh_dev *edev;
830 831
@@ -843,7 +844,7 @@ static void eeh_remove_device(struct pci_dev *dev)
843 dev->dev.archdata.edev = NULL; 844 dev->dev.archdata.edev = NULL;
844 pci_dev_put(dev); 845 pci_dev_put(dev);
845 846
846 eeh_rmv_from_parent_pe(edev); 847 eeh_rmv_from_parent_pe(edev, purge_pe);
847 eeh_addr_cache_rmv_dev(dev); 848 eeh_addr_cache_rmv_dev(dev);
848 eeh_sysfs_remove_device(dev); 849 eeh_sysfs_remove_device(dev);
849} 850}
@@ -851,21 +852,22 @@ static void eeh_remove_device(struct pci_dev *dev)
851/** 852/**
852 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device 853 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
853 * @dev: PCI device 854 * @dev: PCI device
855 * @purge_pe: remove the corresponding PE or not
854 * 856 *
855 * This routine must be called when a device is removed from the 857 * This routine must be called when a device is removed from the
856 * running system through hotplug or dlpar. The corresponding 858 * running system through hotplug or dlpar. The corresponding
857 * PCI address cache will be removed. 859 * PCI address cache will be removed.
858 */ 860 */
859void eeh_remove_bus_device(struct pci_dev *dev) 861void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
860{ 862{
861 struct pci_bus *bus = dev->subordinate; 863 struct pci_bus *bus = dev->subordinate;
862 struct pci_dev *child, *tmp; 864 struct pci_dev *child, *tmp;
863 865
864 eeh_remove_device(dev); 866 eeh_remove_device(dev, purge_pe);
865 867
866 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 868 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
867 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) 869 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
868 eeh_remove_bus_device(child); 870 eeh_remove_bus_device(child, purge_pe);
869 } 871 }
870} 872}
871EXPORT_SYMBOL_GPL(eeh_remove_bus_device); 873EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 8370ce7d5931..a3fefb61097c 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -25,6 +25,7 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/module.h>
28#include <linux/pci.h> 29#include <linux/pci.h>
29#include <asm/eeh.h> 30#include <asm/eeh.h>
30#include <asm/eeh_event.h> 31#include <asm/eeh_event.h>
@@ -47,6 +48,41 @@ static inline const char *eeh_pcid_name(struct pci_dev *pdev)
47 return ""; 48 return "";
48} 49}
49 50
51/**
52 * eeh_pcid_get - Get the PCI device driver
53 * @pdev: PCI device
54 *
55 * The function is used to retrieve the PCI device driver for
 56 * the indicated PCI device. It also takes a reference on the
 57 * driver module to prevent it being unloaded on the fly;
 58 * otherwise a kernel crash would result.
59 */
60static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
61{
62 if (!pdev || !pdev->driver)
63 return NULL;
64
65 if (!try_module_get(pdev->driver->driver.owner))
66 return NULL;
67
68 return pdev->driver;
69}
70
71/**
72 * eeh_pcid_put - Dereference on the PCI device driver
73 * @pdev: PCI device
74 *
 75 * The function drops the reference taken by eeh_pcid_get() on
 76 * the driver module of the indicated PCI device.
77 */
78static inline void eeh_pcid_put(struct pci_dev *pdev)
79{
80 if (!pdev || !pdev->driver)
81 return;
82
83 module_put(pdev->driver->driver.owner);
84}
85
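
The discipline the reworked eeh_report_*() callbacks below follow: pin the driver module for the whole callback and balance the reference on every exit path. A hypothetical callback sketching the shape (kernel context assumed, not a standalone program):

/* Hypothetical callback showing the eeh_pcid_get()/eeh_pcid_put()
 * pairing used by the handlers below. */
static void example_report(struct pci_dev *dev)
{
	struct pci_driver *driver;

	driver = eeh_pcid_get(dev);	/* pins driver->driver.owner */
	if (!driver)
		return;

	if (driver->err_handler && driver->err_handler->resume)
		driver->err_handler->resume(dev);

	eeh_pcid_put(dev);		/* balances the get on all paths */
}
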
50#if 0 86#if 0
51static void print_device_node_tree(struct pci_dn *pdn, int dent) 87static void print_device_node_tree(struct pci_dn *pdn, int dent)
52{ 88{
@@ -128,23 +164,24 @@ static void *eeh_report_error(void *data, void *userdata)
128 struct eeh_dev *edev = (struct eeh_dev *)data; 164 struct eeh_dev *edev = (struct eeh_dev *)data;
129 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 165 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
130 enum pci_ers_result rc, *res = userdata; 166 enum pci_ers_result rc, *res = userdata;
131 struct pci_driver *driver = dev->driver; 167 struct pci_driver *driver;
132 168
133 /* We might not have the associated PCI device, 169 /* We might not have the associated PCI device,
134 * then we should continue for next one. 170 * then we should continue for next one.
135 */ 171 */
136 if (!dev) return NULL; 172 if (!dev) return NULL;
137
138 dev->error_state = pci_channel_io_frozen; 173 dev->error_state = pci_channel_io_frozen;
139 174
140 if (!driver) 175 driver = eeh_pcid_get(dev);
141 return NULL; 176 if (!driver) return NULL;
142 177
143 eeh_disable_irq(dev); 178 eeh_disable_irq(dev);
144 179
145 if (!driver->err_handler || 180 if (!driver->err_handler ||
146 !driver->err_handler->error_detected) 181 !driver->err_handler->error_detected) {
182 eeh_pcid_put(dev);
147 return NULL; 183 return NULL;
184 }
148 185
149 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); 186 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
150 187
@@ -152,6 +189,7 @@ static void *eeh_report_error(void *data, void *userdata)
152 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 189 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
153 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 190 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
154 191
192 eeh_pcid_put(dev);
155 return NULL; 193 return NULL;
156} 194}
157 195
@@ -171,12 +209,14 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
171 enum pci_ers_result rc, *res = userdata; 209 enum pci_ers_result rc, *res = userdata;
172 struct pci_driver *driver; 210 struct pci_driver *driver;
173 211
174 if (!dev) return NULL; 212 driver = eeh_pcid_get(dev);
213 if (!driver) return NULL;
175 214
176 if (!(driver = dev->driver) || 215 if (!driver->err_handler ||
177 !driver->err_handler || 216 !driver->err_handler->mmio_enabled) {
178 !driver->err_handler->mmio_enabled) 217 eeh_pcid_put(dev);
179 return NULL; 218 return NULL;
219 }
180 220
181 rc = driver->err_handler->mmio_enabled(dev); 221 rc = driver->err_handler->mmio_enabled(dev);
182 222
@@ -184,6 +224,7 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
184 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 224 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
185 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 225 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
186 226
227 eeh_pcid_put(dev);
187 return NULL; 228 return NULL;
188} 229}
189 230
@@ -204,16 +245,19 @@ static void *eeh_report_reset(void *data, void *userdata)
204 enum pci_ers_result rc, *res = userdata; 245 enum pci_ers_result rc, *res = userdata;
205 struct pci_driver *driver; 246 struct pci_driver *driver;
206 247
207 if (!dev || !(driver = dev->driver)) 248 if (!dev) return NULL;
208 return NULL;
209
210 dev->error_state = pci_channel_io_normal; 249 dev->error_state = pci_channel_io_normal;
211 250
251 driver = eeh_pcid_get(dev);
252 if (!driver) return NULL;
253
212 eeh_enable_irq(dev); 254 eeh_enable_irq(dev);
213 255
214 if (!driver->err_handler || 256 if (!driver->err_handler ||
215 !driver->err_handler->slot_reset) 257 !driver->err_handler->slot_reset) {
258 eeh_pcid_put(dev);
216 return NULL; 259 return NULL;
260 }
217 261
218 rc = driver->err_handler->slot_reset(dev); 262 rc = driver->err_handler->slot_reset(dev);
219 if ((*res == PCI_ERS_RESULT_NONE) || 263 if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -221,6 +265,7 @@ static void *eeh_report_reset(void *data, void *userdata)
221 if (*res == PCI_ERS_RESULT_DISCONNECT && 265 if (*res == PCI_ERS_RESULT_DISCONNECT &&
222 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 266 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
223 267
268 eeh_pcid_put(dev);
224 return NULL; 269 return NULL;
225} 270}
226 271
@@ -240,20 +285,22 @@ static void *eeh_report_resume(void *data, void *userdata)
240 struct pci_driver *driver; 285 struct pci_driver *driver;
241 286
242 if (!dev) return NULL; 287 if (!dev) return NULL;
243
244 dev->error_state = pci_channel_io_normal; 288 dev->error_state = pci_channel_io_normal;
245 289
246 if (!(driver = dev->driver)) 290 driver = eeh_pcid_get(dev);
247 return NULL; 291 if (!driver) return NULL;
248 292
249 eeh_enable_irq(dev); 293 eeh_enable_irq(dev);
250 294
251 if (!driver->err_handler || 295 if (!driver->err_handler ||
252 !driver->err_handler->resume) 296 !driver->err_handler->resume) {
297 eeh_pcid_put(dev);
253 return NULL; 298 return NULL;
299 }
254 300
255 driver->err_handler->resume(dev); 301 driver->err_handler->resume(dev);
256 302
303 eeh_pcid_put(dev);
257 return NULL; 304 return NULL;
258} 305}
259 306
@@ -272,20 +319,22 @@ static void *eeh_report_failure(void *data, void *userdata)
272 struct pci_driver *driver; 319 struct pci_driver *driver;
273 320
274 if (!dev) return NULL; 321 if (!dev) return NULL;
275
276 dev->error_state = pci_channel_io_perm_failure; 322 dev->error_state = pci_channel_io_perm_failure;
277 323
278 if (!(driver = dev->driver)) 324 driver = eeh_pcid_get(dev);
279 return NULL; 325 if (!driver) return NULL;
280 326
281 eeh_disable_irq(dev); 327 eeh_disable_irq(dev);
282 328
283 if (!driver->err_handler || 329 if (!driver->err_handler ||
284 !driver->err_handler->error_detected) 330 !driver->err_handler->error_detected) {
331 eeh_pcid_put(dev);
285 return NULL; 332 return NULL;
333 }
286 334
287 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); 335 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
288 336
337 eeh_pcid_put(dev);
289 return NULL; 338 return NULL;
290} 339}
291 340
@@ -305,8 +354,14 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
305 /* pcibios will clear the counter; save the value */ 354 /* pcibios will clear the counter; save the value */
306 cnt = pe->freeze_count; 355 cnt = pe->freeze_count;
307 356
357 /*
358 * We don't remove the corresponding PE instances because
 359 * we need the information afterwards. The attached EEH
 360 * devices are expected to be re-attached soon when calling
361 * into pcibios_add_pci_devices().
362 */
308 if (bus) 363 if (bus)
309 pcibios_remove_pci_devices(bus); 364 __pcibios_remove_pci_devices(bus, 0);
310 365
311 /* Reset the pci controller. (Asserts RST#; resets config space). 366 /* Reset the pci controller. (Asserts RST#; resets config space).
312 * Reconfigure bridges and devices. Don't try to bring the system 367 * Reconfigure bridges and devices. Don't try to bring the system
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
index 904123c7657b..9d35543736ed 100644
--- a/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -99,23 +99,19 @@ static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
99{ 99{
100 struct eeh_pe *pe; 100 struct eeh_pe *pe;
101 101
102 eeh_lock();
103
104 list_for_each_entry(pe, &eeh_phb_pe, child) { 102 list_for_each_entry(pe, &eeh_phb_pe, child) {
105 /* 103 /*
106 * Actually, we needn't check the type since 104 * Actually, we needn't check the type since
107 * the PE for PHB has been determined when that 105 * the PE for PHB has been determined when that
108 * was created. 106 * was created.
109 */ 107 */
110 if (pe->type == EEH_PE_PHB && 108 if ((pe->type & EEH_PE_PHB) &&
111 pe->phb == phb) { 109 pe->phb == phb) {
112 eeh_unlock(); 110 eeh_unlock();
113 return pe; 111 return pe;
114 } 112 }
115 } 113 }
116 114
117 eeh_unlock();
118
119 return NULL; 115 return NULL;
120} 116}
121 117
@@ -192,14 +188,21 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
192 return NULL; 188 return NULL;
193 } 189 }
194 190
191 eeh_lock();
192
195 /* Traverse root PE */ 193 /* Traverse root PE */
196 for (pe = root; pe; pe = eeh_pe_next(pe, root)) { 194 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
197 eeh_pe_for_each_dev(pe, edev) { 195 eeh_pe_for_each_dev(pe, edev) {
198 ret = fn(edev, flag); 196 ret = fn(edev, flag);
199 if (ret) return ret; 197 if (ret) {
198 eeh_unlock();
199 return ret;
200 }
200 } 201 }
201 } 202 }
202 203
204 eeh_unlock();
205
203 return NULL; 206 return NULL;
204} 207}
205 208
@@ -219,7 +222,7 @@ static void *__eeh_pe_get(void *data, void *flag)
219 struct eeh_dev *edev = (struct eeh_dev *)flag; 222 struct eeh_dev *edev = (struct eeh_dev *)flag;
220 223
221 /* Unexpected PHB PE */ 224 /* Unexpected PHB PE */
222 if (pe->type == EEH_PE_PHB) 225 if (pe->type & EEH_PE_PHB)
223 return NULL; 226 return NULL;
224 227
225 /* We prefer PE address */ 228 /* We prefer PE address */
@@ -251,9 +254,7 @@ static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
251 struct eeh_pe *root = eeh_phb_pe_get(edev->phb); 254 struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
252 struct eeh_pe *pe; 255 struct eeh_pe *pe;
253 256
254 eeh_lock();
255 pe = eeh_pe_traverse(root, __eeh_pe_get, edev); 257 pe = eeh_pe_traverse(root, __eeh_pe_get, edev);
256 eeh_unlock();
257 258
258 return pe; 259 return pe;
259} 260}
@@ -307,6 +308,8 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
307{ 308{
308 struct eeh_pe *pe, *parent; 309 struct eeh_pe *pe, *parent;
309 310
311 eeh_lock();
312
310 /* 313 /*
311 * Search whether the PE already exists according 314 * Search whether the PE already exists according
312 * to the PE address. If it already exists, the 315 * to the PE address. If it already exists, the
@@ -314,8 +317,9 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
314 * components. 317 * components.
315 */ 318 */
316 pe = eeh_pe_get(edev); 319 pe = eeh_pe_get(edev);
317 if (pe) { 320 if (pe && !(pe->type & EEH_PE_INVALID)) {
318 if (!edev->pe_config_addr) { 321 if (!edev->pe_config_addr) {
322 eeh_unlock();
319 pr_err("%s: PE with addr 0x%x already exists\n", 323 pr_err("%s: PE with addr 0x%x already exists\n",
320 __func__, edev->config_addr); 324 __func__, edev->config_addr);
321 return -EEXIST; 325 return -EEXIST;
@@ -327,15 +331,36 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
327 331
328 /* Put the edev to PE */ 332 /* Put the edev to PE */
329 list_add_tail(&edev->list, &pe->edevs); 333 list_add_tail(&edev->list, &pe->edevs);
334 eeh_unlock();
330 pr_debug("EEH: Add %s to Bus PE#%x\n", 335 pr_debug("EEH: Add %s to Bus PE#%x\n",
331 edev->dn->full_name, pe->addr); 336 edev->dn->full_name, pe->addr);
332 337
333 return 0; 338 return 0;
339 } else if (pe && (pe->type & EEH_PE_INVALID)) {
340 list_add_tail(&edev->list, &pe->edevs);
341 edev->pe = pe;
342 /*
 343 * We get here because of PCI hotplug triggered during
 344 * EEH recovery. We need to clear EEH_PE_INVALID up to the top.
345 */
346 parent = pe;
347 while (parent) {
348 if (!(parent->type & EEH_PE_INVALID))
349 break;
350 parent->type &= ~EEH_PE_INVALID;
351 parent = parent->parent;
352 }
353 eeh_unlock();
354 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
355 edev->dn->full_name, pe->addr, pe->parent->addr);
356
357 return 0;
334 } 358 }
335 359
336 /* Create a new EEH PE */ 360 /* Create a new EEH PE */
337 pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE); 361 pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
338 if (!pe) { 362 if (!pe) {
363 eeh_unlock();
339 pr_err("%s: out of memory!\n", __func__); 364 pr_err("%s: out of memory!\n", __func__);
340 return -ENOMEM; 365 return -ENOMEM;
341 } 366 }
@@ -352,6 +377,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
352 if (!parent) { 377 if (!parent) {
353 parent = eeh_phb_pe_get(edev->phb); 378 parent = eeh_phb_pe_get(edev->phb);
354 if (!parent) { 379 if (!parent) {
380 eeh_unlock();
355 pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", 381 pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
356 __func__, edev->phb->global_number); 382 __func__, edev->phb->global_number);
357 edev->pe = NULL; 383 edev->pe = NULL;
@@ -368,6 +394,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
368 list_add_tail(&pe->child, &parent->child_list); 394 list_add_tail(&pe->child, &parent->child_list);
369 list_add_tail(&edev->list, &pe->edevs); 395 list_add_tail(&edev->list, &pe->edevs);
370 edev->pe = pe; 396 edev->pe = pe;
397 eeh_unlock();
371 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", 398 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
372 edev->dn->full_name, pe->addr, pe->parent->addr); 399 edev->dn->full_name, pe->addr, pe->parent->addr);
373 400
@@ -377,15 +404,17 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
377/** 404/**
378 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE 405 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
379 * @edev: EEH device 406 * @edev: EEH device
407 * @purge_pe: whether to destroy an emptied PE instead of marking it invalid
380 * 408 *
381 * The PE hierarchy tree might be changed when doing PCI hotplug. 409 * The PE hierarchy tree might be changed when doing PCI hotplug.
382 * Also, the PCI devices or buses could be removed from the system 410 * Also, the PCI devices or buses could be removed from the system
383 * during EEH recovery. So we have to call this function to remove the 411 * during EEH recovery. So we have to call this function to remove the
384 * corresponding PE accordingly if necessary. 412 * corresponding PE accordingly if necessary.
385 */ 413 */
386int eeh_rmv_from_parent_pe(struct eeh_dev *edev) 414int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
387{ 415{
388 struct eeh_pe *pe, *parent; 416 struct eeh_pe *pe, *parent, *child;
417 int cnt;
389 418
390 if (!edev->pe) { 419 if (!edev->pe) {
391 pr_warning("%s: No PE found for EEH device %s\n", 420 pr_warning("%s: No PE found for EEH device %s\n",
@@ -393,6 +422,8 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
393 return -EEXIST; 422 return -EEXIST;
394 } 423 }
395 424
425 eeh_lock();
426
396 /* Remove the EEH device */ 427 /* Remove the EEH device */
397 pe = edev->pe; 428 pe = edev->pe;
398 edev->pe = NULL; 429 edev->pe = NULL;
@@ -406,18 +437,39 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
406 */ 437 */
407 while (1) { 438 while (1) {
408 parent = pe->parent; 439 parent = pe->parent;
409 if (pe->type == EEH_PE_PHB) 440 if (pe->type & EEH_PE_PHB)
410 break; 441 break;
411 442
412 if (list_empty(&pe->edevs) && 443 if (purge_pe) {
413 list_empty(&pe->child_list)) { 444 if (list_empty(&pe->edevs) &&
414 list_del(&pe->child); 445 list_empty(&pe->child_list)) {
415 kfree(pe); 446 list_del(&pe->child);
447 kfree(pe);
448 } else {
449 break;
450 }
451 } else {
452 if (list_empty(&pe->edevs)) {
453 cnt = 0;
454 list_for_each_entry(child, &pe->child_list, child) {
455 if (!(child->type & EEH_PE_INVALID)) {
456 cnt++;
457 break;
458 }
459 }
460
461 if (!cnt)
462 pe->type |= EEH_PE_INVALID;
463 else
464 break;
465 }
416 } 466 }
417 467
418 pe = parent; 468 pe = parent;
419 } 469 }
420 470
471 eeh_unlock();
472
421 return 0; 473 return 0;
422} 474}
423 475
@@ -463,7 +515,9 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
463 */ 515 */
464void eeh_pe_state_mark(struct eeh_pe *pe, int state) 516void eeh_pe_state_mark(struct eeh_pe *pe, int state)
465{ 517{
518 eeh_lock();
466 eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); 519 eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
520 eeh_unlock();
467} 521}
468 522
469/** 523/**
@@ -497,7 +551,9 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
497 */ 551 */
498void eeh_pe_state_clear(struct eeh_pe *pe, int state) 552void eeh_pe_state_clear(struct eeh_pe *pe, int state)
499{ 553{
554 eeh_lock();
500 eeh_pe_traverse(pe, __eeh_pe_state_clear, &state); 555 eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
556 eeh_unlock();
501} 557}
502 558
503/** 559/**
@@ -559,6 +615,10 @@ static void *eeh_restore_one_device_bars(void *data, void *flag)
559 */ 615 */
560void eeh_pe_restore_bars(struct eeh_pe *pe) 616void eeh_pe_restore_bars(struct eeh_pe *pe)
561{ 617{
618 /*
619 * We needn't take the EEH lock here since eeh_pe_dev_traverse()
620 * takes it internally.
621 */
562 eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL); 622 eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
563} 623}
564 624
@@ -578,14 +638,18 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
578 struct eeh_dev *edev; 638 struct eeh_dev *edev;
579 struct pci_dev *pdev; 639 struct pci_dev *pdev;
580 640
581 if (pe->type == EEH_PE_PHB) { 641 eeh_lock();
642
643 if (pe->type & EEH_PE_PHB) {
582 bus = pe->phb->bus; 644 bus = pe->phb->bus;
583 } else if (pe->type == EEH_PE_BUS) { 645 } else if (pe->type & EEH_PE_BUS) {
584 edev = list_first_entry(&pe->edevs, struct eeh_dev, list); 646 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
585 pdev = eeh_dev_to_pci_dev(edev); 647 pdev = eeh_dev_to_pci_dev(edev);
586 if (pdev) 648 if (pdev)
587 bus = pdev->bus; 649 bus = pdev->bus;
588 } 650 }
589 651
652 eeh_unlock();
653
590 return bus; 654 return bus;
591} 655}
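
The eeh_pe.c hunks above do two related things: PE type checks move from equality tests to bitmask tests (pe->type becomes a flag field), and eeh_lock()/eeh_unlock() move out of eeh_pe_get() into the callers so one lock section covers the whole tree update. The EEH_PE_INVALID flag is what lets a PE survive the hotplug that happens during recovery: removal marks an emptied PE invalid rather than freeing it, and re-adding a device clears the flag back up the ancestry. A minimal sketch of that lifecycle follows; the struct fields and flag value are assumptions for the sketch, not the kernel's own definitions.

/*
 * Illustrative only: a pared-down PE node and the EEH_PE_INVALID
 * lifecycle from the hunks above.
 */
struct pe_sketch {
        int type;                       /* bitmask: PHB/BUS/DEVICE + flags */
        struct pe_sketch *parent;
};

#define PE_INVALID_SKETCH 0x10          /* assumed flag bit */

/* Removal during EEH recovery: keep the empty PE, just mark it. */
static void pe_mark_invalid(struct pe_sketch *pe)
{
        pe->type |= PE_INVALID_SKETCH;
}

/* Device re-appears: revalidate this PE and every invalid ancestor. */
static void pe_revalidate(struct pe_sketch *pe)
{
        while (pe && (pe->type & PE_INVALID_SKETCH)) {
                pe->type &= ~PE_INVALID_SKETCH;
                pe = pe->parent;
        }
}
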
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 177055d0186b..0da39fed355a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -107,9 +107,9 @@ void vpa_init(int cpu)
107} 107}
108 108
109static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 109static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
110 unsigned long va, unsigned long pa, 110 unsigned long vpn, unsigned long pa,
111 unsigned long rflags, unsigned long vflags, 111 unsigned long rflags, unsigned long vflags,
112 int psize, int ssize) 112 int psize, int ssize)
113{ 113{
114 unsigned long lpar_rc; 114 unsigned long lpar_rc;
115 unsigned long flags; 115 unsigned long flags;
@@ -117,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
117 unsigned long hpte_v, hpte_r; 117 unsigned long hpte_v, hpte_r;
118 118
119 if (!(vflags & HPTE_V_BOLTED)) 119 if (!(vflags & HPTE_V_BOLTED))
120 pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 120 pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
121 "rflags=%lx, vflags=%lx, psize=%d)\n", 121 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
122 hpte_group, va, pa, rflags, vflags, psize); 122 hpte_group, vpn, pa, rflags, vflags, psize);
123 123
124 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 124 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
125 hpte_r = hpte_encode_r(pa, psize) | rflags; 125 hpte_r = hpte_encode_r(pa, psize) | rflags;
126 126
127 if (!(vflags & HPTE_V_BOLTED)) 127 if (!(vflags & HPTE_V_BOLTED))
@@ -226,22 +226,6 @@ static void pSeries_lpar_hptab_clear(void)
226} 226}
227 227
228/* 228/*
229 * This computes the AVPN and B fields of the first dword of a HPTE,
230 * for use when we want to match an existing PTE. The bottom 7 bits
231 * of the returned value are zero.
232 */
233static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
234 int ssize)
235{
236 unsigned long v;
237
238 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
239 v <<= HPTE_V_AVPN_SHIFT;
240 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
241 return v;
242}
243
244/*
245 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and 229 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
246 * the low 3 bits of flags happen to line up. So no transform is needed. 230 * the low 3 bits of flags happen to line up. So no transform is needed.
247 * We can probably optimize here and assume the high bits of newpp are 231 * We can probably optimize here and assume the high bits of newpp are
@@ -249,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
249 */ 233 */
250static long pSeries_lpar_hpte_updatepp(unsigned long slot, 234static long pSeries_lpar_hpte_updatepp(unsigned long slot,
251 unsigned long newpp, 235 unsigned long newpp,
252 unsigned long va, 236 unsigned long vpn,
253 int psize, int ssize, int local) 237 int psize, int ssize, int local)
254{ 238{
255 unsigned long lpar_rc; 239 unsigned long lpar_rc;
256 unsigned long flags = (newpp & 7) | H_AVPN; 240 unsigned long flags = (newpp & 7) | H_AVPN;
257 unsigned long want_v; 241 unsigned long want_v;
258 242
259 want_v = hpte_encode_avpn(va, psize, ssize); 243 want_v = hpte_encode_avpn(vpn, psize, ssize);
260 244
261 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", 245 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
262 want_v, slot, flags, psize); 246 want_v, slot, flags, psize);
@@ -294,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
294 return dword0; 278 return dword0;
295} 279}
296 280
297static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) 281static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
298{ 282{
299 unsigned long hash; 283 unsigned long hash;
300 unsigned long i; 284 unsigned long i;
301 long slot; 285 long slot;
302 unsigned long want_v, hpte_v; 286 unsigned long want_v, hpte_v;
303 287
304 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 288 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
305 want_v = hpte_encode_avpn(va, psize, ssize); 289 want_v = hpte_encode_avpn(vpn, psize, ssize);
306 290
307 /* Bolted entries are always in the primary group */ 291 /* Bolted entries are always in the primary group */
308 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 292 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -322,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
322 unsigned long ea, 306 unsigned long ea,
323 int psize, int ssize) 307 int psize, int ssize)
324{ 308{
325 unsigned long lpar_rc, slot, vsid, va, flags; 309 unsigned long vpn;
310 unsigned long lpar_rc, slot, vsid, flags;
326 311
327 vsid = get_kernel_vsid(ea, ssize); 312 vsid = get_kernel_vsid(ea, ssize);
328 va = hpt_va(ea, vsid, ssize); 313 vpn = hpt_vpn(ea, vsid, ssize);
329 314
330 slot = pSeries_lpar_hpte_find(va, psize, ssize); 315 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
331 BUG_ON(slot == -1); 316 BUG_ON(slot == -1);
332 317
333 flags = newpp & 7; 318 flags = newpp & 7;
@@ -336,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
336 BUG_ON(lpar_rc != H_SUCCESS); 321 BUG_ON(lpar_rc != H_SUCCESS);
337} 322}
338 323
339static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 324static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
340 int psize, int ssize, int local) 325 int psize, int ssize, int local)
341{ 326{
342 unsigned long want_v; 327 unsigned long want_v;
343 unsigned long lpar_rc; 328 unsigned long lpar_rc;
344 unsigned long dummy1, dummy2; 329 unsigned long dummy1, dummy2;
345 330
346 pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 331 pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
347 slot, va, psize, local); 332 slot, vpn, psize, local);
348 333
349 want_v = hpte_encode_avpn(va, psize, ssize); 334 want_v = hpte_encode_avpn(vpn, psize, ssize);
350 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); 335 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
351 if (lpar_rc == H_NOT_FOUND) 336 if (lpar_rc == H_NOT_FOUND)
352 return; 337 return;
@@ -357,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
357static void pSeries_lpar_hpte_removebolted(unsigned long ea, 342static void pSeries_lpar_hpte_removebolted(unsigned long ea,
358 int psize, int ssize) 343 int psize, int ssize)
359{ 344{
360 unsigned long slot, vsid, va; 345 unsigned long vpn;
346 unsigned long slot, vsid;
361 347
362 vsid = get_kernel_vsid(ea, ssize); 348 vsid = get_kernel_vsid(ea, ssize);
363 va = hpt_va(ea, vsid, ssize); 349 vpn = hpt_vpn(ea, vsid, ssize);
364 350
365 slot = pSeries_lpar_hpte_find(va, psize, ssize); 351 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
366 BUG_ON(slot == -1); 352 BUG_ON(slot == -1);
367 353
368 pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); 354 pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
369} 355}
370 356
371/* Flag bits for H_BULK_REMOVE */ 357/* Flag bits for H_BULK_REMOVE */
@@ -381,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
381 */ 367 */
382static void pSeries_lpar_flush_hash_range(unsigned long number, int local) 368static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
383{ 369{
370 unsigned long vpn;
384 unsigned long i, pix, rc; 371 unsigned long i, pix, rc;
385 unsigned long flags = 0; 372 unsigned long flags = 0;
386 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 373 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
387 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 374 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
388 unsigned long param[9]; 375 unsigned long param[9];
389 unsigned long va;
390 unsigned long hash, index, shift, hidx, slot; 376 unsigned long hash, index, shift, hidx, slot;
391 real_pte_t pte; 377 real_pte_t pte;
392 int psize, ssize; 378 int psize, ssize;
@@ -398,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
398 ssize = batch->ssize; 384 ssize = batch->ssize;
399 pix = 0; 385 pix = 0;
400 for (i = 0; i < number; i++) { 386 for (i = 0; i < number; i++) {
401 va = batch->vaddr[i]; 387 vpn = batch->vpn[i];
402 pte = batch->pte[i]; 388 pte = batch->pte[i];
403 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 389 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
404 hash = hpt_hash(va, shift, ssize); 390 hash = hpt_hash(vpn, shift, ssize);
405 hidx = __rpte_to_hidx(pte, index); 391 hidx = __rpte_to_hidx(pte, index);
406 if (hidx & _PTEIDX_SECONDARY) 392 if (hidx & _PTEIDX_SECONDARY)
407 hash = ~hash; 393 hash = ~hash;
408 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 394 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
409 slot += hidx & _PTEIDX_GROUP_IX; 395 slot += hidx & _PTEIDX_GROUP_IX;
410 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { 396 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
411 pSeries_lpar_hpte_invalidate(slot, va, psize, 397 pSeries_lpar_hpte_invalidate(slot, vpn, psize,
412 ssize, local); 398 ssize, local);
413 } else { 399 } else {
414 param[pix] = HBR_REQUEST | HBR_AVPN | slot; 400 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
415 param[pix+1] = hpte_encode_avpn(va, psize, 401 param[pix+1] = hpte_encode_avpn(vpn, psize,
416 ssize); 402 ssize);
417 pix += 2; 403 pix += 2;
418 if (pix == 8) { 404 if (pix == 8) {
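
All of the lpar.c hunks are mechanical fallout of one series-wide change: the hash-MMU helpers stop taking a raw virtual address (va, from hpt_va()) and take a virtual page number (vpn, from hpt_vpn()) instead, and the file's private copy of hpte_encode_avpn() is dropped in favour of the shared one. The call flow itself is unchanged; restated below as a sketch, mirroring pSeries_lpar_hpte_removebolted() above with the same helper names the diff uses.

/*
 * Restatement of the vpn-based flow from the hunk above, not new code:
 * derive the kernel VSID and VPN for an effective address, find the
 * bolted HPTE slot, and invalidate it.
 */
static void removebolted_sketch(unsigned long ea, int psize, int ssize)
{
        unsigned long vsid, vpn;
        long slot;

        vsid = get_kernel_vsid(ea, ssize);      /* segment -> VSID */
        vpn  = hpt_vpn(ea, vsid, ssize);        /* replaces hpt_va() */

        slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
        BUG_ON(slot == -1);                     /* bolted entries must exist */

        pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
}
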
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 3ccebc83dc02..261a577a3dd2 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -65,27 +65,43 @@ pcibios_find_pci_bus(struct device_node *dn)
65EXPORT_SYMBOL_GPL(pcibios_find_pci_bus); 65EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
66 66
67/** 67/**
68 * pcibios_remove_pci_devices - remove all devices under this bus 68 * __pcibios_remove_pci_devices - remove all devices under this bus
69 * @bus: the indicated PCI bus
70 * @purge_pe: destroy the PE on removal of PCI devices
69 * 71 *
70 * Remove all of the PCI devices under this bus both from the 72 * Remove all of the PCI devices under this bus both from the
71 * linux pci device tree, and from the powerpc EEH address cache. 73 * linux pci device tree, and from the powerpc EEH address cache.
74 * By default, the corresponding PE will be destroyed during the
75 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
76 * the corresponding PE won't be destroyed and deallocated.
72 */ 77 */
73void pcibios_remove_pci_devices(struct pci_bus *bus) 78void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
74{ 79{
75 struct pci_dev *dev, *tmp; 80 struct pci_dev *dev, *tmp;
76 struct pci_bus *child_bus; 81 struct pci_bus *child_bus;
77 82
78 /* First go down child busses */ 83 /* First go down child busses */
79 list_for_each_entry(child_bus, &bus->children, node) 84 list_for_each_entry(child_bus, &bus->children, node)
80 pcibios_remove_pci_devices(child_bus); 85 __pcibios_remove_pci_devices(child_bus, purge_pe);
81 86
82 pr_debug("PCI: Removing devices on bus %04x:%02x\n", 87 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
83 pci_domain_nr(bus), bus->number); 88 pci_domain_nr(bus), bus->number);
84 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 89 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
85 pr_debug(" * Removing %s...\n", pci_name(dev)); 90 pr_debug(" * Removing %s...\n", pci_name(dev));
86 eeh_remove_bus_device(dev); 91 eeh_remove_bus_device(dev, purge_pe);
87 pci_stop_and_remove_bus_device(dev); 92 pci_stop_and_remove_bus_device(dev);
88 } 93 }
94}
95
96/**
97 * pcibios_remove_pci_devices - remove all devices under this bus
98 *
99 * Remove all of the PCI devices under this bus both from the
100 * linux pci device tree, and from the powerpc EEH address cache.
101 */
102void pcibios_remove_pci_devices(struct pci_bus *bus)
103{
104 __pcibios_remove_pci_devices(bus, 1);
89} 105}
90EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 106EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
91 107
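
The split above turns the old entry point into a thin wrapper so callers can choose whether PEs die with their devices. A hedged usage sketch; eeh_recovery_remove_sketch() is a hypothetical caller name, not a function in this patch.

/* Hypothetical callers showing the two purge_pe policies. */
static void hotplug_remove_sketch(struct pci_bus *bus)
{
        pcibios_remove_pci_devices(bus);        /* purge_pe = 1: destroy PEs */
}

static void eeh_recovery_remove_sketch(struct pci_bus *bus)
{
        /* Keep the PEs (they are marked EEH_PE_INVALID instead) so the
         * topology can be revalidated when the devices return. */
        __pcibios_remove_pci_devices(bus, 0);
}
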
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 987f441525cb..3a56a639a92e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -60,6 +60,8 @@ static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
60static unsigned long xmon_taken = 1; 60static unsigned long xmon_taken = 1;
61static int xmon_owner; 61static int xmon_owner;
62static int xmon_gate; 62static int xmon_gate;
63#else
64#define xmon_owner 0
63#endif /* CONFIG_SMP */ 65#endif /* CONFIG_SMP */
64 66
65static unsigned long in_xmon __read_mostly = 0; 67static unsigned long in_xmon __read_mostly = 0;
@@ -202,7 +204,13 @@ Commands:\n\
202 di dump instructions\n\ 204 di dump instructions\n\
203 df dump float values\n\ 205 df dump float values\n\
204 dd dump double values\n\ 206 dd dump double values\n\
205 dl dump the kernel log buffer\n\ 207 dl dump the kernel log buffer\n"
208#ifdef CONFIG_PPC64
209 "\
210 dp[#] dump paca for current cpu, or cpu #\n\
211 dpa dump paca for all possible cpus\n"
212#endif
213 "\
206 dr dump stream of raw bytes\n\ 214 dr dump stream of raw bytes\n\
207 e print exception information\n\ 215 e print exception information\n\
208 f flush cache\n\ 216 f flush cache\n\
@@ -2009,6 +2017,95 @@ static void xmon_rawdump (unsigned long adrs, long ndump)
2009 printf("\n"); 2017 printf("\n");
2010} 2018}
2011 2019
2020#ifdef CONFIG_PPC64
2021static void dump_one_paca(int cpu)
2022{
2023 struct paca_struct *p;
2024
2025 if (setjmp(bus_error_jmp) != 0) {
2026 printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
2027 return;
2028 }
2029
2030 catch_memory_errors = 1;
2031 sync();
2032
2033 p = &paca[cpu];
2034
2035 printf("paca for cpu 0x%x @ %p:\n", cpu, p);
2036
2037 printf(" %-*s = %s\n", 16, "possible", cpu_possible(cpu) ? "yes" : "no");
2038 printf(" %-*s = %s\n", 16, "present", cpu_present(cpu) ? "yes" : "no");
2039 printf(" %-*s = %s\n", 16, "online", cpu_online(cpu) ? "yes" : "no");
2040
2041#define DUMP(paca, name, format) \
2042 printf(" %-*s = %#-*"format"\t(0x%lx)\n", 16, #name, 18, paca->name, \
2043 offsetof(struct paca_struct, name));
2044
2045 DUMP(p, lock_token, "x");
2046 DUMP(p, paca_index, "x");
2047 DUMP(p, kernel_toc, "lx");
2048 DUMP(p, kernelbase, "lx");
2049 DUMP(p, kernel_msr, "lx");
2050#ifdef CONFIG_PPC_STD_MMU_64
2051 DUMP(p, stab_real, "lx");
2052 DUMP(p, stab_addr, "lx");
2053#endif
2054 DUMP(p, emergency_sp, "p");
2055 DUMP(p, data_offset, "lx");
2056 DUMP(p, hw_cpu_id, "x");
2057 DUMP(p, cpu_start, "x");
2058 DUMP(p, kexec_state, "x");
2059 DUMP(p, __current, "p");
2060 DUMP(p, kstack, "lx");
2061 DUMP(p, stab_rr, "lx");
2062 DUMP(p, saved_r1, "lx");
2063 DUMP(p, trap_save, "x");
2064 DUMP(p, soft_enabled, "x");
2065 DUMP(p, irq_happened, "x");
2066 DUMP(p, io_sync, "x");
2067 DUMP(p, irq_work_pending, "x");
2068 DUMP(p, nap_state_lost, "x");
2069
2070#undef DUMP
2071
2072 catch_memory_errors = 0;
2073 sync();
2074}
2075
2076static void dump_all_pacas(void)
2077{
2078 int cpu;
2079
2080 if (num_possible_cpus() == 0) {
2081 printf("No possible cpus, use 'dp #' to dump individual cpus\n");
2082 return;
2083 }
2084
2085 for_each_possible_cpu(cpu)
2086 dump_one_paca(cpu);
2087}
2088
2089static void dump_pacas(void)
2090{
2091 unsigned long num;
2092 int c;
2093
2094 c = inchar();
2095 if (c == 'a') {
2096 dump_all_pacas();
2097 return;
2098 }
2099
2100 termch = c; /* Put c back, it wasn't 'a' */
2101
2102 if (scanhex(&num))
2103 dump_one_paca(num);
2104 else
2105 dump_one_paca(xmon_owner);
2106}
2107#endif
2108
2012#define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 2109#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
2013 || ('a' <= (c) && (c) <= 'f') \ 2110 || ('a' <= (c) && (c) <= 'f') \
2014 || ('A' <= (c) && (c) <= 'F')) 2111 || ('A' <= (c) && (c) <= 'F'))
@@ -2018,6 +2115,14 @@ dump(void)
2018 int c; 2115 int c;
2019 2116
2020 c = inchar(); 2117 c = inchar();
2118
2119#ifdef CONFIG_PPC64
2120 if (c == 'p') {
2121 dump_pacas();
2122 return;
2123 }
2124#endif
2125
2021 if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n') 2126 if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
2022 termch = c; 2127 termch = c;
2023 scanhex((void *)&adrs); 2128 scanhex((void *)&adrs);
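
The DUMP() macro in dump_one_paca() is a compact printf/offsetof pattern: one line prints a field's name, its value, and its byte offset within struct paca_struct. The same trick works for any struct; below is a self-contained userspace re-creation on a made-up struct (field names invented for the example).

#include <stdio.h>
#include <stddef.h>

/* Userspace re-creation of xmon's DUMP() trick on a toy struct. */
struct toy {
        unsigned int  index;
        unsigned long base;
};

#define DUMP(p, name, fmt) \
        printf("  %-*s = %#-*" fmt "\t(0x%zx)\n", 16, #name, 18, \
               (p)->name, offsetof(struct toy, name))

int main(void)
{
        struct toy t = { .index = 3, .base = 0xc000000000000000UL };

        DUMP(&t, index, "x");
        DUMP(&t, base, "lx");
        return 0;
}
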
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 33c10864d2f7..d2292be6fb90 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -246,16 +246,13 @@ static void __devinit fixup_read_and_payload_sizes(void)
246 246
247 /* Scan for the smallest maximum payload size. */ 247 /* Scan for the smallest maximum payload size. */
248 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 248 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
249 int pcie_caps_offset;
250 u32 devcap; 249 u32 devcap;
251 int max_payload; 250 int max_payload;
252 251
253 pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); 252 if (!pci_is_pcie(dev))
254 if (pcie_caps_offset == 0)
255 continue; 253 continue;
256 254
257 pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, 255 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap);
258 &devcap);
259 max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; 256 max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
260 if (max_payload < smallest_max_payload) 257 if (max_payload < smallest_max_payload)
261 smallest_max_payload = max_payload; 258 smallest_max_payload = max_payload;
@@ -263,21 +260,10 @@ static void __devinit fixup_read_and_payload_sizes(void)
263 260
264 /* Now, set the max_payload_size for all devices to that value. */ 261 /* Now, set the max_payload_size for all devices to that value. */
265 new_values = (max_read_size << 12) | (smallest_max_payload << 5); 262 new_values = (max_read_size << 12) | (smallest_max_payload << 5);
266 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 263 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
267 int pcie_caps_offset; 264 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
268 u16 devctl; 265 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
269 266 new_values);
270 pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
271 if (pcie_caps_offset == 0)
272 continue;
273
274 pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
275 &devctl);
276 devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
277 devctl |= new_values;
278 pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
279 devctl);
280 }
281} 267}
282 268
283 269
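
This hunk (like the mthca and qib changes later in the patch) converts open-coded "find the PCIe capability offset, then read/modify/write a register" sequences to the pcie_capability_*() accessors that entered the PCI core alongside this series. Roughly what the clear-and-set helper does internally, as a simplified sketch; the real implementation in drivers/pci/access.c also special-cases registers that a given device type doesn't implement.

/* Simplified model of pcie_capability_clear_and_set_word(). */
static int rmw_pcie_word_sketch(struct pci_dev *dev, int pos,
                                u16 clear, u16 set)
{
        u16 val;
        int ret;

        if (!pci_is_pcie(dev))
                return -EINVAL;

        ret = pcie_capability_read_word(dev, pos, &val);
        if (ret)
                return ret;

        val &= ~clear;
        val |= set;
        return pcie_capability_write_word(dev, pos, val);
}
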
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index f60238559af3..0748fe0c8a73 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -114,7 +114,7 @@ static void deliver_alarm(void)
114 skew += this_tick - last_tick; 114 skew += this_tick - last_tick;
115 115
116 while (skew >= one_tick) { 116 while (skew >= one_tick) {
117 alarm_handler(SIGVTALRM, NULL); 117 alarm_handler(SIGVTALRM, NULL, NULL);
118 skew -= one_tick; 118 skew -= one_tick;
119 } 119 }
120 120
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b65a76133f4f..5141d808e751 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1283 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); 1283 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1284 1284
1285 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; 1285 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1286 if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { 1286 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1287 args->op.cmd = MMUEXT_INVLPG_MULTI; 1287 args->op.cmd = MMUEXT_INVLPG_MULTI;
1288 args->op.arg1.linear_addr = start; 1288 args->op.arg1.linear_addr = start;
1289 } 1289 }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index d4b255463253..76ba0e97e530 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
599 if (p2m_index(set_pfn)) 599 if (p2m_index(set_pfn))
600 return false; 600 return false;
601 601
602 for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { 602 for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
603 topidx = p2m_top_index(pfn); 603 topidx = p2m_top_index(pfn);
604 604
605 if (!p2m_top[topidx]) 605 if (!p2m_top[topidx])
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 78efb0306a44..34d94c762a1e 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
250 return -EINVAL; 250 return -EINVAL;
251 251
252 /* Sanitise input arguments */ 252 /* Sanitise input arguments */
253 alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); 253 alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
254 base = ALIGN(base, alignment); 254 base = ALIGN(base, alignment);
255 size = ALIGN(size, alignment); 255 size = ALIGN(size, alignment);
256 limit &= ~(alignment - 1); 256 limit &= ~(alignment - 1);
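
The one-character change is an off-by-one in allocator terms: the buddy allocator's largest block is order MAX_ORDER - 1, so aligning CMA regions to PAGE_SIZE << MAX_ORDER demanded twice the alignment that can ever be needed. Illustrative arithmetic below, assuming typical x86-64 values (PAGE_SIZE 4 KiB, MAX_ORDER 11, pageblock_order 10).

#include <stdio.h>

/* Illustrative only: typical values assumed. */
#define PAGE_SIZE       4096UL
#define MAX_ORDER       11
#define PAGEBLOCK_ORDER 10
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned long old_align = PAGE_SIZE << MAX(MAX_ORDER, PAGEBLOCK_ORDER);
        unsigned long new_align = PAGE_SIZE << MAX(MAX_ORDER - 1, PAGEBLOCK_ORDER);

        /* Largest buddy block is order MAX_ORDER - 1, i.e. 4 MiB here. */
        printf("old: %lu MiB, new: %lu MiB\n",
               old_align >> 20, new_align >> 20);
        return 0;
}
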
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b16c8a72a2e2..ba7926f5c099 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ
294 294
295config GPIO_MC9S08DZ60 295config GPIO_MC9S08DZ60
296 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" 296 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions"
297 depends on I2C && MACH_MX35_3DS 297 depends on I2C=y && MACH_MX35_3DS
298 help 298 help
299 Select this to enable the MC9S08DZ60 GPIO driver 299 Select this to enable the MC9S08DZ60 GPIO driver
300 300
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index ae37181798b3..ec48ed512628 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
247 247
248 p->irq_base = irq_alloc_descs(pdata->irq_base, 0, 248 p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
249 pdata->number_of_pins, numa_node_id()); 249 pdata->number_of_pins, numa_node_id());
250 if (IS_ERR_VALUE(p->irq_base)) { 250 if (p->irq_base < 0) {
251 dev_err(&pdev->dev, "cannot get irq_desc\n"); 251 dev_err(&pdev->dev, "cannot get irq_desc\n");
252 return -ENXIO; 252 return p->irq_base;
253 } 253 }
254 pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", 254 pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
255 pdata->gpio_base, pdata->number_of_pins, p->irq_base); 255 pdata->gpio_base, pdata->number_of_pins, p->irq_base);
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index e97016af6443..b62d443e9a59 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
170 rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; 170 rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
171 171
172 rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; 172 rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
173 rdc321x_gpio_dev->chip.owner = THIS_MODULE;
173 rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; 174 rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
174 rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; 175 rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
175 rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; 176 rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a18c4aa68b1e..f1a45997aea8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
82 gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); 82 gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
83 83
84 of_node_put(gg_data.gpiospec.np); 84 of_node_put(gg_data.gpiospec.np);
85 pr_debug("%s exited with status %d\n", __func__, ret); 85 pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
86 return gg_data.out_gpio; 86 return gg_data.out_gpio;
87} 87}
88EXPORT_SYMBOL(of_get_named_gpio_flags); 88EXPORT_SYMBOL(of_get_named_gpio_flags);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e93b80a6d4e9..ed3340adeb6f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -77,13 +77,9 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
77void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) 77void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
78{ 78{
79 u16 ctl, v; 79 u16 ctl, v;
80 int cap, err; 80 int err;
81 81
82 cap = pci_pcie_cap(rdev->pdev); 82 err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
83 if (!cap)
84 return;
85
86 err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
87 if (err) 83 if (err)
88 return; 84 return;
89 85
@@ -95,7 +91,7 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
95 if ((v == 0) || (v == 6) || (v == 7)) { 91 if ((v == 0) || (v == 6) || (v == 7)) {
96 ctl &= ~PCI_EXP_DEVCTL_READRQ; 92 ctl &= ~PCI_EXP_DEVCTL_READRQ;
97 ctl |= (2 << 12); 93 ctl |= (2 << 12);
98 pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); 94 pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
99 } 95 }
100} 96}
101 97
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8bf8a64e5115..8bcd168fffae 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
996 struct hid_driver *hdrv = hid->driver; 996 struct hid_driver *hdrv = hid->driver;
997 int ret; 997 int ret;
998 998
999 hid_dump_input(hid, usage, value); 999 if (!list_empty(&hid->debug_list))
1000 hid_dump_input(hid, usage, value);
1000 1001
1001 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1002 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1002 ret = hdrv->event(hid, field, usage, value); 1003 ret = hdrv->event(hid, field, usage, value);
@@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1558 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 1559 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
1559 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1560 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
1560 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, 1561 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
1561 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, 1562#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
1563 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
1564#endif
1562 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, 1565 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
1563 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, 1566 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
1564 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, 1567 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 0f9c146fc00d..4d524b5f52f5 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
439 struct dj_report *dj_report; 439 struct dj_report *dj_report;
440 int retval; 440 int retval;
441 441
442 dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); 442 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
443 if (!dj_report) 443 if (!dj_report)
444 return -ENOMEM; 444 return -ENOMEM;
445 dj_report->report_id = REPORT_ID_DJ_SHORT; 445 dj_report->report_id = REPORT_ID_DJ_SHORT;
@@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
456 struct dj_report *dj_report; 456 struct dj_report *dj_report;
457 int retval; 457 int retval;
458 458
459 dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); 459 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
460 if (!dj_report) 460 if (!dj_report)
461 return -ENOMEM; 461 return -ENOMEM;
462 dj_report->report_id = REPORT_ID_DJ_SHORT; 462 dj_report->report_id = REPORT_ID_DJ_SHORT;
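
The two kzalloc() fixes above are the classic sizeof-on-the-pointer bug: sizeof(dj_report) is the size of the pointer variable (8 bytes on 64-bit), not of struct dj_report, so the driver was writing a full report into an 8-byte allocation. A minimal userspace reproduction follows (struct layout invented for the demo); the usual defence is sizeof(*dj_report), which stays correct even if the variable's type changes.

#include <stdio.h>

/* Invented layout, just to make the sizes differ visibly. */
struct dj_report {
        unsigned char report_id;
        unsigned char device_index;
        unsigned char report_type;
        unsigned char data[12];
};

int main(void)
{
        struct dj_report *dj_report = NULL;

        /* The bug: the operand is the pointer, not the struct. */
        printf("sizeof(dj_report)        = %zu\n", sizeof(dj_report));
        printf("sizeof(struct dj_report) = %zu\n", sizeof(struct dj_report));
        printf("sizeof(*dj_report)       = %zu\n", sizeof(*dj_report));
        return 0;
}
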
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 903eef3d3e10..991e85c7325c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
70 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 70 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
71 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 71 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
72 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 74 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
74 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 75 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
75 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, 76 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 4fa3534ec233..74c6a9426047 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -241,16 +241,16 @@ good:
241 241
242 if (hca_pcie_cap) { 242 if (hca_pcie_cap) {
243 devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4]; 243 devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
244 if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL, 244 if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL,
245 devctl)) { 245 devctl)) {
246 err = -ENODEV; 246 err = -ENODEV;
247 mthca_err(mdev, "Couldn't restore HCA PCI Express " 247 mthca_err(mdev, "Couldn't restore HCA PCI Express "
248 "Device Control register, aborting.\n"); 248 "Device Control register, aborting.\n");
249 goto out; 249 goto out;
250 } 250 }
251 linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4]; 251 linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
252 if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL, 252 if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
253 linkctl)) { 253 linkctl)) {
254 err = -ENODEV; 254 err = -ENODEV;
255 mthca_err(mdev, "Couldn't restore HCA PCI Express " 255 mthca_err(mdev, "Couldn't restore HCA PCI Express "
256 "Link control register, aborting.\n"); 256 "Link control register, aborting.\n");
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 062c301ebf53..900137173210 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -273,10 +273,9 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
273 struct qib_msix_entry *entry) 273 struct qib_msix_entry *entry)
274{ 274{
275 u16 linkstat, speed; 275 u16 linkstat, speed;
276 int pos = 0, pose, ret = 1; 276 int pos = 0, ret = 1;
277 277
278 pose = pci_pcie_cap(dd->pcidev); 278 if (!pci_is_pcie(dd->pcidev)) {
279 if (!pose) {
280 qib_dev_err(dd, "Can't find PCI Express capability!\n"); 279 qib_dev_err(dd, "Can't find PCI Express capability!\n");
281 /* set up something... */ 280 /* set up something... */
282 dd->lbus_width = 1; 281 dd->lbus_width = 1;
@@ -298,7 +297,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
298 if (!pos) 297 if (!pos)
299 qib_enable_intx(dd->pcidev); 298 qib_enable_intx(dd->pcidev);
300 299
301 pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat); 300 pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
302 /* 301 /*
303 * speed is bits 0-3, linkwidth is bits 4-8 302 * speed is bits 0-3, linkwidth is bits 4-8
304 * no defines for them in headers 303 * no defines for them in headers
@@ -516,7 +515,6 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
516{ 515{
517 int r; 516 int r;
518 struct pci_dev *parent; 517 struct pci_dev *parent;
519 int ppos;
520 u16 devid; 518 u16 devid;
521 u32 mask, bits, val; 519 u32 mask, bits, val;
522 520
@@ -529,8 +527,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
529 qib_devinfo(dd->pcidev, "Parent not root\n"); 527 qib_devinfo(dd->pcidev, "Parent not root\n");
530 return 1; 528 return 1;
531 } 529 }
532 ppos = pci_pcie_cap(parent); 530 if (!pci_is_pcie(parent))
533 if (!ppos)
534 return 1; 531 return 1;
535 if (parent->vendor != 0x8086) 532 if (parent->vendor != 0x8086)
536 return 1; 533 return 1;
@@ -587,7 +584,6 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
587{ 584{
588 int ret = 1; /* Assume the worst */ 585 int ret = 1; /* Assume the worst */
589 struct pci_dev *parent; 586 struct pci_dev *parent;
590 int ppos, epos;
591 u16 pcaps, pctl, ecaps, ectl; 587 u16 pcaps, pctl, ecaps, ectl;
592 int rc_sup, ep_sup; 588 int rc_sup, ep_sup;
593 int rc_cur, ep_cur; 589 int rc_cur, ep_cur;
@@ -598,19 +594,15 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
598 qib_devinfo(dd->pcidev, "Parent not root\n"); 594 qib_devinfo(dd->pcidev, "Parent not root\n");
599 goto bail; 595 goto bail;
600 } 596 }
601 ppos = pci_pcie_cap(parent); 597
602 if (ppos) { 598 if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
603 pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
604 pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
605 } else
606 goto bail; 599 goto bail;
600 pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps);
601 pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl);
607 /* Find out supported and configured values for endpoint (us) */ 602 /* Find out supported and configured values for endpoint (us) */
608 epos = pci_pcie_cap(dd->pcidev); 603 pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps);
609 if (epos) { 604 pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
610 pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps); 605
611 pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
612 } else
613 goto bail;
614 ret = 0; 606 ret = 0;
615 /* Find max payload supported by root, endpoint */ 607 /* Find max payload supported by root, endpoint */
616 rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD); 608 rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
@@ -629,14 +621,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
629 rc_cur = rc_sup; 621 rc_cur = rc_sup;
630 pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | 622 pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
631 val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD); 623 val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
632 pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); 624 pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
633 } 625 }
634 /* If less than (allowed, supported), bump endpoint payload */ 626 /* If less than (allowed, supported), bump endpoint payload */
635 if (rc_sup > ep_cur) { 627 if (rc_sup > ep_cur) {
636 ep_cur = rc_sup; 628 ep_cur = rc_sup;
637 ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) | 629 ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
638 val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD); 630 val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
639 pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); 631 pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
640 } 632 }
641 633
642 /* 634 /*
@@ -654,13 +646,13 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
654 rc_cur = rc_sup; 646 rc_cur = rc_sup;
655 pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) | 647 pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
656 val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ); 648 val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
657 pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); 649 pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
658 } 650 }
659 if (rc_sup > ep_cur) { 651 if (rc_sup > ep_cur) {
660 ep_cur = rc_sup; 652 ep_cur = rc_sup;
661 ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) | 653 ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
662 val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ); 654 val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
663 pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); 655 pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
664 } 656 }
665bail: 657bail:
666 return ret; 658 return ret;
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index ff4c0a87a25f..ce68e361558c 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
358 /* Inhibit KDI and KRI interrupts. */ 358 /* Inhibit KDI and KRI interrupts. */
359 reg_val = readw(keypad->mmio_base + KPSR); 359 reg_val = readw(keypad->mmio_base + KPSR);
360 reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); 360 reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE);
361 reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD;
361 writew(reg_val, keypad->mmio_base + KPSR); 362 writew(reg_val, keypad->mmio_base + KPSR);
362 363
363 /* Columns as open drain and disable all rows */ 364 /* Columns as open drain and disable all rows */
@@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
515 input_set_drvdata(input_dev, keypad); 516 input_set_drvdata(input_dev, keypad);
516 517
517 /* Ensure that the keypad will stay dormant until opened */ 518 /* Ensure that the keypad will stay dormant until opened */
519 clk_enable(keypad->clk);
518 imx_keypad_inhibit(keypad); 520 imx_keypad_inhibit(keypad);
521 clk_disable(keypad->clk);
519 522
520 error = request_irq(irq, imx_keypad_irq_handler, 0, 523 error = request_irq(irq, imx_keypad_irq_handler, 0,
521 pdev->name, keypad); 524 pdev->name, keypad);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5ec774d6c82b..6918773ce024 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -177,6 +177,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
177 }, 177 },
178 }, 178 },
179 { 179 {
180 /* Gigabyte T1005 - defines wrong chassis type ("Other") */
181 .matches = {
182 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
183 DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
184 },
185 },
186 {
187 /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
190 DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
191 },
192 },
193 {
180 .matches = { 194 .matches = {
181 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 195 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
182 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), 196 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 002041975de9..532d067a9e07 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A =
1848 { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 1848 { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
1849 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1849 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1850static const struct wacom_features wacom_features_0xF4 = 1850static const struct wacom_features wacom_features_0xF4 =
1851 { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1851 { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
1852 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1853static const struct wacom_features wacom_features_0xF8 =
1854 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
1852 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1855 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1853static const struct wacom_features wacom_features_0x3F = 1856static const struct wacom_features wacom_features_0x3F =
1854 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 1857 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
@@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = {
2091 { USB_DEVICE_WACOM(0xEF) }, 2094 { USB_DEVICE_WACOM(0xEF) },
2092 { USB_DEVICE_WACOM(0x47) }, 2095 { USB_DEVICE_WACOM(0x47) },
2093 { USB_DEVICE_WACOM(0xF4) }, 2096 { USB_DEVICE_WACOM(0xF4) },
2097 { USB_DEVICE_WACOM(0xF8) },
2094 { USB_DEVICE_WACOM(0xFA) }, 2098 { USB_DEVICE_WACOM(0xFA) },
2095 { USB_DEVICE_LENOVO(0x6004) }, 2099 { USB_DEVICE_LENOVO(0x6004) },
2096 { } 2100 { }
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 9afc777a40a7..b06a5e3a665e 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
602{ 602{
603 if (tsdata->debug_dir) 603 if (tsdata->debug_dir)
604 debugfs_remove_recursive(tsdata->debug_dir); 604 debugfs_remove_recursive(tsdata->debug_dir);
605 kfree(tsdata->raw_buffer);
605} 606}
606 607
607#else 608#else
@@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
843 if (gpio_is_valid(pdata->reset_pin)) 844 if (gpio_is_valid(pdata->reset_pin))
844 gpio_free(pdata->reset_pin); 845 gpio_free(pdata->reset_pin);
845 846
846 kfree(tsdata->raw_buffer);
847 kfree(tsdata); 847 kfree(tsdata);
848 848
849 return 0; 849 return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2297ec193eb4..db820d7dd0bc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2351,7 +2351,7 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2351 return 0; 2351 return 0;
2352 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) 2352 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2353 return 0; 2353 return 0;
2354 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 2354 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2355 return 0; 2355 return 0;
2356 2356
2357 /* 2357 /*
@@ -3546,10 +3546,10 @@ found:
3546 struct pci_dev *bridge = bus->self; 3546 struct pci_dev *bridge = bus->self;
3547 3547
3548 if (!bridge || !pci_is_pcie(bridge) || 3548 if (!bridge || !pci_is_pcie(bridge) ||
3549 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 3549 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3550 return 0; 3550 return 0;
3551 3551
3552 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { 3552 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
3553 for (i = 0; i < atsru->devices_cnt; i++) 3553 for (i = 0; i < atsru->devices_cnt; i++)
3554 if (atsru->devices[i] == bridge) 3554 if (atsru->devices[i] == bridge)
3555 return 1; 3555 return 1;
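
The pdev->pcie_type field is being retired in this series in favour of the pci_pcie_type() accessor, which decodes the port type from the cached PCIe capability flags word instead of carrying a separate struct member. What the accessor amounts to, per include/linux/pci.h of this era, shown as a standalone sketch:

#include <linux/types.h>        /* u16 */
#include <linux/pci_regs.h>     /* PCI_EXP_FLAGS_TYPE */

/* The device/port type lives in bits 7:4 of the PCIe capability
 * flags register; pci_pcie_type() is essentially this decode. */
static inline int pcie_type_sketch(u16 pcie_flags_reg)
{
        return (pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4;
}
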
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f1c84decb192..172a768036d8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1411 /* complete ongoing async transfer before issuing discard */ 1411 /* complete ongoing async transfer before issuing discard */
1412 if (card->host->areq) 1412 if (card->host->areq)
1413 mmc_blk_issue_rw_rq(mq, NULL); 1413 mmc_blk_issue_rw_rq(mq, NULL);
1414 if (req->cmd_flags & REQ_SECURE) 1414 if (req->cmd_flags & REQ_SECURE &&
1415 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1415 ret = mmc_blk_issue_secdiscard_rq(mq, req); 1416 ret = mmc_blk_issue_secdiscard_rq(mq, req);
1416 else 1417 else
1417 ret = mmc_blk_issue_discard_rq(mq, req); 1418 ret = mmc_blk_issue_discard_rq(mq, req);
@@ -1716,6 +1717,7 @@ force_ro_fail:
1716#define CID_MANFID_SANDISK 0x2 1717#define CID_MANFID_SANDISK 0x2
1717#define CID_MANFID_TOSHIBA 0x11 1718#define CID_MANFID_TOSHIBA 0x11
1718#define CID_MANFID_MICRON 0x13 1719#define CID_MANFID_MICRON 0x13
1720#define CID_MANFID_SAMSUNG 0x15
1719 1721
1720static const struct mmc_fixup blk_fixups[] = 1722static const struct mmc_fixup blk_fixups[] =
1721{ 1723{
@@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] =
1752 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 1754 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
1753 MMC_QUIRK_LONG_READ_TIME), 1755 MMC_QUIRK_LONG_READ_TIME),
1754 1756
1757 /*
1758 * On these Samsung MoviNAND parts, performing secure erase or
1759 * secure trim can result in unrecoverable corruption due to a
1760 * firmware bug.
1761 */
1762 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1763 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1764 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1765 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1766 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1767 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1768 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1769 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1770 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1771 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1772 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1773 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1774 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1775 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1776 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1777 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1778
1755 END_FIXUP 1779 END_FIXUP
1756}; 1780};
1757 1781
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 322412cec4ee..a53c7c478e05 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -81,6 +81,7 @@ struct atmel_mci_caps {
81 bool has_bad_data_ordering; 81 bool has_bad_data_ordering;
82 bool need_reset_after_xfer; 82 bool need_reset_after_xfer;
83 bool need_blksz_mul_4; 83 bool need_blksz_mul_4;
84 bool need_notbusy_for_read_ops;
84}; 85};
85 86
86struct atmel_mci_dma { 87struct atmel_mci_dma {
@@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv)
1625 __func__); 1626 __func__);
1626 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1627 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1627 1628
1628 if (host->data->flags & MMC_DATA_WRITE) { 1629 if (host->caps.need_notbusy_for_read_ops ||
1630 (host->data->flags & MMC_DATA_WRITE)) {
1629 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1631 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1630 state = STATE_WAITING_NOTBUSY; 1632 state = STATE_WAITING_NOTBUSY;
1631 } else if (host->mrq->stop) { 1633 } else if (host->mrq->stop) {
@@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2218 host->caps.has_bad_data_ordering = 1; 2220 host->caps.has_bad_data_ordering = 1;
2219 host->caps.need_reset_after_xfer = 1; 2221 host->caps.need_reset_after_xfer = 1;
2220 host->caps.need_blksz_mul_4 = 1; 2222 host->caps.need_blksz_mul_4 = 1;
2223 host->caps.need_notbusy_for_read_ops = 0;
2221 2224
2222 /* keep only major version number */ 2225 /* keep only major version number */
2223 switch (version & 0xf00) { 2226 switch (version & 0xf00) {
@@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2238 case 0x200: 2241 case 0x200:
2239 host->caps.has_rwproof = 1; 2242 host->caps.has_rwproof = 1;
2240 host->caps.need_blksz_mul_4 = 0; 2243 host->caps.need_blksz_mul_4 = 0;
2244 host->caps.need_notbusy_for_read_ops = 1;
2241 case 0x100: 2245 case 0x100:
2242 host->caps.has_bad_data_ordering = 0; 2246 host->caps.has_bad_data_ordering = 0;
2243 host->caps.need_reset_after_xfer = 0; 2247 host->caps.need_reset_after_xfer = 0;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 03666174ca48..a17dd7363ceb 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -49,13 +49,6 @@
49#define bfin_write_SDH_CFG bfin_write_RSI_CFG 49#define bfin_write_SDH_CFG bfin_write_RSI_CFG
50#endif 50#endif
51 51
52struct dma_desc_array {
53 unsigned long start_addr;
54 unsigned short cfg;
55 unsigned short x_count;
56 short x_modify;
57} __packed;
58
59struct sdh_host { 52struct sdh_host {
60 struct mmc_host *mmc; 53 struct mmc_host *mmc;
61 spinlock_t lock; 54 spinlock_t lock;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 72dc3cde646d..af40d227bece 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
627{ 627{
628 struct dw_mci *host = slot->host; 628 struct dw_mci *host = slot->host;
629 u32 div; 629 u32 div;
630 u32 clk_en_a;
630 631
631 if (slot->clock != host->current_speed) { 632 if (slot->clock != host->current_speed) {
632 div = host->bus_hz / slot->clock; 633 div = host->bus_hz / slot->clock;
@@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
659 mci_send_cmd(slot, 660 mci_send_cmd(slot,
660 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 661 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
661 662
662 /* enable clock */ 663 /* enable clock; only low power if no SDIO */
663 mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | 664 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
664 SDMMC_CLKEN_LOW_PWR) << slot->id)); 665 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
666 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
667 mci_writel(host, CLKENA, clk_en_a);
665 668
666 /* inform CIU */ 669 /* inform CIU */
667 mci_send_cmd(slot, 670 mci_send_cmd(slot,
@@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
862 return present; 865 return present;
863} 866}
864 867
868/*
869 * Disable lower power mode.
870 *
871 * Low power mode will stop the card clock when idle. According to the
872 * description of the CLKENA register we should disable low power mode
873 * for SDIO cards if we need SDIO interrupts to work.
874 *
875 * This function is fast if low power mode is already disabled.
876 */
877static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
878{
879 struct dw_mci *host = slot->host;
880 u32 clk_en_a;
881 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
882
883 clk_en_a = mci_readl(host, CLKENA);
884
885 if (clk_en_a & clken_low_pwr) {
886 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
887 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
888 SDMMC_CMD_PRV_DAT_WAIT, 0);
889 }
890}
891
865static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 892static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
866{ 893{
867 struct dw_mci_slot *slot = mmc_priv(mmc); 894 struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
871 /* Enable/disable Slot Specific SDIO interrupt */ 898 /* Enable/disable Slot Specific SDIO interrupt */
872 int_mask = mci_readl(host, INTMASK); 899 int_mask = mci_readl(host, INTMASK);
873 if (enb) { 900 if (enb) {
901 /*
902 * Turn off low power mode if it was enabled. This is a bit of
903 * a heavy operation and we disable / enable IRQs a lot, so
904 * we'll leave low power mode disabled and it will get
905 * re-enabled in dw_mci_setup_bus().
906 */
907 dw_mci_disable_low_power(slot);
908
874 mci_writel(host, INTMASK, 909 mci_writel(host, INTMASK,
875 (int_mask | SDMMC_INT_SDIO(slot->id))); 910 (int_mask | SDMMC_INT_SDIO(slot->id)));
876 } else { 911 } else {
@@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1429 nbytes += len; 1464 nbytes += len;
1430 remain -= len; 1465 remain -= len;
1431 } while (remain); 1466 } while (remain);
1432 sg_miter->consumed = offset;
1433 1467
1468 sg_miter->consumed = offset;
1434 status = mci_readl(host, MINTSTS); 1469 status = mci_readl(host, MINTSTS);
1435 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1470 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1436 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1437 host->data_status = status;
1438 data->bytes_xfered += nbytes;
1439 sg_miter_stop(sg_miter);
1440 host->sg = NULL;
1441 smp_wmb();
1442
1443 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1444
1445 tasklet_schedule(&host->tasklet);
1446 return;
1447 }
1448 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1471 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
1449 data->bytes_xfered += nbytes; 1472 data->bytes_xfered += nbytes;
1450 1473
@@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
1497 nbytes += len; 1520 nbytes += len;
1498 remain -= len; 1521 remain -= len;
1499 } while (remain); 1522 } while (remain);
1500 sg_miter->consumed = offset;
1501 1523
1524 sg_miter->consumed = offset;
1502 status = mci_readl(host, MINTSTS); 1525 status = mci_readl(host, MINTSTS);
1503 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1526 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1504 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1505 host->data_status = status;
1506 data->bytes_xfered += nbytes;
1507 sg_miter_stop(sg_miter);
1508 host->sg = NULL;
1509
1510 smp_wmb();
1511
1512 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1513
1514 tasklet_schedule(&host->tasklet);
1515 return;
1516 }
1517 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1527 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1518 data->bytes_xfered += nbytes; 1528 data->bytes_xfered += nbytes;
1519 1529
@@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1547static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 1557static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1548{ 1558{
1549 struct dw_mci *host = dev_id; 1559 struct dw_mci *host = dev_id;
1550 u32 status, pending; 1560 u32 pending;
1551 unsigned int pass_count = 0; 1561 unsigned int pass_count = 0;
1552 int i; 1562 int i;
1553 1563
1554 do { 1564 do {
1555 status = mci_readl(host, RINTSTS);
1556 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 1565 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1557 1566
1558 /* 1567 /*
@@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1570 1579
1571 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1580 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1572 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1581 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1573 host->cmd_status = status; 1582 host->cmd_status = pending;
1574 smp_wmb(); 1583 smp_wmb();
1575 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1584 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1576 } 1585 }
@@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1578 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1587 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1579 /* if there is an error, report DATA_ERROR */ 1588 /* if there is an error, report DATA_ERROR */
1580 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1589 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1581 host->data_status = status; 1590 host->data_status = pending;
1582 smp_wmb(); 1591 smp_wmb();
1583 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1592 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1584 if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | 1593 tasklet_schedule(&host->tasklet);
1585 SDMMC_INT_SBE | SDMMC_INT_EBE)))
1586 tasklet_schedule(&host->tasklet);
1587 } 1594 }
1588 1595
1589 if (pending & SDMMC_INT_DATA_OVER) { 1596 if (pending & SDMMC_INT_DATA_OVER) {
1590 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1597 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1591 if (!host->data_status) 1598 if (!host->data_status)
1592 host->data_status = status; 1599 host->data_status = pending;
1593 smp_wmb(); 1600 smp_wmb();
1594 if (host->dir_status == DW_MCI_RECV_STATUS) { 1601 if (host->dir_status == DW_MCI_RECV_STATUS) {
1595 if (host->sg != NULL) 1602 if (host->sg != NULL)
@@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1613 1620
1614 if (pending & SDMMC_INT_CMD_DONE) { 1621 if (pending & SDMMC_INT_CMD_DONE) {
1615 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1622 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1616 dw_mci_cmd_interrupt(host, status); 1623 dw_mci_cmd_interrupt(host, pending);
1617 } 1624 }
1618 1625
1619 if (pending & SDMMC_INT_CD) { 1626 if (pending & SDMMC_INT_CD) {
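The dw_mmc hunks above implement one rule: the low-power clock gate may only be set while the slot's SDIO interrupt is masked, since a gated card clock stops SDIO interrupt delivery on an idle bus. A condensed sketch of that decision (mci_readl()/mci_writel() and the SDMMC_* masks are the driver's own; surrounding locking is omitted):

u32 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;

/* Gate the clock when idle only if no SDIO IRQ is in use. */
if (!(mci_readl(slot->host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
        clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;

mci_writel(slot->host, CLKENA, clk_en_a);
/* Tell the CIU to latch the new clock setting. */
mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);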
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index a51f9309ffbb..ad3fcea1269e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
285 writel(stat & MXS_MMC_IRQ_BITS, 285 writel(stat & MXS_MMC_IRQ_BITS,
286 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); 286 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
287 287
288 spin_unlock(&host->lock);
289
288 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) 290 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
289 mmc_signal_sdio_irq(host->mmc); 291 mmc_signal_sdio_irq(host->mmc);
290 292
291 spin_unlock(&host->lock);
292
293 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) 293 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
294 cmd->error = -ETIMEDOUT; 294 cmd->error = -ETIMEDOUT;
295 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) 295 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
@@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
644 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 644 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
645 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 645 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
646 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); 646 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
647
648 if (readl(host->base + HW_SSP_STATUS(host)) &
649 BM_SSP_STATUS_SDIO_IRQ)
650 mmc_signal_sdio_irq(host->mmc);
651
652 } else { 647 } else {
653 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 648 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
654 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 649 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
@@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
657 } 652 }
658 653
659 spin_unlock_irqrestore(&host->lock, flags); 654 spin_unlock_irqrestore(&host->lock, flags);
655
656 if (enable && readl(host->base + HW_SSP_STATUS(host)) &
657 BM_SSP_STATUS_SDIO_IRQ)
658 mmc_signal_sdio_irq(host->mmc);
659
660} 660}
661 661
662static const struct mmc_host_ops mxs_mmc_ops = { 662static const struct mmc_host_ops mxs_mmc_ops = {
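The mxs-mmc reordering above follows a general locking rule: mmc_signal_sdio_irq() can call back into the host driver, so it must be issued only after the host lock is released. A hedged sketch of the shape, with register and flag names taken from the driver:

u32 stat;
bool sdio_pending;

spin_lock(&host->lock);
stat = readl(host->base + HW_SSP_CTRL1(host));
/* ... ack the status bits under the lock, as the handler does ... */
sdio_pending = (stat & BM_SSP_CTRL1_SDIO_IRQ) &&
               (stat & BM_SSP_CTRL1_SDIO_IRQ_EN);
spin_unlock(&host->lock);

/* May re-enter the host driver; must run unlocked. */
if (sdio_pending)
        mmc_signal_sdio_irq(host->mmc);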
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 50e08f03aa65..a5999a74496a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data)
668static void 668static void
669mmc_omap_xfer_data(struct mmc_omap_host *host, int write) 669mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
670{ 670{
671 int n; 671 int n, nwords;
672 672
673 if (host->buffer_bytes_left == 0) { 673 if (host->buffer_bytes_left == 0) {
674 host->sg_idx++; 674 host->sg_idx++;
@@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
678 n = 64; 678 n = 64;
679 if (n > host->buffer_bytes_left) 679 if (n > host->buffer_bytes_left)
680 n = host->buffer_bytes_left; 680 n = host->buffer_bytes_left;
681
682 nwords = n / 2;
683 nwords += n & 1; /* handle odd number of bytes to transfer */
684
681 host->buffer_bytes_left -= n; 685 host->buffer_bytes_left -= n;
682 host->total_bytes_left -= n; 686 host->total_bytes_left -= n;
683 host->data->bytes_xfered += n; 687 host->data->bytes_xfered += n;
684 688
685 if (write) { 689 if (write) {
686 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); 690 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
691 host->buffer, nwords);
687 } else { 692 } else {
688 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); 693 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
694 host->buffer, nwords);
689 } 695 }
696
697 host->buffer += nwords;
690} 698}
691 699
692static inline void mmc_omap_report_irq(u16 status) 700static inline void mmc_omap_report_irq(u16 status)
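The omap fix converts a byte count into the 16-bit word count that __raw_readsw()/__raw_writesw() actually expect, rounding up for an odd trailing byte, and advances host->buffer (a u16 pointer) by words rather than bytes. The core of it, assuming DIV_ROUND_UP from <linux/kernel.h>:

/* n is a byte count; the FIFO is accessed as 16-bit words. */
int nwords = DIV_ROUND_UP(n, 2);

__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
              host->buffer, nwords);   /* count is in words, not bytes */
host->buffer += nwords;                /* u16 *, so += words */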
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index b97b2f5dafdb..d25f9ab9a54d 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
48 int div = 1; 48 int div = 1;
49 u32 temp; 49 u32 temp;
50 50
51 if (clock == 0)
52 goto out;
53
51 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); 54 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
52 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN 55 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
53 | ESDHC_CLOCK_MASK); 56 | ESDHC_CLOCK_MASK);
54 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 57 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
55 58
56 if (clock == 0)
57 goto out;
58
59 while (host->max_clk / pre_div / 16 > clock && pre_div < 256) 59 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
60 pre_div *= 2; 60 pre_div *= 2;
61 61
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 437bc193e170..568307cc7caf 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -340,7 +340,7 @@ retry:
340 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. 340 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
341 */ 341 */
342 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); 342 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
343 kfree(new_aeb); 343 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
344 ubi_free_vid_hdr(ubi, vid_hdr); 344 ubi_free_vid_hdr(ubi, vid_hdr);
345 return err; 345 return err;
346 346
@@ -353,7 +353,7 @@ write_error:
353 list_add(&new_aeb->u.list, &ai->erase); 353 list_add(&new_aeb->u.list, &ai->erase);
354 goto retry; 354 goto retry;
355 } 355 }
356 kfree(new_aeb); 356 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
357out_free: 357out_free:
358 ubi_free_vid_hdr(ubi, vid_hdr); 358 ubi_free_vid_hdr(ubi, vid_hdr);
359 return err; 359 return err;
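The vtbl fix pairs the allocator with the matching free: new_aeb comes from the attach-info slab cache, so it must be returned with kmem_cache_free() on the same cache; mixing it with kfree() is at best allocator-dependent. The rule in isolation (the struct name is the attach-time PEB descriptor of this era, shown here as an assumption):

struct ubi_ainf_peb *new_aeb;

new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
if (!new_aeb)
        return -ENOMEM;

/* ... fill in and hand over the PEB description ... */

kmem_cache_free(ai->aeb_slab_cache, new_aeb);   /* never kfree() */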
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1bf5bbfe778e..8892e2b64498 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -149,7 +149,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); 149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); 150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
151 /* clear error status */ 151 /* clear error status */
152 pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA, 152 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED | 153 PCI_EXP_DEVSTA_NFED |
154 PCI_EXP_DEVSTA_FED | 154 PCI_EXP_DEVSTA_FED |
155 PCI_EXP_DEVSTA_CED | 155 PCI_EXP_DEVSTA_CED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 21054987257a..605c4574d32d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1162,14 +1162,9 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1162 1162
1163static u8 bnx2x_is_pcie_pending(struct pci_dev *dev) 1163static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1164{ 1164{
1165 int pos;
1166 u16 status; 1165 u16 status;
1167 1166
1168 pos = pci_pcie_cap(dev); 1167 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1169 if (!pos)
1170 return false;
1171
1172 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
1173 return status & PCI_EXP_DEVSTA_TRPND; 1168 return status & PCI_EXP_DEVSTA_TRPND;
1174} 1169}
1175 1170
@@ -6135,8 +6130,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
6135 u16 devctl; 6130 u16 devctl;
6136 int r_order, w_order; 6131 int r_order, w_order;
6137 6132
6138 pci_read_config_word(bp->pdev, 6133 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6139 pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
6140 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6134 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6141 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6135 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6142 if (bp->mrrs == -1) 6136 if (bp->mrrs == -1)
@@ -9374,7 +9368,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
9374 9368
9375static int __devinit bnx2x_do_flr(struct bnx2x *bp) 9369static int __devinit bnx2x_do_flr(struct bnx2x *bp)
9376{ 9370{
9377 int i, pos; 9371 int i;
9378 u16 status; 9372 u16 status;
9379 struct pci_dev *dev = bp->pdev; 9373 struct pci_dev *dev = bp->pdev;
9380 9374
@@ -9391,16 +9385,12 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
9391 return -EINVAL; 9385 return -EINVAL;
9392 } 9386 }
9393 9387
9394 pos = pci_pcie_cap(dev);
9395 if (!pos)
9396 return -ENOTTY;
9397
9398 /* Wait for the Transaction Pending bit to clear */ 9388 /* Wait for the Transaction Pending bit to clear */
9399 for (i = 0; i < 4; i++) { 9389 for (i = 0; i < 4; i++) {
9400 if (i) 9390 if (i)
9401 msleep((1 << (i - 1)) * 100); 9391 msleep((1 << (i - 1)) * 100);
9402 9392
9403 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); 9393 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
9404 if (!(status & PCI_EXP_DEVSTA_TRPND)) 9394 if (!(status & PCI_EXP_DEVSTA_TRPND))
9405 goto clear; 9395 goto clear;
9406 } 9396 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf906c51d82a..8325fd8d4e5b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3653,17 +3653,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
3653 tg3_enable_register_access(tp); 3653 tg3_enable_register_access(tp);
3654 3654
3655 /* Restore the CLKREQ setting. */ 3655 /* Restore the CLKREQ setting. */
3656 if (tg3_flag(tp, CLKREQ_BUG)) { 3656 if (tg3_flag(tp, CLKREQ_BUG))
3657 u16 lnkctl; 3657 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3658 3658 PCI_EXP_LNKCTL_CLKREQ_EN);
3659 pci_read_config_word(tp->pdev,
3660 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3661 &lnkctl);
3662 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3663 pci_write_config_word(tp->pdev,
3664 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3665 lnkctl);
3666 }
3667 3659
3668 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 3660 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3669 tw32(TG3PCI_MISC_HOST_CTRL, 3661 tw32(TG3PCI_MISC_HOST_CTRL,
@@ -4434,20 +4426,13 @@ relink:
4434 4426
4435 /* Prevent send BD corruption. */ 4427 /* Prevent send BD corruption. */
4436 if (tg3_flag(tp, CLKREQ_BUG)) { 4428 if (tg3_flag(tp, CLKREQ_BUG)) {
4437 u16 oldlnkctl, newlnkctl;
4438
4439 pci_read_config_word(tp->pdev,
4440 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4441 &oldlnkctl);
4442 if (tp->link_config.active_speed == SPEED_100 || 4429 if (tp->link_config.active_speed == SPEED_100 ||
4443 tp->link_config.active_speed == SPEED_10) 4430 tp->link_config.active_speed == SPEED_10)
4444 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; 4431 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4432 PCI_EXP_LNKCTL_CLKREQ_EN);
4445 else 4433 else
4446 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; 4434 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4447 if (newlnkctl != oldlnkctl) 4435 PCI_EXP_LNKCTL_CLKREQ_EN);
4448 pci_write_config_word(tp->pdev,
4449 pci_pcie_cap(tp->pdev) +
4450 PCI_EXP_LNKCTL, newlnkctl);
4451 } 4436 }
4452 4437
4453 if (current_link_up != netif_carrier_ok(tp->dev)) { 4438 if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -8054,7 +8039,7 @@ static int tg3_chip_reset(struct tg3 *tp)
8054 8039
8055 udelay(120); 8040 udelay(120);
8056 8041
8057 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { 8042 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8058 u16 val16; 8043 u16 val16;
8059 8044
8060 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { 8045 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
@@ -8071,24 +8056,17 @@ static int tg3_chip_reset(struct tg3 *tp)
8071 } 8056 }
8072 8057
8073 /* Clear the "no snoop" and "relaxed ordering" bits. */ 8058 /* Clear the "no snoop" and "relaxed ordering" bits. */
8074 pci_read_config_word(tp->pdev, 8059 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8075 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8076 &val16);
8077 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8078 PCI_EXP_DEVCTL_NOSNOOP_EN);
8079 /* 8060 /*
8080 * Older PCIe devices only support the 128 byte 8061 * Older PCIe devices only support the 128 byte
8081 * MPS setting. Enforce the restriction. 8062 * MPS setting. Enforce the restriction.
8082 */ 8063 */
8083 if (!tg3_flag(tp, CPMU_PRESENT)) 8064 if (!tg3_flag(tp, CPMU_PRESENT))
8084 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 8065 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8085 pci_write_config_word(tp->pdev, 8066 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8086 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8087 val16);
8088 8067
8089 /* Clear error status */ 8068 /* Clear error status */
8090 pci_write_config_word(tp->pdev, 8069 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8091 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8092 PCI_EXP_DEVSTA_CED | 8070 PCI_EXP_DEVSTA_CED |
8093 PCI_EXP_DEVSTA_NFED | 8071 PCI_EXP_DEVSTA_NFED |
8094 PCI_EXP_DEVSTA_FED | 8072 PCI_EXP_DEVSTA_FED |
@@ -14565,9 +14543,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14565 14543
14566 tg3_flag_set(tp, PCI_EXPRESS); 14544 tg3_flag_set(tp, PCI_EXPRESS);
14567 14545
14568 pci_read_config_word(tp->pdev, 14546 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
14569 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14570 &lnkctl);
14571 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 14547 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 14548 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14573 ASIC_REV_5906) { 14549 ASIC_REV_5906) {
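One subtlety in the tg3 conversion above: pcie_capability_clear_word() takes a mask of bits to clear, so the read-modify-write logic inverts. Where the original cleared PCI_EXP_DEVCTL_PAYLOAD from a value it had read (val16 &= ~...), the new code adds the bit to the clear mask (val16 |= ...). A sketch of that hunk's logic:

u16 clear = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;

/* Older parts only support a 128-byte MPS; clear the payload field. */
if (!tg3_flag(tp, CPMU_PRESENT))
        clear |= PCI_EXP_DEVCTL_PAYLOAD;

pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, clear);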
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index bff8a3cdd3df..aef45d3113ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3289,22 +3289,18 @@ static void config_pcie(struct adapter *adap)
3289 unsigned int log2_width, pldsize; 3289 unsigned int log2_width, pldsize;
3290 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; 3290 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3291 3291
3292 pci_read_config_word(adap->pdev, 3292 pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
3293 adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
3294 &val);
3295 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; 3293 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3296 3294
3297 pci_read_config_word(adap->pdev, 0x2, &devid); 3295 pci_read_config_word(adap->pdev, 0x2, &devid);
3298 if (devid == 0x37) { 3296 if (devid == 0x37) {
3299 pci_write_config_word(adap->pdev, 3297 pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
3300 adap->pdev->pcie_cap + PCI_EXP_DEVCTL, 3298 val & ~PCI_EXP_DEVCTL_READRQ &
3301 val & ~PCI_EXP_DEVCTL_READRQ & 3299 ~PCI_EXP_DEVCTL_PAYLOAD);
3302 ~PCI_EXP_DEVCTL_PAYLOAD);
3303 pldsize = 0; 3300 pldsize = 0;
3304 } 3301 }
3305 3302
3306 pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL, 3303 pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
3307 &val);
3308 3304
3309 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); 3305 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3310 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx : 3306 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
@@ -3425,15 +3421,13 @@ out_err:
3425static void get_pci_mode(struct adapter *adapter, struct pci_params *p) 3421static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3426{ 3422{
3427 static unsigned short speed_map[] = { 33, 66, 100, 133 }; 3423 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3428 u32 pci_mode, pcie_cap; 3424 u32 pci_mode;
3429 3425
3430 pcie_cap = pci_pcie_cap(adapter->pdev); 3426 if (pci_is_pcie(adapter->pdev)) {
3431 if (pcie_cap) {
3432 u16 val; 3427 u16 val;
3433 3428
3434 p->variant = PCI_VARIANT_PCIE; 3429 p->variant = PCI_VARIANT_PCIE;
3435 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, 3430 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3436 &val);
3437 p->width = (val >> 4) & 0x3f; 3431 p->width = (val >> 4) & 0x3f;
3438 return; 3432 return;
3439 } 3433 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5ed49af23d6a..4a20821511e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3694,15 +3694,7 @@ static void __devinit print_port_info(const struct net_device *dev)
3694 3694
3695static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) 3695static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3696{ 3696{
3697 u16 v; 3697 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
3698 int pos;
3699
3700 pos = pci_pcie_cap(dev);
3701 if (pos > 0) {
3702 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3703 v |= PCI_EXP_DEVCTL_RELAX_EN;
3704 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3705 }
3706} 3698}
3707 3699
3708/* 3700/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa947dfa4c30..af1601323173 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2741,11 +2741,9 @@ static void __devinit get_pci_mode(struct adapter *adapter,
2741 struct pci_params *p) 2741 struct pci_params *p)
2742{ 2742{
2743 u16 val; 2743 u16 val;
2744 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2745 2744
2746 if (pcie_cap) { 2745 if (pci_is_pcie(adapter->pdev)) {
2747 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, 2746 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
2748 &val);
2749 p->speed = val & PCI_EXP_LNKSTA_CLS; 2747 p->speed = val & PCI_EXP_LNKSTA_CLS;
2750 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 2748 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2751 } 2749 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d01a099475a1..acc030efb67b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5584,16 +5584,15 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5584 */ 5584 */
5585 if (adapter->flags & FLAG_IS_QUAD_PORT) { 5585 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5586 struct pci_dev *us_dev = pdev->bus->self; 5586 struct pci_dev *us_dev = pdev->bus->self;
5587 int pos = pci_pcie_cap(us_dev);
5588 u16 devctl; 5587 u16 devctl;
5589 5588
5590 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); 5589 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
5591 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 5590 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
5592 (devctl & ~PCI_EXP_DEVCTL_CERE)); 5591 (devctl & ~PCI_EXP_DEVCTL_CERE));
5593 5592
5594 e1000_power_off(pdev, sleep, wake); 5593 e1000_power_off(pdev, sleep, wake);
5595 5594
5596 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 5595 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
5597 } else { 5596 } else {
5598 e1000_power_off(pdev, sleep, wake); 5597 e1000_power_off(pdev, sleep, wake);
5599 } 5598 }
@@ -5607,25 +5606,15 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5607#else 5606#else
5608static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5607static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5609{ 5608{
5610 int pos;
5611 u16 reg16;
5612
5613 /* 5609 /*
5614 * Both device and parent should have the same ASPM setting. 5610 * Both device and parent should have the same ASPM setting.
5615 * Disable ASPM in downstream component first and then upstream. 5611 * Disable ASPM in downstream component first and then upstream.
5616 */ 5612 */
5617 pos = pci_pcie_cap(pdev); 5613 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
5618 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
5619 reg16 &= ~state;
5620 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
5621
5622 if (!pdev->bus->self)
5623 return;
5624 5614
5625 pos = pci_pcie_cap(pdev->bus->self); 5615 if (pdev->bus->self)
5626 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16); 5616 pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
5627 reg16 &= ~state; 5617 state);
5628 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5629} 5618}
5630#endif 5619#endif
5631static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5620static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 48cc4fb1a307..2036ae365bae 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6538,28 +6538,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6538s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 6538s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6539{ 6539{
6540 struct igb_adapter *adapter = hw->back; 6540 struct igb_adapter *adapter = hw->back;
6541 u16 cap_offset;
6542 6541
6543 cap_offset = adapter->pdev->pcie_cap; 6542 if (pcie_capability_read_word(adapter->pdev, reg, value))
6544 if (!cap_offset)
6545 return -E1000_ERR_CONFIG; 6543 return -E1000_ERR_CONFIG;
6546 6544
6547 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6548
6549 return 0; 6545 return 0;
6550} 6546}
6551 6547
6552s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 6548s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6553{ 6549{
6554 struct igb_adapter *adapter = hw->back; 6550 struct igb_adapter *adapter = hw->back;
6555 u16 cap_offset;
6556 6551
6557 cap_offset = adapter->pdev->pcie_cap; 6552 if (pcie_capability_write_word(adapter->pdev, reg, *value))
6558 if (!cap_offset)
6559 return -E1000_ERR_CONFIG; 6553 return -E1000_ERR_CONFIG;
6560 6554
6561 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6562
6563 return 0; 6555 return 0;
6564} 6556}
6565 6557
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4326f74f7137..976570d4c939 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7527,7 +7527,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7527 goto skip_bad_vf_detection; 7527 goto skip_bad_vf_detection;
7528 7528
7529 bdev = pdev->bus->self; 7529 bdev = pdev->bus->self;
7530 while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) 7530 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
7531 bdev = bdev->bus->self; 7531 bdev = bdev->bus->self;
7532 7532
7533 if (!bdev) 7533 if (!bdev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index 11e7c1cb99bf..dd1b5093d8b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -141,16 +141,16 @@ int mlx4_reset(struct mlx4_dev *dev)
141 /* Now restore the PCI headers */ 141 /* Now restore the PCI headers */
142 if (pcie_cap) { 142 if (pcie_cap) {
143 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; 143 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
144 if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL, 144 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
145 devctl)) { 145 devctl)) {
146 err = -ENODEV; 146 err = -ENODEV;
147 mlx4_err(dev, "Couldn't restore HCA PCI Express " 147 mlx4_err(dev, "Couldn't restore HCA PCI Express "
148 "Device Control register, aborting.\n"); 148 "Device Control register, aborting.\n");
149 goto out; 149 goto out;
150 } 150 }
151 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 151 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
152 if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL, 152 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
153 linkctl)) { 153 linkctl)) {
154 err = -ENODEV; 154 err = -ENODEV;
155 mlx4_err(dev, "Couldn't restore HCA PCI Express " 155 mlx4_err(dev, "Couldn't restore HCA PCI Express "
156 "Link control register, aborting.\n"); 156 "Link control register, aborting.\n");
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index fa85cf1353fd..83516e3369c9 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1078,22 +1078,16 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
1078#ifdef CONFIG_MYRI10GE_DCA 1078#ifdef CONFIG_MYRI10GE_DCA
1079static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on) 1079static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
1080{ 1080{
1081 int ret, cap, err; 1081 int ret;
1082 u16 ctl; 1082 u16 ctl;
1083 1083
1084 cap = pci_pcie_cap(pdev); 1084 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);
1085 if (!cap)
1086 return 0;
1087
1088 err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
1089 if (err)
1090 return 0;
1091 1085
1092 ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4; 1086 ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
1093 if (ret != on) { 1087 if (ret != on) {
1094 ctl &= ~PCI_EXP_DEVCTL_RELAX_EN; 1088 ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
1095 ctl |= (on << 4); 1089 ctl |= (on << 4);
1096 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); 1090 pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
1097 } 1091 }
1098 return ret; 1092 return ret;
1099} 1093}
@@ -3192,18 +3186,13 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
3192 struct device *dev = &mgp->pdev->dev; 3186 struct device *dev = &mgp->pdev->dev;
3193 int cap; 3187 int cap;
3194 unsigned err_cap; 3188 unsigned err_cap;
3195 u16 val;
3196 u8 ext_type;
3197 int ret; 3189 int ret;
3198 3190
3199 if (!myri10ge_ecrc_enable || !bridge) 3191 if (!myri10ge_ecrc_enable || !bridge)
3200 return; 3192 return;
3201 3193
3202 /* check that the bridge is a root port */ 3194 /* check that the bridge is a root port */
3203 cap = pci_pcie_cap(bridge); 3195 if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
3204 pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
3205 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
3206 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
3207 if (myri10ge_ecrc_enable > 1) { 3196 if (myri10ge_ecrc_enable > 1) {
3208 struct pci_dev *prev_bridge, *old_bridge = bridge; 3197 struct pci_dev *prev_bridge, *old_bridge = bridge;
3209 3198
@@ -3218,11 +3207,8 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
3218 " to force ECRC\n"); 3207 " to force ECRC\n");
3219 return; 3208 return;
3220 } 3209 }
3221 cap = pci_pcie_cap(bridge); 3210 } while (pci_pcie_type(bridge) !=
3222 pci_read_config_word(bridge, 3211 PCI_EXP_TYPE_ROOT_PORT);
3223 cap + PCI_CAP_FLAGS, &val);
3224 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
3225 } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
3226 3212
3227 dev_info(dev, 3213 dev_info(dev,
3228 "Forcing ECRC on non-root port %s" 3214 "Forcing ECRC on non-root port %s"
@@ -3335,11 +3321,10 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
3335 int overridden = 0; 3321 int overridden = 0;
3336 3322
3337 if (myri10ge_force_firmware == 0) { 3323 if (myri10ge_force_firmware == 0) {
3338 int link_width, exp_cap; 3324 int link_width;
3339 u16 lnk; 3325 u16 lnk;
3340 3326
3341 exp_cap = pci_pcie_cap(mgp->pdev); 3327 pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
3342 pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
3343 link_width = (lnk >> 4) & 0x3f; 3328 link_width = (lnk >> 4) & 0x3f;
3344 3329
3345 /* Check to see if Link is less than 8 or if the 3330 /* Check to see if Link is less than 8 or if the
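The myri10ge hunks drop the manual decode of the capability flags word in favour of pci_pcie_type(). A sketch of what that helper computes, on the assumption (which the whole accessor series relies on) that the flags register is cached in pcie_flags_reg at enumeration time:

/* Equivalent of the open-coded decode being removed: extract the
 * device/port type field from the cached Capabilities register. */
static inline int pcie_type_sketch(const struct pci_dev *dev)
{
        return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4;
}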
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 32d06824fe3e..c2e420a84d22 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -757,7 +757,7 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
757 u16 lnk; 757 u16 lnk;
758 758
759 /* Get the negotiated link width and speed from PCI config space */ 759 /* Get the negotiated link width and speed from PCI config space */
760 pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); 760 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
761 761
762 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) 762 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
763 return VXGE_HW_ERR_INVALID_PCI_INFO; 763 return VXGE_HW_ERR_INVALID_PCI_INFO;
@@ -1982,7 +1982,7 @@ u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1982 struct pci_dev *dev = hldev->pdev; 1982 struct pci_dev *dev = hldev->pdev;
1983 u16 lnk; 1983 u16 lnk;
1984 1984
1985 pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); 1985 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
1986 return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; 1986 return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1987} 1987}
1988 1988
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 342b3a79bd0f..01d6141cedd9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1382,7 +1382,7 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
1382 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) 1382 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
1383 return; 1383 return;
1384 1384
1385 if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) 1385 if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
1386 return; 1386 return;
1387 1387
1388 aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); 1388 aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b47d5b35024e..a7cc56007b33 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -833,15 +833,8 @@ static void rtl_unlock_work(struct rtl8169_private *tp)
833 833
834static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) 834static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
835{ 835{
836 int cap = pci_pcie_cap(pdev); 836 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
837 837 PCI_EXP_DEVCTL_READRQ, force);
838 if (cap) {
839 u16 ctl;
840
841 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
842 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
843 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
844 }
845} 838}
846 839
847struct rtl_cond { 840struct rtl_cond {
@@ -4739,28 +4732,14 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4739 4732
4740static void rtl_disable_clock_request(struct pci_dev *pdev) 4733static void rtl_disable_clock_request(struct pci_dev *pdev)
4741{ 4734{
4742 int cap = pci_pcie_cap(pdev); 4735 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4743 4736 PCI_EXP_LNKCTL_CLKREQ_EN);
4744 if (cap) {
4745 u16 ctl;
4746
4747 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4748 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
4749 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4750 }
4751} 4737}
4752 4738
4753static void rtl_enable_clock_request(struct pci_dev *pdev) 4739static void rtl_enable_clock_request(struct pci_dev *pdev)
4754{ 4740{
4755 int cap = pci_pcie_cap(pdev); 4741 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4756 4742 PCI_EXP_LNKCTL_CLKREQ_EN);
4757 if (cap) {
4758 u16 ctl;
4759
4760 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4761 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4762 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4763 }
4764} 4743}
4765 4744
4766#define R8168_CPCMD_QUIRK_MASK (\ 4745#define R8168_CPCMD_QUIRK_MASK (\
@@ -5405,14 +5384,9 @@ static void rtl_hw_start_8101(struct net_device *dev)
5405 tp->event_slow &= ~RxFIFOOver; 5384 tp->event_slow &= ~RxFIFOOver;
5406 5385
5407 if (tp->mac_version == RTL_GIGA_MAC_VER_13 || 5386 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5408 tp->mac_version == RTL_GIGA_MAC_VER_16) { 5387 tp->mac_version == RTL_GIGA_MAC_VER_16)
5409 int cap = pci_pcie_cap(pdev); 5388 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5410 5389 PCI_EXP_DEVCTL_NOSNOOP_EN);
5411 if (cap) {
5412 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
5413 PCI_EXP_DEVCTL_NOSNOOP_EN);
5414 }
5415 }
5416 5390
5417 RTL_W8(Cfg9346, Cfg9346_Unlock); 5391 RTL_W8(Cfg9346, Cfg9346_Unlock);
5418 5392
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index c2a0fe393267..3208dca66758 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9762,9 +9762,8 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9762 union niu_parent_id parent_id; 9762 union niu_parent_id parent_id;
9763 struct net_device *dev; 9763 struct net_device *dev;
9764 struct niu *np; 9764 struct niu *np;
9765 int err, pos; 9765 int err;
9766 u64 dma_mask; 9766 u64 dma_mask;
9767 u16 val16;
9768 9767
9769 niu_driver_version(); 9768 niu_driver_version();
9770 9769
@@ -9787,8 +9786,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9787 goto err_out_disable_pdev; 9786 goto err_out_disable_pdev;
9788 } 9787 }
9789 9788
9790 pos = pci_pcie_cap(pdev); 9789 if (!pci_is_pcie(pdev)) {
9791 if (pos <= 0) {
9792 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9793 goto err_out_free_res; 9791 goto err_out_free_res;
9794 } 9792 }
@@ -9813,14 +9811,11 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9813 goto err_out_free_dev; 9811 goto err_out_free_dev;
9814 } 9812 }
9815 9813
9816 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 9814 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
9817 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 9815 PCI_EXP_DEVCTL_NOSNOOP_EN,
9818 val16 |= (PCI_EXP_DEVCTL_CERE | 9816 PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
9819 PCI_EXP_DEVCTL_NFERE | 9817 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
9820 PCI_EXP_DEVCTL_FERE | 9818 PCI_EXP_DEVCTL_RELAX_EN);
9821 PCI_EXP_DEVCTL_URRE |
9822 PCI_EXP_DEVCTL_RELAX_EN);
9823 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
9824 9819
9825 dma_mask = DMA_BIT_MASK(44); 9820 dma_mask = DMA_BIT_MASK(44);
9826 err = pci_set_dma_mask(pdev, dma_mask); 9821 err = pci_set_dma_mask(pdev, dma_mask);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a978984d78a5..ef11dc639461 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -113,41 +113,32 @@ static void ath_pci_aspm_init(struct ath_common *common)
113 struct ath_hw *ah = sc->sc_ah; 113 struct ath_hw *ah = sc->sc_ah;
114 struct pci_dev *pdev = to_pci_dev(sc->dev); 114 struct pci_dev *pdev = to_pci_dev(sc->dev);
115 struct pci_dev *parent; 115 struct pci_dev *parent;
116 int pos; 116 u16 aspm;
117 u8 aspm;
118 117
119 if (!ah->is_pciexpress) 118 if (!ah->is_pciexpress)
120 return; 119 return;
121 120
122 pos = pci_pcie_cap(pdev);
123 if (!pos)
124 return;
125
126 parent = pdev->bus->self; 121 parent = pdev->bus->self;
127 if (!parent) 122 if (!parent)
128 return; 123 return;
129 124
130 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 125 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
131 /* Bluetooth coexistence requires disabling ASPM. */ 126 /* Bluetooth coexistence requires disabling ASPM. */
132 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); 127 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
133 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 128 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
134 pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
135 129
136 /* 130 /*
137 * Both upstream and downstream PCIe components should 131 * Both upstream and downstream PCIe components should
138 * have the same ASPM settings. 132 * have the same ASPM settings.
139 */ 133 */
140 pos = pci_pcie_cap(parent); 134 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
141 pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); 135 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
142 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
143 pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
144 136
145 ath_info(common, "Disabling ASPM since BTCOEX is enabled\n"); 137 ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
146 return; 138 return;
147 } 139 }
148 140
149 pos = pci_pcie_cap(parent); 141 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
150 pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
151 if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { 142 if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
152 ah->aspm_enabled = true; 143 ah->aspm_enabled = true;
153 /* Initialize PCIe PM and SERDES registers. */ 144 /* Initialize PCIe PM and SERDES registers. */
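The ath9k BTCOEX path encodes the rule stated in its comment: both ends of the link must agree on ASPM, so L0s/L1 are cleared on the device and on its upstream bridge. Condensed, with the driver's choice of PCIE_LINK_STATE_* masks kept as-is:

struct pci_dev *parent = pdev->bus->self;
const u16 aspm_mask = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;

pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_mask);
if (parent) /* keep both link partners consistent */
        pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, aspm_mask);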
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 5f5017767b99..724682669060 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,10 +1832,8 @@ int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
1832static inline u16 1832static inline u16
1833il_pcie_link_ctl(struct il_priv *il) 1833il_pcie_link_ctl(struct il_priv *il)
1834{ 1834{
1835 int pos;
1836 u16 pci_lnk_ctl; 1835 u16 pci_lnk_ctl;
1837 pos = pci_pcie_cap(il->pci_dev); 1836 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl);
1838 pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
1839 return pci_lnk_ctl; 1837 return pci_lnk_ctl;
1840} 1838}
1841 1839
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 1e86ea2266d4..e316ca4632b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -675,13 +675,10 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
675static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) 675static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
676{ 676{
677 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 677 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
678 int pos;
679 u16 pci_lnk_ctl; 678 u16 pci_lnk_ctl;
680 679
681 struct pci_dev *pci_dev = trans_pcie->pci_dev; 680 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
682 681 &pci_lnk_ctl);
683 pos = pci_pcie_cap(pci_dev);
684 pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
685 return pci_lnk_ctl; 682 return pci_lnk_ctl;
686} 683}
687 684
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 80f75d3ba84a..5983631a1b1a 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -372,13 +372,11 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
372 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 372 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
373 373
374 u8 tmp; 374 u8 tmp;
375 int pos; 375 u16 linkctrl_reg;
376 u8 linkctrl_reg;
377 376
378 /*Link Control Register */ 377 /*Link Control Register */
379 pos = pci_pcie_cap(pdev); 378 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
380 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg); 379 pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
381 pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
382 380
383 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n", 381 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
384 pcipriv->ndis_adapter.linkctrl_reg); 382 pcipriv->ndis_adapter.linkctrl_reg);
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index ba91a7e17519..3af0478c057b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -469,3 +469,205 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
469 raw_spin_unlock_irqrestore(&pci_lock, flags); 469 raw_spin_unlock_irqrestore(&pci_lock, flags);
470} 470}
471EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); 471EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
472
473static inline int pcie_cap_version(const struct pci_dev *dev)
474{
475 return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
476}
477
478static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
479{
480 return true;
481}
482
483static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
484{
485 int type = pci_pcie_type(dev);
486
487 return pcie_cap_version(dev) > 1 ||
488 type == PCI_EXP_TYPE_ROOT_PORT ||
489 type == PCI_EXP_TYPE_ENDPOINT ||
490 type == PCI_EXP_TYPE_LEG_END;
491}
492
493static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
494{
495 int type = pci_pcie_type(dev);
496
497 return pcie_cap_version(dev) > 1 ||
498 type == PCI_EXP_TYPE_ROOT_PORT ||
499 (type == PCI_EXP_TYPE_DOWNSTREAM &&
500 dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
501}
502
503static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
504{
505 int type = pci_pcie_type(dev);
506
507 return pcie_cap_version(dev) > 1 ||
508 type == PCI_EXP_TYPE_ROOT_PORT ||
509 type == PCI_EXP_TYPE_RC_EC;
510}
511
512static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
513{
514 if (!pci_is_pcie(dev))
515 return false;
516
517 switch (pos) {
518 case PCI_EXP_FLAGS_TYPE:
519 return true;
520 case PCI_EXP_DEVCAP:
521 case PCI_EXP_DEVCTL:
522 case PCI_EXP_DEVSTA:
523 return pcie_cap_has_devctl(dev);
524 case PCI_EXP_LNKCAP:
525 case PCI_EXP_LNKCTL:
526 case PCI_EXP_LNKSTA:
527 return pcie_cap_has_lnkctl(dev);
528 case PCI_EXP_SLTCAP:
529 case PCI_EXP_SLTCTL:
530 case PCI_EXP_SLTSTA:
531 return pcie_cap_has_sltctl(dev);
532 case PCI_EXP_RTCTL:
533 case PCI_EXP_RTCAP:
534 case PCI_EXP_RTSTA:
535 return pcie_cap_has_rtctl(dev);
536 case PCI_EXP_DEVCAP2:
537 case PCI_EXP_DEVCTL2:
538 case PCI_EXP_LNKCAP2:
539 case PCI_EXP_LNKCTL2:
540 case PCI_EXP_LNKSTA2:
541 return pcie_cap_version(dev) > 1;
542 default:
543 return false;
544 }
545}
546
547/*
548 * Note that these accessor functions are only for the "PCI Express
549 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
550 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
551 */
552int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
553{
554 int ret;
555
556 *val = 0;
557 if (pos & 1)
558 return -EINVAL;
559
560 if (pcie_capability_reg_implemented(dev, pos)) {
561 ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
562 /*
563 * Reset *val to 0 if pci_read_config_word() fails; it may
564 * have been set to 0xFFFF if a hardware error occurred
565 * during the read.
566 */
567 if (ret)
568 *val = 0;
569 return ret;
570 }
571
572 /*
573 * For Functions that do not implement the Slot Capabilities,
574 * Slot Status, and Slot Control registers, these spaces must
575 * be hardwired to 0b, with the exception of the Presence Detect
576 * State bit in the Slot Status register of Downstream Ports,
577 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
578 */
579 if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
580 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
581 *val = PCI_EXP_SLTSTA_PDS;
582 }
583
584 return 0;
585}
586EXPORT_SYMBOL(pcie_capability_read_word);
587
588int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
589{
590 int ret;
591
592 *val = 0;
593 if (pos & 3)
594 return -EINVAL;
595
596 if (pcie_capability_reg_implemented(dev, pos)) {
597 ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
598 /*
599 * Reset *val to 0 if pci_read_config_dword() fails, it may
600 * have been written as 0xFFFFFFFF if hardware error happens
601 * during pci_read_config_dword().
602 */
603 if (ret)
604 *val = 0;
605 return ret;
606 }
607
608 if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
609 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
610 *val = PCI_EXP_SLTSTA_PDS;
611 }
612
613 return 0;
614}
615EXPORT_SYMBOL(pcie_capability_read_dword);
616
617int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
618{
619 if (pos & 1)
620 return -EINVAL;
621
622 if (!pcie_capability_reg_implemented(dev, pos))
623 return 0;
624
625 return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
626}
627EXPORT_SYMBOL(pcie_capability_write_word);
628
629int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
630{
631 if (pos & 3)
632 return -EINVAL;
633
634 if (!pcie_capability_reg_implemented(dev, pos))
635 return 0;
636
637 return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
638}
639EXPORT_SYMBOL(pcie_capability_write_dword);
640
641int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
642 u16 clear, u16 set)
643{
644 int ret;
645 u16 val;
646
647 ret = pcie_capability_read_word(dev, pos, &val);
648 if (!ret) {
649 val &= ~clear;
650 val |= set;
651 ret = pcie_capability_write_word(dev, pos, val);
652 }
653
654 return ret;
655}
656EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
657
658int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
659 u32 clear, u32 set)
660{
661 int ret;
662 u32 val;
663
664 ret = pcie_capability_read_dword(dev, pos, &val);
665 if (!ret) {
666 val &= ~clear;
667 val |= set;
668 ret = pcie_capability_write_dword(dev, pos, val);
669 }
670
671 return ret;
672}
673EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
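The accessors added above are what every driver hunk in this series converts to: they locate the PCI Express Capability themselves, consult pcie_capability_reg_implemented() so registers absent on v1 capabilities read back as 0 instead of garbage, and fold the read-modify-write dance into one call. A hedged before/after sketch of a typical caller (the set/clear helpers seen in the driver hunks are presumably thin inline wrappers around the clear_and_set variant, defined elsewhere in the same series):

u16 lnkctl;

/* Before: every caller found the capability and offset by hand. */
int pos = pci_pcie_cap(pdev);
if (pos)
        pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &lnkctl);

/* After: offset handling, version checks and RMW are centralized. */
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
                                   PCI_EXP_LNKCTL_CLKREQ_EN, /* clear */
                                   0);                        /* set */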
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 376d70d17176..24d709b7388c 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -81,16 +81,12 @@ static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
81/* Dummy driver for duplicate name detection */ 81/* Dummy driver for duplicate name detection */
82static int __init dummy_probe(struct pcie_device *dev) 82static int __init dummy_probe(struct pcie_device *dev)
83{ 83{
84 int pos;
85 u32 slot_cap; 84 u32 slot_cap;
86 acpi_handle handle; 85 acpi_handle handle;
87 struct dummy_slot *slot, *tmp; 86 struct dummy_slot *slot, *tmp;
88 struct pci_dev *pdev = dev->port; 87 struct pci_dev *pdev = dev->port;
89 88
90 pos = pci_pcie_cap(pdev); 89 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
91 if (!pos)
92 return -ENODEV;
93 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
94 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 90 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
95 if (!slot) 91 if (!slot)
96 return -ENOMEM; 92 return -ENOMEM;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 302451e8289d..13b2eaf7ba43 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -44,25 +44,25 @@
44static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 44static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
45{ 45{
46 struct pci_dev *dev = ctrl->pcie->port; 46 struct pci_dev *dev = ctrl->pcie->port;
47 return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value); 47 return pcie_capability_read_word(dev, reg, value);
48} 48}
49 49
50static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) 50static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
51{ 51{
52 struct pci_dev *dev = ctrl->pcie->port; 52 struct pci_dev *dev = ctrl->pcie->port;
53 return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value); 53 return pcie_capability_read_dword(dev, reg, value);
54} 54}
55 55
56static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) 56static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
57{ 57{
58 struct pci_dev *dev = ctrl->pcie->port; 58 struct pci_dev *dev = ctrl->pcie->port;
59 return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value); 59 return pcie_capability_write_word(dev, reg, value);
60} 60}
61 61
62static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) 62static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
63{ 63{
64 struct pci_dev *dev = ctrl->pcie->port; 64 struct pci_dev *dev = ctrl->pcie->port;
65 return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value); 65 return pcie_capability_write_dword(dev, reg, value);
66} 66}
67 67
68/* Power Control Command */ 68/* Power Control Command */
@@ -855,10 +855,6 @@ struct controller *pcie_init(struct pcie_device *dev)
855 goto abort; 855 goto abort;
856 } 856 }
857 ctrl->pcie = dev; 857 ctrl->pcie = dev;
858 if (!pci_pcie_cap(pdev)) {
859 ctrl_err(ctrl, "Cannot find PCI Express capability\n");
860 goto abort_ctrl;
861 }
862 if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { 858 if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
863 ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); 859 ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
864 goto abort_ctrl; 860 goto abort_ctrl;
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 8c05a18c9770..fec2d5b75440 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -96,17 +96,11 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
96static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) 96static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
97{ 97{
98 int pos; 98 int pos;
99 u16 reg16;
100 u32 reg32; 99 u32 reg32;
101 100
102 if (!hpp) 101 if (!hpp)
103 return; 102 return;
104 103
105 /* Find PCI Express capability */
106 pos = pci_pcie_cap(dev);
107 if (!pos)
108 return;
109
110 if (hpp->revision > 1) { 104 if (hpp->revision > 1) {
111 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", 105 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
112 hpp->revision); 106 hpp->revision);
@@ -114,17 +108,13 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
114 } 108 }
115 109
116 /* Initialize Device Control Register */ 110 /* Initialize Device Control Register */
117 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 111 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
118 reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; 112 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
119 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
120 113
121 /* Initialize Link Control Register */ 114 /* Initialize Link Control Register */
122 if (dev->subordinate) { 115 if (dev->subordinate)
123 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16); 116 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
124 reg16 = (reg16 & hpp->pci_exp_lnkctl_and) 117 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
125 | hpp->pci_exp_lnkctl_or;
126 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
127 }
128 118
129 /* Find Advanced Error Reporting Enhanced Capability */ 119 /* Find Advanced Error Reporting Enhanced Capability */
130 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 120 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
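The _HPP type-2 settings arrive as an and/or mask pair, and the conversion above maps that directly onto the new RMW helper: new = (old & and) | or is the same as clearing ~and and then setting or. In isolation:

/* ACPI _HPP gives "and" (keep) and "or" (force) masks; express
 * them as the clear/set arguments of the capability helper. */
pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                                   ~hpp->pci_exp_devctl_and,
                                   hpp->pci_exp_devctl_or);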
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 1e117c2a3cad..b29e20b7862f 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -388,7 +388,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
388 /* Remove the EADS bridge device itself */ 388 /* Remove the EADS bridge device itself */
389 BUG_ON(!bus->self); 389 BUG_ON(!bus->self);
390 pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); 390 pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
391 eeh_remove_bus_device(bus->self); 391 eeh_remove_bus_device(bus->self, true);
392 pci_stop_and_remove_bus_device(bus->self); 392 pci_stop_and_remove_bus_device(bus->self);
393 393
394 return 0; 394 return 0;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 74bbaf82638d..aeccc911abb8 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -433,8 +433,8 @@ static int sriov_init(struct pci_dev *dev, int pos)
433 struct resource *res; 433 struct resource *res;
434 struct pci_dev *pdev; 434 struct pci_dev *pdev;
435 435
436 if (dev->pcie_type != PCI_EXP_TYPE_RC_END && 436 if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
437 dev->pcie_type != PCI_EXP_TYPE_ENDPOINT) 437 pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
438 return -ENODEV; 438 return -ENODEV;
439 439
440 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl); 440 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
@@ -503,7 +503,7 @@ found:
503 iov->self = dev; 503 iov->self = dev;
504 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 504 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
505 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 505 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
506 if (dev->pcie_type == PCI_EXP_TYPE_RC_END) 506 if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
507 iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link); 507 iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
508 508
509 if (pdev) 509 if (pdev)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 5270f1a99328..d6fd6b6d9d4b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi)
280{ 280{
281 struct drv_dev_and_id *ddi = _ddi; 281 struct drv_dev_and_id *ddi = _ddi;
282 struct device *dev = &ddi->dev->dev; 282 struct device *dev = &ddi->dev->dev;
283 struct device *parent = dev->parent;
283 int rc; 284 int rc;
284 285
286 /* The parent bridge must be in active state when probing */
287 if (parent)
288 pm_runtime_get_sync(parent);
285 /* Unbound PCI devices are always set to disabled and suspended. 289 /* Unbound PCI devices are always set to disabled and suspended.
286 * During probe, the device is set to enabled and active and the 290 * During probe, the device is set to enabled and active and the
287 * usage count is incremented. If the driver supports runtime PM, 291 * usage count is incremented. If the driver supports runtime PM,
@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi)
298 pm_runtime_set_suspended(dev); 302 pm_runtime_set_suspended(dev);
299 pm_runtime_put_noidle(dev); 303 pm_runtime_put_noidle(dev);
300 } 304 }
305 if (parent)
306 pm_runtime_put(parent);
301 return rc; 307 return rc;
302} 308}
303 309
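
The pci-driver.c hunks above bracket the probe call with a runtime-PM reference on the parent bridge so the bridge is awake while the child binds. A reduced sketch of the pattern, assuming only the documented pm_runtime_get_sync()/pm_runtime_put() semantics; the function name is hypothetical:

	#include <linux/pm_runtime.h>

	static int probe_with_active_parent(struct device *dev,
					    int (*do_probe)(struct device *))
	{
		struct device *parent = dev->parent;
		int rc;

		if (parent)
			pm_runtime_get_sync(parent);	/* resume the bridge */
		rc = do_probe(dev);
		if (parent)
			pm_runtime_put(parent);		/* bridge may suspend again */
		return rc;
	}
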
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 6869009c7393..02d107b15281 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
458} 458}
459struct device_attribute vga_attr = __ATTR_RO(boot_vga); 459struct device_attribute vga_attr = __ATTR_RO(boot_vga);
460 460
461static void
462pci_config_pm_runtime_get(struct pci_dev *pdev)
463{
464 struct device *dev = &pdev->dev;
465 struct device *parent = dev->parent;
466
467 if (parent)
468 pm_runtime_get_sync(parent);
469 pm_runtime_get_noresume(dev);
470 /*
471 * pdev->current_state is set to PCI_D3cold while the device is
472 * suspending, so wait until the suspend completes
473 */
474 pm_runtime_barrier(dev);
475 /*
476 * Only need to resume devices in D3cold, because config
477 * registers are still accessible for devices suspended but
478 * not in D3cold.
479 */
480 if (pdev->current_state == PCI_D3cold)
481 pm_runtime_resume(dev);
482}
483
484static void
485pci_config_pm_runtime_put(struct pci_dev *pdev)
486{
487 struct device *dev = &pdev->dev;
488 struct device *parent = dev->parent;
489
490 pm_runtime_put(dev);
491 if (parent)
492 pm_runtime_put_sync(parent);
493}
494
461static ssize_t 495static ssize_t
462pci_read_config(struct file *filp, struct kobject *kobj, 496pci_read_config(struct file *filp, struct kobject *kobj,
463 struct bin_attribute *bin_attr, 497 struct bin_attribute *bin_attr,
@@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
484 size = count; 518 size = count;
485 } 519 }
486 520
521 pci_config_pm_runtime_get(dev);
522
487 if ((off & 1) && size) { 523 if ((off & 1) && size) {
488 u8 val; 524 u8 val;
489 pci_user_read_config_byte(dev, off, &val); 525 pci_user_read_config_byte(dev, off, &val);
@@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
529 --size; 565 --size;
530 } 566 }
531 567
568 pci_config_pm_runtime_put(dev);
569
532 return count; 570 return count;
533} 571}
534 572
@@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
549 count = size; 587 count = size;
550 } 588 }
551 589
590 pci_config_pm_runtime_get(dev);
591
552 if ((off & 1) && size) { 592 if ((off & 1) && size) {
553 pci_user_write_config_byte(dev, off, data[off - init_off]); 593 pci_user_write_config_byte(dev, off, data[off - init_off]);
554 off++; 594 off++;
@@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
587 --size; 627 --size;
588 } 628 }
589 629
630 pci_config_pm_runtime_put(dev);
631
590 return count; 632 return count;
591} 633}
592 634
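
With the two helpers added above, every sysfs config-space access is bracketed so that a device sitting in D3cold is resumed before its registers are touched. A hypothetical caller showing the intended usage:

	/* Hypothetical user of pci_config_pm_runtime_get()/_put(). */
	static u16 sysfs_style_vendor_read(struct pci_dev *pdev)
	{
		u16 vendor;

		pci_config_pm_runtime_get(pdev);
		pci_user_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
		pci_config_pm_runtime_put(pdev);
		return vendor;
	}
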
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f3ea977a5b1b..292cb2e0ff7f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -254,38 +254,6 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
254} 254}
255 255
256/** 256/**
257 * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
258 * @dev: PCI device to check
259 *
260 * Like pci_pcie_cap() but also checks that the PCIe capability version is
261 * >= 2. Note that v1 capability structures could be sparse in that not
262 * all register fields were required. v2 requires the entire structure to
263 * be present size wise, while still allowing for non-implemented registers
264 * to exist but they must be hardwired to 0.
265 *
266 * Due to the differences in the versions of capability structures, one
267 * must be careful not to try to access non-existent registers that may
268 * exist in early versions - v1 - of Express devices.
269 *
270 * Returns the offset of the PCIe capability structure as long as the
271 * capability version is >= 2; otherwise 0 is returned.
272 */
273static int pci_pcie_cap2(struct pci_dev *dev)
274{
275 u16 flags;
276 int pos;
277
278 pos = pci_pcie_cap(dev);
279 if (pos) {
280 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
281 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
282 pos = 0;
283 }
284
285 return pos;
286}
287
288/**
289 * pci_find_ext_capability - Find an extended capability 257 * pci_find_ext_capability - Find an extended capability
290 * @dev: PCI device to query 258 * @dev: PCI device to query
291 * @cap: capability code 259 * @cap: capability code
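
pci_pcie_cap2() can be removed because the pcie_capability_*() accessors perform an equivalent capability-version check internally before touching v2-only registers such as PCI_EXP_DEVCTL2. A sketch restating the removed check; the helper name is hypothetical:

	static bool sketch_pcie_cap_version_ge2(struct pci_dev *dev)
	{
		int pos = pci_pcie_cap(dev);
		u16 flags;

		if (!pos)
			return false;
		pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
		return (flags & PCI_EXP_FLAGS_VERS) >= 2;
	}
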
@@ -854,21 +822,6 @@ EXPORT_SYMBOL(pci_choose_state);
854 822
855#define PCI_EXP_SAVE_REGS 7 823#define PCI_EXP_SAVE_REGS 7
856 824
857#define pcie_cap_has_devctl(type, flags) 1
858#define pcie_cap_has_lnkctl(type, flags) \
859 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
860 (type == PCI_EXP_TYPE_ROOT_PORT || \
861 type == PCI_EXP_TYPE_ENDPOINT || \
862 type == PCI_EXP_TYPE_LEG_END))
863#define pcie_cap_has_sltctl(type, flags) \
864 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
865 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
866 (type == PCI_EXP_TYPE_DOWNSTREAM && \
867 (flags & PCI_EXP_FLAGS_SLOT))))
868#define pcie_cap_has_rtctl(type, flags) \
869 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
870 (type == PCI_EXP_TYPE_ROOT_PORT || \
871 type == PCI_EXP_TYPE_RC_EC))
872 825
873static struct pci_cap_saved_state *pci_find_saved_cap( 826static struct pci_cap_saved_state *pci_find_saved_cap(
874 struct pci_dev *pci_dev, char cap) 827 struct pci_dev *pci_dev, char cap)
@@ -885,13 +838,11 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
885 838
886static int pci_save_pcie_state(struct pci_dev *dev) 839static int pci_save_pcie_state(struct pci_dev *dev)
887{ 840{
888 int pos, i = 0; 841 int i = 0;
889 struct pci_cap_saved_state *save_state; 842 struct pci_cap_saved_state *save_state;
890 u16 *cap; 843 u16 *cap;
891 u16 flags;
892 844
893 pos = pci_pcie_cap(dev); 845 if (!pci_is_pcie(dev))
894 if (!pos)
895 return 0; 846 return 0;
896 847
897 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 848 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -899,60 +850,37 @@ static int pci_save_pcie_state(struct pci_dev *dev)
899 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 850 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
900 return -ENOMEM; 851 return -ENOMEM;
901 } 852 }
902 cap = (u16 *)&save_state->cap.data[0];
903
904 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
905 853
906 if (pcie_cap_has_devctl(dev->pcie_type, flags)) 854 cap = (u16 *)&save_state->cap.data[0];
907 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]); 855 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
908 if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) 856 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
909 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 857 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
910 if (pcie_cap_has_sltctl(dev->pcie_type, flags)) 858 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
911 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 859 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
912 if (pcie_cap_has_rtctl(dev->pcie_type, flags)) 860 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
913 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 861 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
914
915 pos = pci_pcie_cap2(dev);
916 if (!pos)
917 return 0;
918 862
919 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
920 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
921 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
922 return 0; 863 return 0;
923} 864}
924 865
925static void pci_restore_pcie_state(struct pci_dev *dev) 866static void pci_restore_pcie_state(struct pci_dev *dev)
926{ 867{
927 int i = 0, pos; 868 int i = 0;
928 struct pci_cap_saved_state *save_state; 869 struct pci_cap_saved_state *save_state;
929 u16 *cap; 870 u16 *cap;
930 u16 flags;
931 871
932 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 872 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
933 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 873 if (!save_state)
934 if (!save_state || pos <= 0)
935 return;
936 cap = (u16 *)&save_state->cap.data[0];
937
938 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
939
940 if (pcie_cap_has_devctl(dev->pcie_type, flags))
941 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
942 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
943 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
944 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
945 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
946 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
947 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
948
949 pos = pci_pcie_cap2(dev);
950 if (!pos)
951 return; 874 return;
952 875
953 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]); 876 cap = (u16 *)&save_state->cap.data[0];
954 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]); 877 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
955 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]); 878 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
879 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
880 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
881 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
882 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
883 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
956} 884}
957 885
958 886
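
With the conditional pcie_cap_has_*() logic gone, save and restore always walk the same seven registers, matching PCI_EXP_SAVE_REGS; registers a device does not implement simply read back as 0 through the accessors, so the buffer indices stay stable. The implied layout of save_state->cap.data[], with illustrative names only:

	enum {
		SAVE_DEVCTL,	/* PCI_EXP_DEVCTL  */
		SAVE_LNKCTL,	/* PCI_EXP_LNKCTL  */
		SAVE_SLTCTL,	/* PCI_EXP_SLTCTL  */
		SAVE_RTCTL,	/* PCI_EXP_RTCTL   */
		SAVE_DEVCTL2,	/* PCI_EXP_DEVCTL2 */
		SAVE_LNKCTL2,	/* PCI_EXP_LNKCTL2 */
		SAVE_SLTCTL2,	/* PCI_EXP_SLTCTL2 */
	};
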
@@ -1941,6 +1869,7 @@ void pci_pm_init(struct pci_dev *dev)
1941 dev->pm_cap = pm; 1869 dev->pm_cap = pm;
1942 dev->d3_delay = PCI_PM_D3_WAIT; 1870 dev->d3_delay = PCI_PM_D3_WAIT;
1943 dev->d3cold_delay = PCI_PM_D3COLD_WAIT; 1871 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
1872 dev->d3cold_allowed = true;
1944 1873
1945 dev->d1_support = false; 1874 dev->d1_support = false;
1946 dev->d2_support = false; 1875 dev->d2_support = false;
@@ -2066,35 +1995,24 @@ void pci_free_cap_save_buffers(struct pci_dev *dev)
2066 */ 1995 */
2067void pci_enable_ari(struct pci_dev *dev) 1996void pci_enable_ari(struct pci_dev *dev)
2068{ 1997{
2069 int pos;
2070 u32 cap; 1998 u32 cap;
2071 u16 ctrl;
2072 struct pci_dev *bridge; 1999 struct pci_dev *bridge;
2073 2000
2074 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) 2001 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2075 return; 2002 return;
2076 2003
2077 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 2004 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
2078 if (!pos)
2079 return; 2005 return;
2080 2006
2081 bridge = dev->bus->self; 2007 bridge = dev->bus->self;
2082 if (!bridge) 2008 if (!bridge)
2083 return; 2009 return;
2084 2010
2085 /* ARI is a PCIe cap v2 feature */ 2011 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2086 pos = pci_pcie_cap2(bridge);
2087 if (!pos)
2088 return;
2089
2090 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
2091 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 2012 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2092 return; 2013 return;
2093 2014
2094 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); 2015 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
2095 ctrl |= PCI_EXP_DEVCTL2_ARI;
2096 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
2097
2098 bridge->ari_enabled = 1; 2016 bridge->ari_enabled = 1;
2099} 2017}
2100 2018
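
pcie_capability_set_word(), used above to turn on ARI forwarding, is a read-modify-OR on the named register. A sketch under that assumption; the helper name is hypothetical:

	static int sketch_cap_set_word(struct pci_dev *dev, int reg, u16 set)
	{
		u16 val;
		int rc = pcie_capability_read_word(dev, reg, &val);

		if (rc)
			return rc;
		return pcie_capability_write_word(dev, reg, val | set);
	}
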
@@ -2109,20 +2027,14 @@ void pci_enable_ari(struct pci_dev *dev)
2109 */ 2027 */
2110void pci_enable_ido(struct pci_dev *dev, unsigned long type) 2028void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2111{ 2029{
2112 int pos; 2030 u16 ctrl = 0;
2113 u16 ctrl;
2114 2031
2115 /* ID-based Ordering is a PCIe cap v2 feature */
2116 pos = pci_pcie_cap2(dev);
2117 if (!pos)
2118 return;
2119
2120 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2121 if (type & PCI_EXP_IDO_REQUEST) 2032 if (type & PCI_EXP_IDO_REQUEST)
2122 ctrl |= PCI_EXP_IDO_REQ_EN; 2033 ctrl |= PCI_EXP_IDO_REQ_EN;
2123 if (type & PCI_EXP_IDO_COMPLETION) 2034 if (type & PCI_EXP_IDO_COMPLETION)
2124 ctrl |= PCI_EXP_IDO_CMP_EN; 2035 ctrl |= PCI_EXP_IDO_CMP_EN;
2125 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2036 if (ctrl)
2037 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
2126} 2038}
2127EXPORT_SYMBOL(pci_enable_ido); 2039EXPORT_SYMBOL(pci_enable_ido);
2128 2040
@@ -2133,20 +2045,14 @@ EXPORT_SYMBOL(pci_enable_ido);
2133 */ 2045 */
2134void pci_disable_ido(struct pci_dev *dev, unsigned long type) 2046void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2135{ 2047{
2136 int pos; 2048 u16 ctrl = 0;
2137 u16 ctrl;
2138 2049
2139 /* ID-based Ordering is a PCIe cap v2 feature */
2140 pos = pci_pcie_cap2(dev);
2141 if (!pos)
2142 return;
2143
2144 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2145 if (type & PCI_EXP_IDO_REQUEST) 2050 if (type & PCI_EXP_IDO_REQUEST)
2146 ctrl &= ~PCI_EXP_IDO_REQ_EN; 2051 ctrl |= PCI_EXP_IDO_REQ_EN;
2147 if (type & PCI_EXP_IDO_COMPLETION) 2052 if (type & PCI_EXP_IDO_COMPLETION)
2148 ctrl &= ~PCI_EXP_IDO_CMP_EN; 2053 ctrl |= PCI_EXP_IDO_CMP_EN;
2149 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2054 if (ctrl)
2055 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
2150} 2056}
2151EXPORT_SYMBOL(pci_disable_ido); 2057EXPORT_SYMBOL(pci_disable_ido);
2152 2058
@@ -2171,17 +2077,11 @@ EXPORT_SYMBOL(pci_disable_ido);
2171 */ 2077 */
2172int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) 2078int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2173{ 2079{
2174 int pos;
2175 u32 cap; 2080 u32 cap;
2176 u16 ctrl; 2081 u16 ctrl;
2177 int ret; 2082 int ret;
2178 2083
2179 /* OBFF is a PCIe cap v2 feature */ 2084 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2180 pos = pci_pcie_cap2(dev);
2181 if (!pos)
2182 return -ENOTSUPP;
2183
2184 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2185 if (!(cap & PCI_EXP_OBFF_MASK)) 2085 if (!(cap & PCI_EXP_OBFF_MASK))
2186 return -ENOTSUPP; /* no OBFF support at all */ 2086 return -ENOTSUPP; /* no OBFF support at all */
2187 2087
@@ -2192,7 +2092,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2192 return ret; 2092 return ret;
2193 } 2093 }
2194 2094
2195 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2095 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
2196 if (cap & PCI_EXP_OBFF_WAKE) 2096 if (cap & PCI_EXP_OBFF_WAKE)
2197 ctrl |= PCI_EXP_OBFF_WAKE_EN; 2097 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2198 else { 2098 else {
@@ -2210,7 +2110,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2210 return -ENOTSUPP; 2110 return -ENOTSUPP;
2211 } 2111 }
2212 } 2112 }
2213 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2113 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
2214 2114
2215 return 0; 2115 return 0;
2216} 2116}
@@ -2224,17 +2124,7 @@ EXPORT_SYMBOL(pci_enable_obff);
2224 */ 2124 */
2225void pci_disable_obff(struct pci_dev *dev) 2125void pci_disable_obff(struct pci_dev *dev)
2226{ 2126{
2227 int pos; 2127 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
2228 u16 ctrl;
2229
2230 /* OBFF is a PCIe cap v2 feature */
2231 pos = pci_pcie_cap2(dev);
2232 if (!pos)
2233 return;
2234
2235 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2236 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2237 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2238} 2128}
2239EXPORT_SYMBOL(pci_disable_obff); 2129EXPORT_SYMBOL(pci_disable_obff);
2240 2130
@@ -2247,15 +2137,9 @@ EXPORT_SYMBOL(pci_disable_obff);
2247 */ 2137 */
2248static bool pci_ltr_supported(struct pci_dev *dev) 2138static bool pci_ltr_supported(struct pci_dev *dev)
2249{ 2139{
2250 int pos;
2251 u32 cap; 2140 u32 cap;
2252 2141
2253 /* LTR is a PCIe cap v2 feature */ 2142 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2254 pos = pci_pcie_cap2(dev);
2255 if (!pos)
2256 return false;
2257
2258 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2259 2143
2260 return cap & PCI_EXP_DEVCAP2_LTR; 2144 return cap & PCI_EXP_DEVCAP2_LTR;
2261} 2145}
@@ -2272,22 +2156,15 @@ static bool pci_ltr_supported(struct pci_dev *dev)
2272 */ 2156 */
2273int pci_enable_ltr(struct pci_dev *dev) 2157int pci_enable_ltr(struct pci_dev *dev)
2274{ 2158{
2275 int pos;
2276 u16 ctrl;
2277 int ret; 2159 int ret;
2278 2160
2279 if (!pci_ltr_supported(dev))
2280 return -ENOTSUPP;
2281
2282 /* LTR is a PCIe cap v2 feature */
2283 pos = pci_pcie_cap2(dev);
2284 if (!pos)
2285 return -ENOTSUPP;
2286
2287 /* Only primary function can enable/disable LTR */ 2161 /* Only primary function can enable/disable LTR */
2288 if (PCI_FUNC(dev->devfn) != 0) 2162 if (PCI_FUNC(dev->devfn) != 0)
2289 return -EINVAL; 2163 return -EINVAL;
2290 2164
2165 if (!pci_ltr_supported(dev))
2166 return -ENOTSUPP;
2167
2291 /* Enable upstream ports first */ 2168 /* Enable upstream ports first */
2292 if (dev->bus->self) { 2169 if (dev->bus->self) {
2293 ret = pci_enable_ltr(dev->bus->self); 2170 ret = pci_enable_ltr(dev->bus->self);
@@ -2295,11 +2172,7 @@ int pci_enable_ltr(struct pci_dev *dev)
2295 return ret; 2172 return ret;
2296 } 2173 }
2297 2174
2298 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2175 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2299 ctrl |= PCI_EXP_LTR_EN;
2300 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2301
2302 return 0;
2303} 2176}
2304EXPORT_SYMBOL(pci_enable_ltr); 2177EXPORT_SYMBOL(pci_enable_ltr);
2305 2178
@@ -2309,24 +2182,14 @@ EXPORT_SYMBOL(pci_enable_ltr);
2309 */ 2182 */
2310void pci_disable_ltr(struct pci_dev *dev) 2183void pci_disable_ltr(struct pci_dev *dev)
2311{ 2184{
2312 int pos;
2313 u16 ctrl;
2314
2315 if (!pci_ltr_supported(dev))
2316 return;
2317
2318 /* LTR is a PCIe cap v2 feature */
2319 pos = pci_pcie_cap2(dev);
2320 if (!pos)
2321 return;
2322
2323 /* Only primary function can enable/disable LTR */ 2185 /* Only primary function can enable/disable LTR */
2324 if (PCI_FUNC(dev->devfn) != 0) 2186 if (PCI_FUNC(dev->devfn) != 0)
2325 return; 2187 return;
2326 2188
2327 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2189 if (!pci_ltr_supported(dev))
2328 ctrl &= ~PCI_EXP_LTR_EN; 2190 return;
2329 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2191
2192 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2330} 2193}
2331EXPORT_SYMBOL(pci_disable_ltr); 2194EXPORT_SYMBOL(pci_disable_ltr);
2332 2195
@@ -2409,9 +2272,6 @@ void pci_enable_acs(struct pci_dev *dev)
2409 if (!pci_acs_enable) 2272 if (!pci_acs_enable)
2410 return; 2273 return;
2411 2274
2412 if (!pci_is_pcie(dev))
2413 return;
2414
2415 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); 2275 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2416 if (!pos) 2276 if (!pos)
2417 return; 2277 return;
@@ -2459,8 +2319,8 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2459 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | 2319 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2460 PCI_ACS_EC | PCI_ACS_DT); 2320 PCI_ACS_EC | PCI_ACS_DT);
2461 2321
2462 if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM || 2322 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2463 pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || 2323 pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
2464 pdev->multifunction) { 2324 pdev->multifunction) {
2465 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); 2325 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2466 if (!pos) 2326 if (!pos)
@@ -3176,15 +3036,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3176static int pcie_flr(struct pci_dev *dev, int probe) 3036static int pcie_flr(struct pci_dev *dev, int probe)
3177{ 3037{
3178 int i; 3038 int i;
3179 int pos;
3180 u32 cap; 3039 u32 cap;
3181 u16 status, control; 3040 u16 status;
3182
3183 pos = pci_pcie_cap(dev);
3184 if (!pos)
3185 return -ENOTTY;
3186 3041
3187 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); 3042 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3188 if (!(cap & PCI_EXP_DEVCAP_FLR)) 3043 if (!(cap & PCI_EXP_DEVCAP_FLR))
3189 return -ENOTTY; 3044 return -ENOTTY;
3190 3045
@@ -3196,7 +3051,7 @@ static int pcie_flr(struct pci_dev *dev, int probe)
3196 if (i) 3051 if (i)
3197 msleep((1 << (i - 1)) * 100); 3052 msleep((1 << (i - 1)) * 100);
3198 3053
3199 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); 3054 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3200 if (!(status & PCI_EXP_DEVSTA_TRPND)) 3055 if (!(status & PCI_EXP_DEVSTA_TRPND))
3201 goto clear; 3056 goto clear;
3202 } 3057 }
@@ -3205,9 +3060,7 @@ static int pcie_flr(struct pci_dev *dev, int probe)
3205 "proceeding with reset anyway\n"); 3060 "proceeding with reset anyway\n");
3206 3061
3207clear: 3062clear:
3208 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control); 3063 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3209 control |= PCI_EXP_DEVCTL_BCR_FLR;
3210 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3211 3064
3212 msleep(100); 3065 msleep(100);
3213 3066
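
For reference, the transactions-pending wait in pcie_flr() polls four times, sleeping (1 << (i - 1)) * 100 ms before each retry. A worked tally of the budget (pure arithmetic, hypothetical helper):

	static unsigned int sketch_flr_wait_budget_ms(void)
	{
		unsigned int i, total = 0;

		for (i = 1; i < 4; i++)
			total += (1u << (i - 1)) * 100;	/* 100 + 200 + 400 */
		return total;				/* 700 ms worst case */
	}
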
@@ -3575,18 +3428,11 @@ EXPORT_SYMBOL(pcix_set_mmrbc);
3575 */ 3428 */
3576int pcie_get_readrq(struct pci_dev *dev) 3429int pcie_get_readrq(struct pci_dev *dev)
3577{ 3430{
3578 int ret, cap;
3579 u16 ctl; 3431 u16 ctl;
3580 3432
3581 cap = pci_pcie_cap(dev); 3433 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3582 if (!cap)
3583 return -EINVAL;
3584
3585 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3586 if (!ret)
3587 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3588 3434
3589 return ret; 3435 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3590} 3436}
3591EXPORT_SYMBOL(pcie_get_readrq); 3437EXPORT_SYMBOL(pcie_get_readrq);
3592 3438
@@ -3600,19 +3446,11 @@ EXPORT_SYMBOL(pcie_get_readrq);
3600 */ 3446 */
3601int pcie_set_readrq(struct pci_dev *dev, int rq) 3447int pcie_set_readrq(struct pci_dev *dev, int rq)
3602{ 3448{
3603 int cap, err = -EINVAL; 3449 u16 v;
3604 u16 ctl, v;
3605 3450
3606 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 3451 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3607 goto out; 3452 return -EINVAL;
3608
3609 cap = pci_pcie_cap(dev);
3610 if (!cap)
3611 goto out;
3612 3453
3613 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3614 if (err)
3615 goto out;
3616 /* 3454 /*
3617 * If using the "performance" PCIe config, we clamp the 3455 * If using the "performance" PCIe config, we clamp the
3618 * read rq size to the max packet size to prevent the 3456 * read rq size to the max packet size to prevent the
@@ -3630,14 +3468,8 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
3630 3468
3631 v = (ffs(rq) - 8) << 12; 3469 v = (ffs(rq) - 8) << 12;
3632 3470
3633 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 3471 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3634 ctl &= ~PCI_EXP_DEVCTL_READRQ; 3472 PCI_EXP_DEVCTL_READRQ, v);
3635 ctl |= v;
3636 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3637 }
3638
3639out:
3640 return err;
3641} 3473}
3642EXPORT_SYMBOL(pcie_set_readrq); 3474EXPORT_SYMBOL(pcie_set_readrq);
3643 3475
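
The simplified pcie_get_readrq()/pcie_set_readrq() rely on the DEVCTL encoding in which the read request size is 128 << field, with field held in bits 14:12 and computed as ffs(size) - 8. A worked example for 512 bytes, split into the two conversions used above:

	static u16 readrq_to_devctl_field(int rq)	/* 512 -> 0x2000 */
	{
		return (ffs(rq) - 8) << 12;	/* ffs(512) = 10, field = 2 */
	}

	static int devctl_field_to_readrq(u16 ctl)	/* 0x2000 -> 512 */
	{
		return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	}
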
@@ -3650,18 +3482,11 @@ EXPORT_SYMBOL(pcie_set_readrq);
3650 */ 3482 */
3651int pcie_get_mps(struct pci_dev *dev) 3483int pcie_get_mps(struct pci_dev *dev)
3652{ 3484{
3653 int ret, cap;
3654 u16 ctl; 3485 u16 ctl;
3655 3486
3656 cap = pci_pcie_cap(dev); 3487 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3657 if (!cap)
3658 return -EINVAL;
3659
3660 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3661 if (!ret)
3662 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3663 3488
3664 return ret; 3489 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3665} 3490}
3666 3491
3667/** 3492/**
@@ -3674,32 +3499,18 @@ int pcie_get_mps(struct pci_dev *dev)
3674 */ 3499 */
3675int pcie_set_mps(struct pci_dev *dev, int mps) 3500int pcie_set_mps(struct pci_dev *dev, int mps)
3676{ 3501{
3677 int cap, err = -EINVAL; 3502 u16 v;
3678 u16 ctl, v;
3679 3503
3680 if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) 3504 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3681 goto out; 3505 return -EINVAL;
3682 3506
3683 v = ffs(mps) - 8; 3507 v = ffs(mps) - 8;
3684 if (v > dev->pcie_mpss) 3508 if (v > dev->pcie_mpss)
3685 goto out; 3509 return -EINVAL;
3686 v <<= 5; 3510 v <<= 5;
3687 3511
3688 cap = pci_pcie_cap(dev); 3512 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3689 if (!cap) 3513 PCI_EXP_DEVCTL_PAYLOAD, v);
3690 goto out;
3691
3692 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3693 if (err)
3694 goto out;
3695
3696 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3697 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3698 ctl |= v;
3699 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3700 }
3701out:
3702 return err;
3703} 3514}
3704 3515
3705/** 3516/**
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 52229863e9fe..4e24cb8a94ae 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -288,7 +288,7 @@ static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
288 while (1) { 288 while (1) {
289 if (!pci_is_pcie(dev)) 289 if (!pci_is_pcie(dev))
290 break; 290 break;
291 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 291 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
292 return dev; 292 return dev;
293 if (!dev->bus->self) 293 if (!dev->bus->self)
294 break; 294 break;
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 58ad7917553c..c78778fc0cba 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -81,10 +81,11 @@ bool pci_aer_available(void)
81static int set_device_error_reporting(struct pci_dev *dev, void *data) 81static int set_device_error_reporting(struct pci_dev *dev, void *data)
82{ 82{
83 bool enable = *((bool *)data); 83 bool enable = *((bool *)data);
84 int type = pci_pcie_type(dev);
84 85
85 if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || 86 if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
86 (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || 87 (type == PCI_EXP_TYPE_UPSTREAM) ||
87 (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { 88 (type == PCI_EXP_TYPE_DOWNSTREAM)) {
88 if (enable) 89 if (enable)
89 pci_enable_pcie_error_reporting(dev); 90 pci_enable_pcie_error_reporting(dev);
90 else 91 else
@@ -121,19 +122,17 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
121static void aer_enable_rootport(struct aer_rpc *rpc) 122static void aer_enable_rootport(struct aer_rpc *rpc)
122{ 123{
123 struct pci_dev *pdev = rpc->rpd->port; 124 struct pci_dev *pdev = rpc->rpd->port;
124 int pos, aer_pos; 125 int aer_pos;
125 u16 reg16; 126 u16 reg16;
126 u32 reg32; 127 u32 reg32;
127 128
128 pos = pci_pcie_cap(pdev);
129 /* Clear PCIe Capability's Device Status */ 129 /* Clear PCIe Capability's Device Status */
130 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16); 130 pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
131 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); 131 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);
132 132
133 /* Disable system error generation in response to error messages */ 133 /* Disable system error generation in response to error messages */
134 pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16); 134 pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
135 reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); 135 SYSTEM_ERROR_INTR_ON_MESG_MASK);
136 pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
137 136
138 aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 137 aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
139 /* Clear error status */ 138 /* Clear error status */
@@ -395,9 +394,8 @@ static void aer_error_resume(struct pci_dev *dev)
395 u16 reg16; 394 u16 reg16;
396 395
397 /* Clean up Root device status */ 396 /* Clean up Root device status */
398 pos = pci_pcie_cap(dev); 397 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
399 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16); 398 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
400 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
401 399
402 /* Clean AER Root Error Status */ 400 /* Clean AER Root Error Status */
403 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 401 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 124f20ff11b2..5194a7d41730 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -60,7 +60,7 @@ static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
60 p = (struct acpi_hest_aer_common *)(hest_hdr + 1); 60 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
61 if (p->flags & ACPI_HEST_GLOBAL) { 61 if (p->flags & ACPI_HEST_GLOBAL) {
62 if ((pci_is_pcie(info->pci_dev) && 62 if ((pci_is_pcie(info->pci_dev) &&
63 info->pci_dev->pcie_type == pcie_type) || bridge) 63 pci_pcie_type(info->pci_dev) == pcie_type) || bridge)
64 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); 64 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
65 } else 65 } else
66 if (hest_match_pci(p, info->pci_dev)) 66 if (hest_match_pci(p, info->pci_dev))
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 0ca053538146..cefc0ddcacf6 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -32,53 +32,28 @@ static bool nosourceid;
32module_param(forceload, bool, 0); 32module_param(forceload, bool, 0);
33module_param(nosourceid, bool, 0); 33module_param(nosourceid, bool, 0);
34 34
35#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
36 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
37
35int pci_enable_pcie_error_reporting(struct pci_dev *dev) 38int pci_enable_pcie_error_reporting(struct pci_dev *dev)
36{ 39{
37 u16 reg16 = 0;
38 int pos;
39
40 if (pcie_aer_get_firmware_first(dev)) 40 if (pcie_aer_get_firmware_first(dev))
41 return -EIO; 41 return -EIO;
42 42
43 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 43 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
44 if (!pos)
45 return -EIO;
46
47 pos = pci_pcie_cap(dev);
48 if (!pos)
49 return -EIO; 44 return -EIO;
50 45
51 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 46 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
52 reg16 |= (PCI_EXP_DEVCTL_CERE |
53 PCI_EXP_DEVCTL_NFERE |
54 PCI_EXP_DEVCTL_FERE |
55 PCI_EXP_DEVCTL_URRE);
56 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
57
58 return 0;
59} 47}
60EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); 48EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
61 49
62int pci_disable_pcie_error_reporting(struct pci_dev *dev) 50int pci_disable_pcie_error_reporting(struct pci_dev *dev)
63{ 51{
64 u16 reg16 = 0;
65 int pos;
66
67 if (pcie_aer_get_firmware_first(dev)) 52 if (pcie_aer_get_firmware_first(dev))
68 return -EIO; 53 return -EIO;
69 54
70 pos = pci_pcie_cap(dev); 55 return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
71 if (!pos) 56 PCI_EXP_AER_FLAGS);
72 return -EIO;
73
74 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
75 reg16 &= ~(PCI_EXP_DEVCTL_CERE |
76 PCI_EXP_DEVCTL_NFERE |
77 PCI_EXP_DEVCTL_FERE |
78 PCI_EXP_DEVCTL_URRE);
79 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
80
81 return 0;
82} 57}
83EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); 58EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
84 59
@@ -151,18 +126,12 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
151 */ 126 */
152 if (atomic_read(&dev->enable_cnt) == 0) 127 if (atomic_read(&dev->enable_cnt) == 0)
153 return false; 128 return false;
154 pos = pci_pcie_cap(dev);
155 if (!pos)
156 return false;
157 129
158 /* Check if AER is enabled */ 130 /* Check if AER is enabled */
159 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 131 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
160 if (!(reg16 & ( 132 if (!(reg16 & PCI_EXP_AER_FLAGS))
161 PCI_EXP_DEVCTL_CERE |
162 PCI_EXP_DEVCTL_NFERE |
163 PCI_EXP_DEVCTL_FERE |
164 PCI_EXP_DEVCTL_URRE)))
165 return false; 133 return false;
134
166 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 135 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
167 if (!pos) 136 if (!pos)
168 return false; 137 return false;
@@ -465,7 +434,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev)
465 434
466 if (driver && driver->reset_link) { 435 if (driver && driver->reset_link) {
467 status = driver->reset_link(udev); 436 status = driver->reset_link(udev);
468 } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { 437 } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM) {
469 status = default_downstream_reset_link(udev); 438 status = default_downstream_reset_link(udev);
470 } else { 439 } else {
471 dev_printk(KERN_DEBUG, &dev->dev, 440 dev_printk(KERN_DEBUG, &dev->dev,
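
Defining PCI_EXP_AER_FLAGS once keeps the three users in aerdrv_core.c consistent: the mask is set on enable, cleared on disable, and tested in is_error_source(). A hypothetical predicate built from the same mask:

	static bool sketch_aer_reporting_enabled(struct pci_dev *dev)
	{
		u16 devctl;

		pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &devctl);
		return devctl & PCI_EXP_AER_FLAGS;
	}
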
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b500840a143b..213753b283a6 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -125,21 +125,16 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
125 125
126static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) 126static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
127{ 127{
128 int pos;
129 u16 reg16;
130 struct pci_dev *child; 128 struct pci_dev *child;
131 struct pci_bus *linkbus = link->pdev->subordinate; 129 struct pci_bus *linkbus = link->pdev->subordinate;
132 130
133 list_for_each_entry(child, &linkbus->devices, bus_list) { 131 list_for_each_entry(child, &linkbus->devices, bus_list) {
134 pos = pci_pcie_cap(child);
135 if (!pos)
136 return;
137 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
138 if (enable) 132 if (enable)
139 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; 133 pcie_capability_set_word(child, PCI_EXP_LNKCTL,
134 PCI_EXP_LNKCTL_CLKREQ_EN);
140 else 135 else
141 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; 136 pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
142 pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16); 137 PCI_EXP_LNKCTL_CLKREQ_EN);
143 } 138 }
144 link->clkpm_enabled = !!enable; 139 link->clkpm_enabled = !!enable;
145} 140}
@@ -157,7 +152,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
157 152
158static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) 153static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
159{ 154{
160 int pos, capable = 1, enabled = 1; 155 int capable = 1, enabled = 1;
161 u32 reg32; 156 u32 reg32;
162 u16 reg16; 157 u16 reg16;
163 struct pci_dev *child; 158 struct pci_dev *child;
@@ -165,16 +160,13 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
165 160
166 /* All functions should have the same cap and state, take the worst */ 161 /* All functions should have the same cap and state, take the worst */
167 list_for_each_entry(child, &linkbus->devices, bus_list) { 162 list_for_each_entry(child, &linkbus->devices, bus_list) {
168 pos = pci_pcie_cap(child); 163 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
169 if (!pos)
170 return;
171 pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
172 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { 164 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
173 capable = 0; 165 capable = 0;
174 enabled = 0; 166 enabled = 0;
175 break; 167 break;
176 } 168 }
177 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16); 169 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
178 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) 170 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
179 enabled = 0; 171 enabled = 0;
180 } 172 }
@@ -190,7 +182,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
190 */ 182 */
191static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) 183static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
192{ 184{
193 int ppos, cpos, same_clock = 1; 185 int same_clock = 1;
194 u16 reg16, parent_reg, child_reg[8]; 186 u16 reg16, parent_reg, child_reg[8];
195 unsigned long start_jiffies; 187 unsigned long start_jiffies;
196 struct pci_dev *child, *parent = link->pdev; 188 struct pci_dev *child, *parent = link->pdev;
@@ -203,46 +195,43 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
203 BUG_ON(!pci_is_pcie(child)); 195 BUG_ON(!pci_is_pcie(child));
204 196
205 /* Check downstream component if bit Slot Clock Configuration is 1 */ 197 /* Check downstream component if bit Slot Clock Configuration is 1 */
206 cpos = pci_pcie_cap(child); 198 pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
207 pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
208 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 199 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
209 same_clock = 0; 200 same_clock = 0;
210 201
211 /* Check upstream component if bit Slot Clock Configuration is 1 */ 202 /* Check upstream component if bit Slot Clock Configuration is 1 */
212 ppos = pci_pcie_cap(parent); 203 pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
213 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
214 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 204 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
215 same_clock = 0; 205 same_clock = 0;
216 206
217 /* Configure downstream component, all functions */ 207 /* Configure downstream component, all functions */
218 list_for_each_entry(child, &linkbus->devices, bus_list) { 208 list_for_each_entry(child, &linkbus->devices, bus_list) {
219 cpos = pci_pcie_cap(child); 209 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
220 pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
221 child_reg[PCI_FUNC(child->devfn)] = reg16; 210 child_reg[PCI_FUNC(child->devfn)] = reg16;
222 if (same_clock) 211 if (same_clock)
223 reg16 |= PCI_EXP_LNKCTL_CCC; 212 reg16 |= PCI_EXP_LNKCTL_CCC;
224 else 213 else
225 reg16 &= ~PCI_EXP_LNKCTL_CCC; 214 reg16 &= ~PCI_EXP_LNKCTL_CCC;
226 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16); 215 pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
227 } 216 }
228 217
229 /* Configure upstream component */ 218 /* Configure upstream component */
230 pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16); 219 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
231 parent_reg = reg16; 220 parent_reg = reg16;
232 if (same_clock) 221 if (same_clock)
233 reg16 |= PCI_EXP_LNKCTL_CCC; 222 reg16 |= PCI_EXP_LNKCTL_CCC;
234 else 223 else
235 reg16 &= ~PCI_EXP_LNKCTL_CCC; 224 reg16 &= ~PCI_EXP_LNKCTL_CCC;
236 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); 225 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
237 226
238 /* Retrain link */ 227 /* Retrain link */
239 reg16 |= PCI_EXP_LNKCTL_RL; 228 reg16 |= PCI_EXP_LNKCTL_RL;
240 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); 229 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
241 230
242 /* Wait for link training end. Break out after waiting for timeout */ 231 /* Wait for link training end. Break out after waiting for timeout */
243 start_jiffies = jiffies; 232 start_jiffies = jiffies;
244 for (;;) { 233 for (;;) {
245 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16); 234 pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
246 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 235 if (!(reg16 & PCI_EXP_LNKSTA_LT))
247 break; 236 break;
248 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) 237 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
@@ -255,12 +244,10 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
255 /* Training failed. Restore common clock configurations */ 244 /* Training failed. Restore common clock configurations */
256 dev_printk(KERN_ERR, &parent->dev, 245 dev_printk(KERN_ERR, &parent->dev,
257 "ASPM: Could not configure common clock\n"); 246 "ASPM: Could not configure common clock\n");
258 list_for_each_entry(child, &linkbus->devices, bus_list) { 247 list_for_each_entry(child, &linkbus->devices, bus_list)
259 cpos = pci_pcie_cap(child); 248 pcie_capability_write_word(child, PCI_EXP_LNKCTL,
260 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, 249 child_reg[PCI_FUNC(child->devfn)]);
261 child_reg[PCI_FUNC(child->devfn)]); 250 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
262 }
263 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
264} 251}
265 252
266/* Convert L0s latency encoding to ns */ 253/* Convert L0s latency encoding to ns */
@@ -305,16 +292,14 @@ struct aspm_register_info {
305static void pcie_get_aspm_reg(struct pci_dev *pdev, 292static void pcie_get_aspm_reg(struct pci_dev *pdev,
306 struct aspm_register_info *info) 293 struct aspm_register_info *info)
307{ 294{
308 int pos;
309 u16 reg16; 295 u16 reg16;
310 u32 reg32; 296 u32 reg32;
311 297
312 pos = pci_pcie_cap(pdev); 298 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
313 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
314 info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 299 info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
315 info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 300 info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
316 info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 301 info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
317 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 302 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
318 info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; 303 info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
319} 304}
320 305
@@ -412,7 +397,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
412 * do ASPM for now. 397 * do ASPM for now.
413 */ 398 */
414 list_for_each_entry(child, &linkbus->devices, bus_list) { 399 list_for_each_entry(child, &linkbus->devices, bus_list) {
415 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 400 if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
416 link->aspm_disable = ASPM_STATE_ALL; 401 link->aspm_disable = ASPM_STATE_ALL;
417 break; 402 break;
418 } 403 }
@@ -420,17 +405,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
420 405
421 /* Get and check endpoint acceptable latencies */ 406 /* Get and check endpoint acceptable latencies */
422 list_for_each_entry(child, &linkbus->devices, bus_list) { 407 list_for_each_entry(child, &linkbus->devices, bus_list) {
423 int pos;
424 u32 reg32, encoding; 408 u32 reg32, encoding;
425 struct aspm_latency *acceptable = 409 struct aspm_latency *acceptable =
426 &link->acceptable[PCI_FUNC(child->devfn)]; 410 &link->acceptable[PCI_FUNC(child->devfn)];
427 411
428 if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT && 412 if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
429 child->pcie_type != PCI_EXP_TYPE_LEG_END) 413 pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
430 continue; 414 continue;
431 415
432 pos = pci_pcie_cap(child); 416 pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
433 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
434 /* Calculate endpoint L0s acceptable latency */ 417 /* Calculate endpoint L0s acceptable latency */
435 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 418 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
436 acceptable->l0s = calc_l0s_acceptable(encoding); 419 acceptable->l0s = calc_l0s_acceptable(encoding);
@@ -444,13 +427,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
444 427
445static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) 428static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
446{ 429{
447 u16 reg16; 430 pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, 0x3, val);
448 int pos = pci_pcie_cap(pdev);
449
450 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
451 reg16 &= ~0x3;
452 reg16 |= val;
453 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
454} 431}
455 432
456static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) 433static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
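
In pcie_config_aspm_dev() above, the literal 0x3 is the two-bit ASPM Control field of Link Control (the PCI_EXP_LNKCTL_ASPMC mask). The one-liner is equivalent to this open-coded sketch:

	static void sketch_config_aspm_dev(struct pci_dev *pdev, u32 val)
	{
		u16 reg16;

		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
		reg16 = (reg16 & ~0x3) | val;	/* replace the ASPM Control bits */
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, reg16);
	}
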
@@ -505,7 +482,6 @@ static void free_link_state(struct pcie_link_state *link)
505static int pcie_aspm_sanity_check(struct pci_dev *pdev) 482static int pcie_aspm_sanity_check(struct pci_dev *pdev)
506{ 483{
507 struct pci_dev *child; 484 struct pci_dev *child;
508 int pos;
509 u32 reg32; 485 u32 reg32;
510 486
511 /* 487 /*
@@ -513,8 +489,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
513 * very strange. Disable ASPM for the whole slot 489 * very strange. Disable ASPM for the whole slot
514 */ 490 */
515 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { 491 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
516 pos = pci_pcie_cap(child); 492 if (!pci_is_pcie(child))
517 if (!pos)
518 return -EINVAL; 493 return -EINVAL;
519 494
520 /* 495 /*
@@ -530,7 +505,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
530 * Disable ASPM for pre-1.1 PCIe devices; we follow MS in using the 505
531 * RBER bit to determine whether a function is a 1.1-version device 506
532 */ 507 */
533 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); 508 pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
534 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { 509 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
535 dev_printk(KERN_INFO, &child->dev, "disabling ASPM" 510 dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
536 " on pre-1.1 PCIe device. You can enable it" 511 " on pre-1.1 PCIe device. You can enable it"
@@ -552,7 +527,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
552 INIT_LIST_HEAD(&link->children); 527 INIT_LIST_HEAD(&link->children);
553 INIT_LIST_HEAD(&link->link); 528 INIT_LIST_HEAD(&link->link);
554 link->pdev = pdev; 529 link->pdev = pdev;
555 if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { 530 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) {
556 struct pcie_link_state *parent; 531 struct pcie_link_state *parent;
557 parent = pdev->bus->parent->self->link_state; 532 parent = pdev->bus->parent->self->link_state;
558 if (!parent) { 533 if (!parent) {
@@ -585,12 +560,12 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
585 560
586 if (!pci_is_pcie(pdev) || pdev->link_state) 561 if (!pci_is_pcie(pdev) || pdev->link_state)
587 return; 562 return;
588 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 563 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
589 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 564 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
590 return; 565 return;
591 566
592 /* VIA has a strange chipset, root port is under a bridge */ 567 /* VIA has a strange chipset, root port is under a bridge */
593 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && 568 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
594 pdev->bus->self) 569 pdev->bus->self)
595 return; 570 return;
596 571
@@ -647,8 +622,8 @@ static void pcie_update_aspm_capable(struct pcie_link_state *root)
647 if (link->root != root) 622 if (link->root != root)
648 continue; 623 continue;
649 list_for_each_entry(child, &linkbus->devices, bus_list) { 624 list_for_each_entry(child, &linkbus->devices, bus_list) {
650 if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) && 625 if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
651 (child->pcie_type != PCI_EXP_TYPE_LEG_END)) 626 (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
652 continue; 627 continue;
653 pcie_aspm_check_latency(child); 628 pcie_aspm_check_latency(child);
654 } 629 }
@@ -663,8 +638,8 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
663 638
664 if (!pci_is_pcie(pdev) || !parent || !parent->link_state) 639 if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
665 return; 640 return;
666 if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 641 if ((pci_pcie_type(parent) != PCI_EXP_TYPE_ROOT_PORT) &&
667 (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 642 (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM))
668 return; 643 return;
669 644
670 down_read(&pci_bus_sem); 645 down_read(&pci_bus_sem);
@@ -704,8 +679,8 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
704 679
705 if (aspm_disabled || !pci_is_pcie(pdev) || !link) 680 if (aspm_disabled || !pci_is_pcie(pdev) || !link)
706 return; 681 return;
707 if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 682 if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
708 (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 683 (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
709 return; 684 return;
710 /* 685 /*
711 * Devices changed PM state, we should recheck if latency 686 * Devices changed PM state, we should recheck if latency
@@ -729,8 +704,8 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
729 if (aspm_policy != POLICY_POWERSAVE) 704 if (aspm_policy != POLICY_POWERSAVE)
730 return; 705 return;
731 706
732 if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 707 if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
733 (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 708 (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
734 return; 709 return;
735 710
736 down_read(&pci_bus_sem); 711 down_read(&pci_bus_sem);
@@ -757,8 +732,8 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
757 if (!pci_is_pcie(pdev)) 732 if (!pci_is_pcie(pdev))
758 return; 733 return;
759 734
760 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || 735 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
761 pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) 736 pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
762 parent = pdev; 737 parent = pdev;
763 if (!parent || !parent->link_state) 738 if (!parent || !parent->link_state)
764 return; 739 return;
@@ -933,8 +908,8 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
933 struct pcie_link_state *link_state = pdev->link_state; 908 struct pcie_link_state *link_state = pdev->link_state;
934 909
935 if (!pci_is_pcie(pdev) || 910 if (!pci_is_pcie(pdev) ||
936 (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 911 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
937 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 912 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
938 return; 913 return;
939 914
940 if (link_state->aspm_support) 915 if (link_state->aspm_support)
@@ -950,8 +925,8 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
950 struct pcie_link_state *link_state = pdev->link_state; 925 struct pcie_link_state *link_state = pdev->link_state;
951 926
952 if (!pci_is_pcie(pdev) || 927 if (!pci_is_pcie(pdev) ||
953 (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 928 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
954 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 929 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
955 return; 930 return;
956 931
957 if (link_state->aspm_support) 932 if (link_state->aspm_support)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 001f1b78f39c..9ca0dc9ffd84 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -57,17 +57,12 @@ struct pcie_pme_service_data {
57 */ 57 */
58void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) 58void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
59{ 59{
60 int rtctl_pos;
61 u16 rtctl;
62
63 rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL;
64
65 pci_read_config_word(dev, rtctl_pos, &rtctl);
66 if (enable) 60 if (enable)
67 rtctl |= PCI_EXP_RTCTL_PMEIE; 61 pcie_capability_set_word(dev, PCI_EXP_RTCTL,
62 PCI_EXP_RTCTL_PMEIE);
68 else 63 else
69 rtctl &= ~PCI_EXP_RTCTL_PMEIE; 64 pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
70 pci_write_config_word(dev, rtctl_pos, rtctl); 65 PCI_EXP_RTCTL_PMEIE);
71} 66}
72 67
73/** 68/**
@@ -120,7 +115,7 @@ static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
120 if (!dev) 115 if (!dev)
121 return false; 116 return false;
122 117
123 if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 118 if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
124 down_read(&pci_bus_sem); 119 down_read(&pci_bus_sem);
125 if (pcie_pme_walk_bus(bus)) 120 if (pcie_pme_walk_bus(bus))
126 found = true; 121 found = true;
@@ -226,18 +221,15 @@ static void pcie_pme_work_fn(struct work_struct *work)
226 struct pcie_pme_service_data *data = 221 struct pcie_pme_service_data *data =
227 container_of(work, struct pcie_pme_service_data, work); 222 container_of(work, struct pcie_pme_service_data, work);
228 struct pci_dev *port = data->srv->port; 223 struct pci_dev *port = data->srv->port;
229 int rtsta_pos;
230 u32 rtsta; 224 u32 rtsta;
231 225
232 rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
233
234 spin_lock_irq(&data->lock); 226 spin_lock_irq(&data->lock);
235 227
236 for (;;) { 228 for (;;) {
237 if (data->noirq) 229 if (data->noirq)
238 break; 230 break;
239 231
240 pci_read_config_dword(port, rtsta_pos, &rtsta); 232 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
241 if (rtsta & PCI_EXP_RTSTA_PME) { 233 if (rtsta & PCI_EXP_RTSTA_PME) {
242 /* 234 /*
243 * Clear PME status of the port. If there are other 235 * Clear PME status of the port. If there are other
@@ -276,17 +268,14 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
276{ 268{
277 struct pci_dev *port; 269 struct pci_dev *port;
278 struct pcie_pme_service_data *data; 270 struct pcie_pme_service_data *data;
279 int rtsta_pos;
280 u32 rtsta; 271 u32 rtsta;
281 unsigned long flags; 272 unsigned long flags;
282 273
283 port = ((struct pcie_device *)context)->port; 274 port = ((struct pcie_device *)context)->port;
284 data = get_service_data((struct pcie_device *)context); 275 data = get_service_data((struct pcie_device *)context);
285 276
286 rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
287
288 spin_lock_irqsave(&data->lock, flags); 277 spin_lock_irqsave(&data->lock, flags);
289 pci_read_config_dword(port, rtsta_pos, &rtsta); 278 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
290 279
291 if (!(rtsta & PCI_EXP_RTSTA_PME)) { 280 if (!(rtsta & PCI_EXP_RTSTA_PME)) {
292 spin_unlock_irqrestore(&data->lock, flags); 281 spin_unlock_irqrestore(&data->lock, flags);
@@ -335,13 +324,13 @@ static void pcie_pme_mark_devices(struct pci_dev *port)
335 struct pci_dev *dev; 324 struct pci_dev *dev;
336 325
337 /* Check if this is a root port event collector. */ 326 /* Check if this is a root port event collector. */
338 if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) 327 if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
339 return; 328 return;
340 329
341 down_read(&pci_bus_sem); 330 down_read(&pci_bus_sem);
342 list_for_each_entry(dev, &bus->devices, bus_list) 331 list_for_each_entry(dev, &bus->devices, bus_list)
343 if (pci_is_pcie(dev) 332 if (pci_is_pcie(dev)
344 && dev->pcie_type == PCI_EXP_TYPE_RC_END) 333 && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
345 pcie_pme_set_native(dev, NULL); 334 pcie_pme_set_native(dev, NULL);
346 up_read(&pci_bus_sem); 335 up_read(&pci_bus_sem);
347 } 336 }
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 18bf90f748f6..67be55a7f260 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -38,7 +38,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
38 return 0; 38 return 0;
39 39
40 if ((driver->port_type != PCIE_ANY_PORT) && 40 if ((driver->port_type != PCIE_ANY_PORT) &&
41 (driver->port_type != pciedev->port->pcie_type)) 41 (driver->port_type != pci_pcie_type(pciedev->port)))
42 return 0; 42 return 0;
43 43
44 return 1; 44 return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 75915b30ad19..aede99171e90 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -246,8 +246,7 @@ static void cleanup_service_irqs(struct pci_dev *dev)
246 */ 246 */
247static int get_port_device_capability(struct pci_dev *dev) 247static int get_port_device_capability(struct pci_dev *dev)
248{ 248{
249 int services = 0, pos; 249 int services = 0;
250 u16 reg16;
251 u32 reg32; 250 u32 reg32;
252 int cap_mask = 0; 251 int cap_mask = 0;
253 int err; 252 int err;
@@ -265,11 +264,9 @@ static int get_port_device_capability(struct pci_dev *dev)
265 return 0; 264 return 0;
266 } 265 }
267 266
268 pos = pci_pcie_cap(dev);
269 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
270 /* Hot-Plug Capable */ 267 /* Hot-Plug Capable */
271 if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { 268 if (cap_mask & PCIE_PORT_SERVICE_HP) {
272 pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32); 269 pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
273 if (reg32 & PCI_EXP_SLTCAP_HPC) { 270 if (reg32 & PCI_EXP_SLTCAP_HPC) {
274 services |= PCIE_PORT_SERVICE_HP; 271 services |= PCIE_PORT_SERVICE_HP;
275 /* 272 /*
@@ -277,10 +274,8 @@ static int get_port_device_capability(struct pci_dev *dev)
277 * enabled by the BIOS and the hot-plug service driver 274 * enabled by the BIOS and the hot-plug service driver
278 * is not loaded. 275 * is not loaded.
279 */ 276 */
280 pos += PCI_EXP_SLTCTL; 277 pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
281 pci_read_config_word(dev, pos, &reg16); 278 PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
282 reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
283 pci_write_config_word(dev, pos, reg16);
284 } 279 }
285 } 280 }
286 /* AER capable */ 281 /* AER capable */
@@ -298,7 +293,7 @@ static int get_port_device_capability(struct pci_dev *dev)
298 services |= PCIE_PORT_SERVICE_VC; 293 services |= PCIE_PORT_SERVICE_VC;
299 /* Root ports are capable of generating PME too */ 294 /* Root ports are capable of generating PME too */
300 if ((cap_mask & PCIE_PORT_SERVICE_PME) 295 if ((cap_mask & PCIE_PORT_SERVICE_PME)
301 && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { 296 && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
302 services |= PCIE_PORT_SERVICE_PME; 297 services |= PCIE_PORT_SERVICE_PME;
303 /* 298 /*
304 * Disable PME interrupt on this port in case it's been enabled 299 * Disable PME interrupt on this port in case it's been enabled
@@ -336,7 +331,7 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
336 device->release = release_pcie_device; /* callback to free pcie dev */ 331 device->release = release_pcie_device; /* callback to free pcie dev */
337 dev_set_name(device, "%s:pcie%02x", 332 dev_set_name(device, "%s:pcie%02x",
338 pci_name(pdev), 333 pci_name(pdev),
339 get_descriptor_id(pdev->pcie_type, service)); 334 get_descriptor_id(pci_pcie_type(pdev), service));
340 device->parent = &pdev->dev; 335 device->parent = &pdev->dev;
341 device_enable_async_suspend(device); 336 device_enable_async_suspend(device);
342 337
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3a7eefcb270a..b0340bc3aae4 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -64,14 +64,7 @@ __setup("pcie_ports=", pcie_port_setup);
64 */ 64 */
65void pcie_clear_root_pme_status(struct pci_dev *dev) 65void pcie_clear_root_pme_status(struct pci_dev *dev)
66{ 66{
67 int rtsta_pos; 67 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
68 u32 rtsta;
69
70 rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
71
72 pci_read_config_dword(dev, rtsta_pos, &rtsta);
73 rtsta |= PCI_EXP_RTSTA_PME;
74 pci_write_config_dword(dev, rtsta_pos, rtsta);
75} 68}
76 69
77static int pcie_portdrv_restore_config(struct pci_dev *dev) 70static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -95,7 +88,7 @@ static int pcie_port_resume_noirq(struct device *dev)
95 * which breaks ACPI-based runtime wakeup on PCI Express, so clear those 88 * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
96 * bits now just in case (shouldn't hurt). 89 * bits now just in case (shouldn't hurt).
97 */ 90 */
98 if(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 91 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
99 pcie_clear_root_pme_status(pdev); 92 pcie_clear_root_pme_status(pdev);
100 return 0; 93 return 0;
101} 94}
@@ -140,9 +133,17 @@ static int pcie_port_runtime_resume(struct device *dev)
140{ 133{
141 return 0; 134 return 0;
142} 135}
136
137static int pcie_port_runtime_idle(struct device *dev)
138{
139 /* Delay briefly to avoid overly frequent suspend/resume cycles */
140 pm_schedule_suspend(dev, 10);
141 return -EBUSY;
142}
143#else 143#else
144#define pcie_port_runtime_suspend NULL 144#define pcie_port_runtime_suspend NULL
145#define pcie_port_runtime_resume NULL 145#define pcie_port_runtime_resume NULL
146#define pcie_port_runtime_idle NULL
146#endif 147#endif
147 148
148static const struct dev_pm_ops pcie_portdrv_pm_ops = { 149static const struct dev_pm_ops pcie_portdrv_pm_ops = {
@@ -155,6 +156,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
155 .resume_noirq = pcie_port_resume_noirq, 156 .resume_noirq = pcie_port_resume_noirq,
156 .runtime_suspend = pcie_port_runtime_suspend, 157 .runtime_suspend = pcie_port_runtime_suspend,
157 .runtime_resume = pcie_port_runtime_resume, 158 .runtime_resume = pcie_port_runtime_resume,
159 .runtime_idle = pcie_port_runtime_idle,
158}; 160};
159 161
160#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) 162#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
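Returning -EBUSY from a runtime_idle callback tells the PM core not to suspend the device right away, while pm_schedule_suspend() queues the suspend after the given delay; the 10 ms above rate-limits suspend/resume churn on the port. A minimal sketch of wiring the same idiom into another driver — the foo_* names are hypothetical, and foo_runtime_suspend/resume are assumed to exist elsewhere:

    static int foo_runtime_idle(struct device *dev)
    {
            /* Defer instead of suspending synchronously from idle. */
            pm_schedule_suspend(dev, 10);
            return -EBUSY;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
                               foo_runtime_idle)
    };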
@@ -186,9 +188,9 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
186 int status; 188 int status;
187 189
188 if (!pci_is_pcie(dev) || 190 if (!pci_is_pcie(dev) ||
189 ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 191 ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
190 (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) && 192 (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
191 (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) 193 (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
192 return -ENODEV; 194 return -ENODEV;
193 195
194 if (!dev->irq && dev->pin) { 196 if (!dev->irq && dev->pin) {
@@ -200,6 +202,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
200 return status; 202 return status;
201 203
202 pci_save_state(dev); 204 pci_save_state(dev);
205 /*
206 * D3cold may not work properly on some PCIe ports, so disable
207 * it by default.
208 */
209 dev->d3cold_allowed = false;
203 if (!pci_match_id(port_runtime_pm_black_list, dev)) 210 if (!pci_match_id(port_runtime_pm_black_list, dev))
204 pm_runtime_put_noidle(&dev->dev); 211 pm_runtime_put_noidle(&dev->dev);
205 212
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6c143b4497ca..3cdba8b3f816 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
144 case PCI_BASE_ADDRESS_MEM_TYPE_32: 144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break; 145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); 147 /* 1M mem BAR treated as 32-bit BAR */
148 break; 148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64: 149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64; 150 flags |= IORESOURCE_MEM_64;
151 break; 151 break;
152 default: 152 default:
153 dev_warn(&dev->dev, 153 /* mem unknown type treated as 32-bit BAR */
154 "mem unknown type %x treated as 32-bit BAR\n",
155 mem_type);
156 break; 154 break;
157 } 155 }
158 return flags; 156 return flags;
@@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
173 u32 l, sz, mask; 171 u32 l, sz, mask;
174 u16 orig_cmd; 172 u16 orig_cmd;
175 struct pci_bus_region region; 173 struct pci_bus_region region;
174 bool bar_too_big = false, bar_disabled = false;
176 175
177 mask = type ? PCI_ROM_ADDRESS_MASK : ~0; 176 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
178 177
178 /* No printks while decoding is disabled! */
179 if (!dev->mmio_always_on) { 179 if (!dev->mmio_always_on) {
180 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); 180 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 pci_write_config_word(dev, PCI_COMMAND, 181 pci_write_config_word(dev, PCI_COMMAND,
@@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
240 goto fail; 240 goto fail;
241 241
242 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { 242 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", 243 bar_too_big = true;
244 pos);
245 goto fail; 244 goto fail;
246 } 245 }
247 246
@@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
252 region.start = 0; 251 region.start = 0;
253 region.end = sz64; 252 region.end = sz64;
254 pcibios_bus_to_resource(dev, res, &region); 253 pcibios_bus_to_resource(dev, res, &region);
254 bar_disabled = true;
255 } else { 255 } else {
256 region.start = l64; 256 region.start = l64;
257 region.end = l64 + sz64; 257 region.end = l64 + sz64;
258 pcibios_bus_to_resource(dev, res, &region); 258 pcibios_bus_to_resource(dev, res, &region);
259 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
260 pos, res);
261 } 259 }
262 } else { 260 } else {
263 sz = pci_size(l, sz, mask); 261 sz = pci_size(l, sz, mask);
@@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
268 region.start = l; 266 region.start = l;
269 region.end = l + sz; 267 region.end = l + sz;
270 pcibios_bus_to_resource(dev, res, &region); 268 pcibios_bus_to_resource(dev, res, &region);
271
272 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
273 } 269 }
274 270
275 out: 271 goto out;
272
273
274fail:
275 res->flags = 0;
276out:
276 if (!dev->mmio_always_on) 277 if (!dev->mmio_always_on)
277 pci_write_config_word(dev, PCI_COMMAND, orig_cmd); 278 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
278 279
280 if (bar_too_big)
281 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
282 if (res->flags && !bar_disabled)
283 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
284
279 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; 285 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
280 fail:
281 res->flags = 0;
282 goto out;
283} 286}
284 287
285static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) 288static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
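The restructuring above exists because the BAR is sized while PCI_COMMAND decode is disabled, and a printk in that window is unsafe — presumably because console output could be routed through a device behind the very decode that was just turned off. Error conditions are therefore recorded in bar_too_big/bar_disabled and reported only after decode is restored. A condensed sketch of the pattern, with the probing step elided and the function name hypothetical:

    static void size_bar_quietly(struct pci_dev *dev)
    {
            bool too_big = false;
            u16 orig_cmd;

            pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
            pci_write_config_word(dev, PCI_COMMAND,
                    orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));

            /* ... size the BAR here, setting flags but never printing ... */

            pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

            /* Decode is back on; logging is safe again. */
            if (too_big)
                    dev_err(&dev->dev, "can't handle 64-bit BAR\n");
    }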
@@ -603,10 +606,10 @@ static void pci_set_bus_speed(struct pci_bus *bus)
603 u32 linkcap; 606 u32 linkcap;
604 u16 linksta; 607 u16 linksta;
605 608
606 pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); 609 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
607 bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; 610 bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
608 611
609 pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); 612 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
610 pcie_update_link_speed(bus, linksta); 613 pcie_update_link_speed(bus, linksta);
611 } 614 }
612} 615}
@@ -929,24 +932,16 @@ void set_pcie_port_type(struct pci_dev *pdev)
929 pdev->is_pcie = 1; 932 pdev->is_pcie = 1;
930 pdev->pcie_cap = pos; 933 pdev->pcie_cap = pos;
931 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 934 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
932 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 935 pdev->pcie_flags_reg = reg16;
933 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); 936 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
934 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 937 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
935} 938}
936 939
937void set_pcie_hotplug_bridge(struct pci_dev *pdev) 940void set_pcie_hotplug_bridge(struct pci_dev *pdev)
938{ 941{
939 int pos;
940 u16 reg16;
941 u32 reg32; 942 u32 reg32;
942 943
943 pos = pci_pcie_cap(pdev); 944 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
944 if (!pos)
945 return;
946 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
947 if (!(reg16 & PCI_EXP_FLAGS_SLOT))
948 return;
949 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
950 if (reg32 & PCI_EXP_SLTCAP_HPC) 945 if (reg32 & PCI_EXP_SLTCAP_HPC)
951 pdev->is_hotplug_bridge = 1; 946 pdev->is_hotplug_bridge = 1;
952} 947}
@@ -1160,8 +1155,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
1160 if (class == PCI_CLASS_BRIDGE_HOST) 1155 if (class == PCI_CLASS_BRIDGE_HOST)
1161 return pci_cfg_space_size_ext(dev); 1156 return pci_cfg_space_size_ext(dev);
1162 1157
1163 pos = pci_pcie_cap(dev); 1158 if (!pci_is_pcie(dev)) {
1164 if (!pos) {
1165 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 1159 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1166 if (!pos) 1160 if (!pos)
1167 goto fail; 1161 goto fail;
@@ -1383,9 +1377,9 @@ static int only_one_child(struct pci_bus *bus)
1383 1377
1384 if (!parent || !pci_is_pcie(parent)) 1378 if (!parent || !pci_is_pcie(parent))
1385 return 0; 1379 return 0;
1386 if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 1380 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1387 return 1; 1381 return 1;
1388 if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM && 1382 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1389 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) 1383 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1390 return 1; 1384 return 1;
1391 return 0; 1385 return 0;
@@ -1462,7 +1456,7 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
1462 */ 1456 */
1463 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || 1457 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1464 (dev->bus->self && 1458 (dev->bus->self &&
1465 dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) 1459 pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
1466 *smpss = 0; 1460 *smpss = 0;
1467 1461
1468 if (*smpss > dev->pcie_mpss) 1462 if (*smpss > dev->pcie_mpss)
@@ -1478,7 +1472,8 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
1478 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 1472 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1479 mps = 128 << dev->pcie_mpss; 1473 mps = 128 << dev->pcie_mpss;
1480 1474
1481 if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) 1475 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1476 dev->bus->self)
1482 /* For "Performance", the assumption is made that 1477 /* For "Performance", the assumption is made that
1483 * downstream communication will never be larger than 1478 * downstream communication will never be larger than
1484 * the MRRS. So, the MPS only needs to be configured 1479 * the MRRS. So, the MPS only needs to be configured
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 51553179e967..7a451ff56ecc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3081,17 +3081,36 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
3081 3081
3082static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) 3082static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
3083{ 3083{
3084 int pos; 3084 int i;
3085 u16 status;
3085 3086
3086 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 3087 /*
3087 if (!pos) 3088 * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
3088 return -ENOTTY; 3089 *
3090 * The 82599 supports FLR on VFs, but FLR support is reported only
3091 * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
3092 * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
3093 */
3089 3094
3090 if (probe) 3095 if (probe)
3091 return 0; 3096 return 0;
3092 3097
 3093 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, 3098 /* Wait for Transaction Pending bit to clear */
3094 PCI_EXP_DEVCTL_BCR_FLR); 3099 for (i = 0; i < 4; i++) {
3100 if (i)
3101 msleep((1 << (i - 1)) * 100);
3102
3103 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3104 if (!(status & PCI_EXP_DEVSTA_TRPND))
3105 goto clear;
3106 }
3107
 3108 dev_err(&dev->dev, "transactions still pending; "
3109 "proceeding with reset anyway\n");
3110
3111clear:
3112 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3113
3095 msleep(100); 3114 msleep(100);
3096 3115
3097 return 0; 3116 return 0;
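The quirk polls DEVSTA with an exponential backoff — no wait on the first pass, then 100, 200 and 400 ms — before forcing the FLR regardless. The same wait could be factored into a helper; a hedged sketch under that assumption, with the function name hypothetical:

    /* Returns true once no transactions are pending, false on timeout. */
    static bool wait_for_pending_transactions(struct pci_dev *dev, int tries)
    {
            u16 status;
            int i;

            for (i = 0; i < tries; i++) {
                    if (i)
                            msleep((1 << (i - 1)) * 100);

                    pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
                    if (!(status & PCI_EXP_DEVSTA_TRPND))
                            return true;
            }
            return false;
    }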
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 993d4a0a2469..621b162ceb69 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -41,7 +41,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
41 continue; 41 continue;
42 } 42 }
43 /* PCI device should connect to a PCIe bridge */ 43 /* PCI device should connect to a PCIe bridge */
44 if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { 44 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) {
45 /* Busted hardware? */ 45 /* Busted hardware? */
46 WARN_ON_ONCE(1); 46 WARN_ON_ONCE(1);
47 return NULL; 47 return NULL;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index fb506137aaee..1e808ca338f8 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -697,6 +697,38 @@ static resource_size_t calculate_memsize(resource_size_t size,
697 return size; 697 return size;
698} 698}
699 699
700resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
701 unsigned long type)
702{
703 return 1;
704}
705
706#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
707#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
708#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
709
710static resource_size_t window_alignment(struct pci_bus *bus,
711 unsigned long type)
712{
713 resource_size_t align = 1, arch_align;
714
715 if (type & IORESOURCE_MEM)
716 align = PCI_P2P_DEFAULT_MEM_ALIGN;
717 else if (type & IORESOURCE_IO) {
718 /*
719 * Per spec, I/O windows are 4K-aligned, but some
720 * bridges have an extension to support 1K alignment.
721 */
722 if (bus->self->io_window_1k)
723 align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
724 else
725 align = PCI_P2P_DEFAULT_IO_ALIGN;
726 }
727
728 arch_align = pcibios_window_alignment(bus, type);
729 return max(align, arch_align);
730}
731
700/** 732/**
701 * pbus_size_io() - size the io window of a given bus 733 * pbus_size_io() - size the io window of a given bus
702 * 734 *
@@ -717,17 +749,12 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
717 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 749 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
718 unsigned long size = 0, size0 = 0, size1 = 0; 750 unsigned long size = 0, size0 = 0, size1 = 0;
719 resource_size_t children_add_size = 0; 751 resource_size_t children_add_size = 0;
720 resource_size_t min_align = 4096, align; 752 resource_size_t min_align, io_align, align;
721 753
722 if (!b_res) 754 if (!b_res)
723 return; 755 return;
724 756
725 /* 757 io_align = min_align = window_alignment(bus, IORESOURCE_IO);
726 * Per spec, I/O windows are 4K-aligned, but some bridges have an
727 * extension to support 1K alignment.
728 */
729 if (bus->self->io_window_1k)
730 min_align = 1024;
731 list_for_each_entry(dev, &bus->devices, bus_list) { 758 list_for_each_entry(dev, &bus->devices, bus_list) {
732 int i; 759 int i;
733 760
@@ -754,8 +781,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
754 } 781 }
755 } 782 }
756 783
757 if (min_align > 4096) 784 if (min_align > io_align)
758 min_align = 4096; 785 min_align = io_align;
759 786
760 size0 = calculate_iosize(size, min_size, size1, 787 size0 = calculate_iosize(size, min_size, size1,
761 resource_size(b_res), min_align); 788 resource_size(b_res), min_align);
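window_alignment() folds the architectural minimum (1 MiB for memory windows, 4 KiB or 1 KiB for I/O) together with whatever pcibios_window_alignment() reports, and the latter is deliberately __weak so an architecture can impose a stricter constraint. A purely hypothetical arch-side override, illustrating the hook rather than any real platform:

    /* Arch-specific definition; overrides the __weak generic stub. */
    resource_size_t pcibios_window_alignment(struct pci_bus *bus,
                                             unsigned long type)
    {
            /* Say firmware carves I/O windows in 64 KiB units. */
            if (type & IORESOURCE_IO)
                    return 0x10000;

            return 1;       /* no extra constraint for memory windows */
    }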
@@ -785,6 +812,28 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
785 } 812 }
786} 813}
787 814
815static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
816 int max_order)
817{
818 resource_size_t align = 0;
819 resource_size_t min_align = 0;
820 int order;
821
822 for (order = 0; order <= max_order; order++) {
823 resource_size_t align1 = 1;
824
825 align1 <<= (order + 20);
826
827 if (!align)
828 min_align = align1;
829 else if (ALIGN(align + min_align, min_align) < align1)
830 min_align = align1 >> 1;
831 align += aligns[order];
832 }
833
834 return min_align;
835}
836
788/** 837/**
789 * pbus_size_mem() - size the memory window of a given bus 838 * pbus_size_mem() - size the memory window of a given bus
790 * 839 *
@@ -864,19 +913,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
864 children_add_size += get_res_add_size(realloc_head, r); 913 children_add_size += get_res_add_size(realloc_head, r);
865 } 914 }
866 } 915 }
867 align = 0;
868 min_align = 0;
869 for (order = 0; order <= max_order; order++) {
870 resource_size_t align1 = 1;
871 916
872 align1 <<= (order + 20); 917 min_align = calculate_mem_align(aligns, max_order);
873 918 min_align = max(min_align, window_alignment(bus, b_res->flags & mask));
874 if (!align)
875 min_align = align1;
876 else if (ALIGN(align + min_align, min_align) < align1)
877 min_align = align1 >> 1;
878 align += aligns[order];
879 }
880 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 919 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
881 if (children_add_size > add_size) 920 if (children_add_size > add_size)
882 add_size = children_add_size; 921 add_size = children_add_size;
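calculate_mem_align() is a straight extraction of the loop that used to live inline in pbus_size_mem(): aligns[order] holds the total size requested at each power-of-two order (order 0 corresponding to 1 MiB), and min_align is adjusted downward when the accumulated size already packs smaller requests ahead of a larger boundary. A userspace transcription for experimenting with the folding, assuming power-of-two alignments; ALIGN is redefined locally to match the kernel's round-up semantics:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    static uint64_t calc_mem_align(const uint64_t *aligns, int max_order)
    {
            uint64_t align = 0, min_align = 0;
            int order;

            for (order = 0; order <= max_order; order++) {
                    uint64_t align1 = 1ULL << (order + 20);

                    if (!align)
                            min_align = align1;
                    else if (ALIGN(align + min_align, min_align) < align1)
                            min_align = align1 >> 1;
                    align += aligns[order];
            }
            return min_align;
    }

    int main(void)
    {
            /* 1 MiB of order-0 requests plus 2 MiB of order-1 requests */
            uint64_t aligns[2] = { 1 << 20, 2 << 20 };

            /* prints 1024 KiB: the 1 MiB boundary already suffices */
            printf("min_align = %llu KiB\n",
                   (unsigned long long)(calc_mem_align(aligns, 1) >> 10));
            return 0;
    }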
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5d44252b7342..d5e1625bbac2 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2219,9 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2219 const struct pci_device_id *id) 2219 const struct pci_device_id *id)
2220{ 2220{
2221 struct tsi721_device *priv; 2221 struct tsi721_device *priv;
2222 int cap;
2223 int err; 2222 int err;
2224 u32 regval;
2225 2223
2226 priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); 2224 priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
2227 if (priv == NULL) { 2225 if (priv == NULL) {
@@ -2330,20 +2328,16 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2330 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); 2328 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
2331 } 2329 }
2332 2330
2333 cap = pci_pcie_cap(pdev); 2331 BUG_ON(!pci_is_pcie(pdev));
2334 BUG_ON(cap == 0);
2335 2332
2336 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ 2333 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
2337 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval); 2334 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2338 regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | 2335 PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2339 PCI_EXP_DEVCTL_NOSNOOP_EN); 2336 PCI_EXP_DEVCTL_NOSNOOP_EN,
2340 regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; 2337 0x2 << MAX_READ_REQUEST_SZ_SHIFT);
2341 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
2342 2338
2343 /* Adjust PCIe completion timeout. */ 2339 /* Adjust PCIe completion timeout. */
2344 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval); 2340 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
2345 regval &= ~(0x0f);
2346 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
2347 2341
2348 /* 2342 /*
2349 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block 2343 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 831868904e02..1dd61f402b04 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -58,6 +58,7 @@ struct sam9_rtc {
58 struct rtc_device *rtcdev; 58 struct rtc_device *rtcdev;
59 u32 imr; 59 u32 imr;
60 void __iomem *gpbr; 60 void __iomem *gpbr;
61 int irq;
61}; 62};
62 63
63#define rtt_readl(rtc, field) \ 64#define rtt_readl(rtc, field) \
@@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
292{ 293{
293 struct resource *r, *r_gpbr; 294 struct resource *r, *r_gpbr;
294 struct sam9_rtc *rtc; 295 struct sam9_rtc *rtc;
295 int ret; 296 int ret, irq;
296 u32 mr; 297 u32 mr;
297 298
298 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 299 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
302 return -ENODEV; 303 return -ENODEV;
303 } 304 }
304 305
306 irq = platform_get_irq(pdev, 0);
307 if (irq < 0) {
308 dev_err(&pdev->dev, "failed to get interrupt resource\n");
309 return irq;
310 }
311
305 rtc = kzalloc(sizeof *rtc, GFP_KERNEL); 312 rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
306 if (!rtc) 313 if (!rtc)
307 return -ENOMEM; 314 return -ENOMEM;
308 315
316 rtc->irq = irq;
317
309 /* platform setup code should have handled this; sigh */ 318 /* platform setup code should have handled this; sigh */
310 if (!device_can_wakeup(&pdev->dev)) 319 if (!device_can_wakeup(&pdev->dev))
311 device_init_wakeup(&pdev->dev, 1); 320 device_init_wakeup(&pdev->dev, 1);
@@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
345 } 354 }
346 355
347 /* register irq handler after we know what name we'll use */ 356 /* register irq handler after we know what name we'll use */
348 ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt, 357 ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED,
349 IRQF_SHARED,
350 dev_name(&rtc->rtcdev->dev), rtc); 358 dev_name(&rtc->rtcdev->dev), rtc);
351 if (ret) { 359 if (ret) {
352 dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); 360 dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
353 rtc_device_unregister(rtc->rtcdev); 361 rtc_device_unregister(rtc->rtcdev);
354 goto fail_register; 362 goto fail_register;
355 } 363 }
@@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev)
386 394
387 /* disable all interrupts */ 395 /* disable all interrupts */
388 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); 396 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
389 free_irq(AT91_ID_SYS, rtc); 397 free_irq(rtc->irq, rtc);
390 398
391 rtc_device_unregister(rtc->rtcdev); 399 rtc_device_unregister(rtc->rtcdev);
392 400
@@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev,
423 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); 431 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
424 if (rtc->imr) { 432 if (rtc->imr) {
425 if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { 433 if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) {
426 enable_irq_wake(AT91_ID_SYS); 434 enable_irq_wake(rtc->irq);
427 /* don't let RTTINC cause wakeups */ 435 /* don't let RTTINC cause wakeups */
428 if (mr & AT91_RTT_RTTINCIEN) 436 if (mr & AT91_RTT_RTTINCIEN)
429 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); 437 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev)
441 449
442 if (rtc->imr) { 450 if (rtc->imr) {
443 if (device_may_wakeup(&pdev->dev)) 451 if (device_may_wakeup(&pdev->dev))
444 disable_irq_wake(AT91_ID_SYS); 452 disable_irq_wake(rtc->irq);
445 mr = rtt_readl(rtc, MR); 453 mr = rtt_readl(rtc, MR);
446 rtt_writel(rtc, MR, mr | rtc->imr); 454 rtt_writel(rtc, MR, mr | rtc->imr);
447 } 455 }
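The driver previously hardwired AT91_ID_SYS everywhere; caching the IRQ from platform_get_irq() makes the RTC usable on chips where the RTT sits on a different interrupt line. A modern-leaning sketch of the same probe step — foo_isr is hypothetical, and devm_request_irq() is used so the error and remove paths need no explicit free_irq():

    static int foo_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* propagates e.g. -ENXIO */

            return devm_request_irq(&pdev->dev, irq, foo_isr, IRQF_SHARED,
                                    dev_name(&pdev->dev), pdev);
    }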
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 9ce3a8f8754f..7cfdf2bd8edb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1615,13 +1615,11 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
1615char * 1615char *
1616qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) 1616qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1617{ 1617{
1618 int pcie_reg;
1619 struct qla_hw_data *ha = vha->hw; 1618 struct qla_hw_data *ha = vha->hw;
1620 char lwstr[6]; 1619 char lwstr[6];
1621 uint16_t lnk; 1620 uint16_t lnk;
1622 1621
1623 pcie_reg = pci_pcie_cap(ha->pdev); 1622 pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
1624 pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1625 ha->link_width = (lnk >> 4) & 0x3f; 1623 ha->link_width = (lnk >> 4) & 0x3f;
1626 1624
1627 strcpy(str, "PCIe ("); 1625 strcpy(str, "PCIe (");
@@ -2497,7 +2495,6 @@ fw_load_failed:
2497int 2495int
2498qla82xx_start_firmware(scsi_qla_host_t *vha) 2496qla82xx_start_firmware(scsi_qla_host_t *vha)
2499{ 2497{
2500 int pcie_cap;
2501 uint16_t lnk; 2498 uint16_t lnk;
2502 struct qla_hw_data *ha = vha->hw; 2499 struct qla_hw_data *ha = vha->hw;
2503 2500
@@ -2528,8 +2525,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2528 } 2525 }
2529 2526
2530 /* Negotiated Link width */ 2527 /* Negotiated Link width */
2531 pcie_cap = pci_pcie_cap(ha->pdev); 2528 pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
2532 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2533 ha->link_width = (lnk >> 4) & 0x3f; 2529 ha->link_width = (lnk >> 4) & 0x3f;
2534 2530
2535 /* Synchronize with Receive peg */ 2531 /* Synchronize with Receive peg */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 939d7261c37a..807bf76f1b6a 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1566,7 +1566,6 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
1566static int 1566static int
1567qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) 1567qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1568{ 1568{
1569 int pcie_cap;
1570 uint16_t lnk; 1569 uint16_t lnk;
1571 1570
1572 /* scrub dma mask expansion register */ 1571 /* scrub dma mask expansion register */
@@ -1590,8 +1589,7 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1590 } 1589 }
1591 1590
1592 /* Negotiated Link width */ 1591 /* Negotiated Link width */
1593 pcie_cap = pci_pcie_cap(ha->pdev); 1592 pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
1594 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
1595 ha->link_width = (lnk >> 4) & 0x3f; 1593 ha->link_width = (lnk >> 4) & 0x3f;
1596 1594
1597 /* Synchronize with Receive peg */ 1595 /* Synchronize with Receive peg */
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 029725c89e58..49553f88c7b3 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -3995,16 +3995,14 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3995static int et131x_pci_init(struct et131x_adapter *adapter, 3995static int et131x_pci_init(struct et131x_adapter *adapter,
3996 struct pci_dev *pdev) 3996 struct pci_dev *pdev)
3997{ 3997{
3998 int cap = pci_pcie_cap(pdev);
3999 u16 max_payload; 3998 u16 max_payload;
4000 u16 ctl;
4001 int i, rc; 3999 int i, rc;
4002 4000
4003 rc = et131x_init_eeprom(adapter); 4001 rc = et131x_init_eeprom(adapter);
4004 if (rc < 0) 4002 if (rc < 0)
4005 goto out; 4003 goto out;
4006 4004
4007 if (!cap) { 4005 if (!pci_is_pcie(pdev)) {
4008 dev_err(&pdev->dev, "Missing PCIe capabilities\n"); 4006 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4009 goto err_out; 4007 goto err_out;
4010 } 4008 }
@@ -4012,7 +4010,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
4012 /* Let's set up the PORT LOGIC Register. First we need to know what 4010 /* Let's set up the PORT LOGIC Register. First we need to know what
4013 * the max_payload_size is 4011 * the max_payload_size is
4014 */ 4012 */
4015 if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) { 4013 if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
4016 dev_err(&pdev->dev, 4014 dev_err(&pdev->dev,
4017 "Could not read PCI config space for Max Payload Size\n"); 4015 "Could not read PCI config space for Max Payload Size\n");
4018 goto err_out; 4016 goto err_out;
@@ -4049,17 +4047,10 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
4049 } 4047 }
4050 4048
4051 /* Change the max read size to 2k */ 4049 /* Change the max read size to 2k */
4052 if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) { 4050 if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
4051 PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
4053 dev_err(&pdev->dev, 4052 dev_err(&pdev->dev,
4054 "Could not read PCI config space for Max read size\n"); 4053 "Couldn't change PCI config space for Max read size\n");
4055 goto err_out;
4056 }
4057
4058 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
4059
4060 if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
4061 dev_err(&pdev->dev,
4062 "Could not write PCI config space for Max read size\n");
4063 goto err_out; 4054 goto err_out;
4064 } 4055 }
4065 4056
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index ddadcc3e4e7c..5abbee37cdca 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -31,12 +31,10 @@ static void rtl8192_parse_pci_configuration(struct pci_dev *pdev,
31 struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); 31 struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
32 32
33 u8 tmp; 33 u8 tmp;
34 int pos; 34 u16 LinkCtrlReg;
35 u8 LinkCtrlReg;
36 35
37 pos = pci_find_capability(priv->pdev, PCI_CAP_ID_EXP); 36 pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg);
38 pci_read_config_byte(priv->pdev, pos + PCI_EXP_LNKCTL, &LinkCtrlReg); 37 priv->NdisAdapter.LinkCtrlReg = (u8)LinkCtrlReg;
39 priv->NdisAdapter.LinkCtrlReg = LinkCtrlReg;
40 38
41 RT_TRACE(COMP_INIT, "Link Control Register =%x\n", 39 RT_TRACE(COMP_INIT, "Link Control Register =%x\n",
42 priv->NdisAdapter.LinkCtrlReg); 40 priv->NdisAdapter.LinkCtrlReg);
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
index 77da6a2f43dc..c03ecdd31e4c 100644
--- a/drivers/video/auo_k190x.c
+++ b/drivers/video/auo_k190x.c
@@ -987,7 +987,6 @@ err_regfb:
987 fb_dealloc_cmap(&info->cmap); 987 fb_dealloc_cmap(&info->cmap);
988err_cmap: 988err_cmap:
989 fb_deferred_io_cleanup(info); 989 fb_deferred_io_cleanup(info);
990 kfree(info->fbdefio);
991err_defio: 990err_defio:
992 vfree((void *)info->screen_base); 991 vfree((void *)info->screen_base);
993err_irq: 992err_irq:
@@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev)
1022 fb_dealloc_cmap(&info->cmap); 1021 fb_dealloc_cmap(&info->cmap);
1023 1022
1024 fb_deferred_io_cleanup(info); 1023 fb_deferred_io_cleanup(info);
1025 kfree(info->fbdefio);
1026 1024
1027 vfree((void *)info->screen_base); 1025 vfree((void *)info->screen_base);
1028 1026
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index 28b1a834906b..61b182bf32a2 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
162 image.depth = 1; 162 image.depth = 1;
163 163
164 if (attribute) { 164 if (attribute) {
165 buf = kmalloc(cellsize, GFP_KERNEL); 165 buf = kmalloc(cellsize, GFP_ATOMIC);
166 if (!buf) 166 if (!buf)
167 return; 167 return;
168 } 168 }
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 88e92041d8f0..fdefa8fd72c4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt)
449 449
450 while ((options = strsep(&this_opt, ",")) != NULL) { 450 while ((options = strsep(&this_opt, ",")) != NULL) {
451 if (!strncmp(options, "font:", 5)) 451 if (!strncmp(options, "font:", 5))
452 strcpy(fontname, options + 5); 452 strlcpy(fontname, options + 5, sizeof(fontname));
453 453
454 if (!strncmp(options, "scrollback:", 11)) { 454 if (!strncmp(options, "scrollback:", 11)) {
455 options += 11; 455 options += 11;
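strcpy() here copied an arbitrarily long command-line token into the fixed fontname[] buffer; strlcpy() bounds the copy and always NUL-terminates. Its return value is the full source length, which gives a cheap truncation check, as in this sketch (helper name hypothetical):

    static void set_font_option(char *fontname, size_t len, const char *opt)
    {
            /* strlcpy returns strlen(opt); >= len means it was truncated */
            if (strlcpy(fontname, opt, len) >= len)
                    pr_warn("fbcon: font name truncated\n");
    }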
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 00ce1f34b496..57d940be5f3d 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
328 case MB862XX_L1_SET_CFG: 328 case MB862XX_L1_SET_CFG:
329 if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) 329 if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg)))
330 return -EFAULT; 330 return -EFAULT;
331 if (l1_cfg->dh == 0 || l1_cfg->dw == 0)
332 return -EINVAL;
331 if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { 333 if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) {
332 /* downscaling */ 334 /* downscaling */
333 outreg(cap, GC_CAP_CSC, 335 outreg(cap, GC_CAP_CSC,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 5d31699fbd3c..f43bfe17b3b6 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
105 105
106 sdi_config_lcd_manager(dssdev); 106 sdi_config_lcd_manager(dssdev);
107 107
108 /*
109 * LCLK and PCLK divisors are located in shadow registers, and we
110 * normally write them to DISPC registers when enabling the output.
111 * However, SDI uses pck-free as source clock for its PLL, and pck-free
112 * is affected by the divisors. And as we need the PLL before enabling
113 * the output, we need to write the divisors early.
114 *
115 * It seems just writing to the DISPC register is enough, and we don't
116 * need to care about the shadow register mechanism for pck-free. The
117 * exact reason for this is unknown.
118 */
119 dispc_mgr_set_clock_div(dssdev->manager->id,
120 &sdi.mgr_config.clock_info);
121
108 dss_sdi_init(dssdev->phy.sdi.datapairs); 122 dss_sdi_init(dssdev->phy.sdi.datapairs);
109 r = dss_sdi_enable(); 123 r = dss_sdi_enable();
110 if (r) 124 if (r)
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 08ec1a7103f2..fc671d3d8004 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
1192 break; 1192 break;
1193 1193
1194 if (regno < 16) { 1194 if (regno < 16) {
1195 u16 pal; 1195 u32 pal;
1196 pal = ((red >> (16 - var->red.length)) << 1196 pal = ((red >> (16 - var->red.length)) <<
1197 var->red.offset) | 1197 var->red.offset) |
1198 ((green >> (16 - var->green.length)) << 1198 ((green >> (16 - var->green.length)) <<
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1afb4fba11b4..4d519488d304 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
232 return ret; 232 return ret;
233 233
234 if (hwdev && hwdev->coherent_dma_mask) 234 if (hwdev && hwdev->coherent_dma_mask)
235 dma_mask = hwdev->coherent_dma_mask; 235 dma_mask = dma_alloc_coherent_mask(hwdev, flags);
236 236
237 phys = virt_to_phys(ret); 237 phys = virt_to_phys(ret);
238 dev_addr = xen_phys_to_bus(phys); 238 dev_addr = xen_phys_to_bus(phys);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 097e536e8672..03342728bf23 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
353 if (err) 353 if (err)
354 goto config_release; 354 goto config_release;
355 355
 356 dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
357 __pci_reset_function_locked(dev);
358
359 /* We need the device active to save the state. */ 356 /* We need the device active to save the state. */
360 dev_dbg(&dev->dev, "save state of device\n"); 357 dev_dbg(&dev->dev, "save state of device\n");
361 pci_save_state(dev); 358 pci_save_state(dev);
362 dev_data->pci_saved_state = pci_store_saved_state(dev); 359 dev_data->pci_saved_state = pci_store_saved_state(dev);
363 if (!dev_data->pci_saved_state) 360 if (!dev_data->pci_saved_state)
364 dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); 361 dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
365 362 else {
 363 dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
364 __pci_reset_function_locked(dev);
365 }
366 /* Now disable the device (this also ensures some private device 366 /* Now disable the device (this also ensures some private device
367 * data is setup before we export) 367 * data is setup before we export)
368 */ 368 */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 604382143bcf..594b419b7d20 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -82,10 +82,18 @@
82 __x - (__x % (y)); \ 82 __x - (__x % (y)); \
83} \ 83} \
84) 84)
85
86/*
87 * Divide positive or negative dividend by positive divisor and round
88 * to closest integer. Result is undefined for negative divisors.
89 */
85#define DIV_ROUND_CLOSEST(x, divisor)( \ 90#define DIV_ROUND_CLOSEST(x, divisor)( \
86{ \ 91{ \
87 typeof(divisor) __divisor = divisor; \ 92 typeof(x) __x = x; \
88 (((x) + ((__divisor) / 2)) / (__divisor)); \ 93 typeof(divisor) __d = divisor; \
94 (((typeof(x))-1) >= 0 || (__x) >= 0) ? \
95 (((__x) + ((__d) / 2)) / (__d)) : \
96 (((__x) - ((__d) / 2)) / (__d)); \
89} \ 97} \
90) 98)
91 99
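With truncating C division, the old macro rounded negative dividends toward zero: (-7 + 1) / 2 yields -3 rather than the closest integer, -4. The new form subtracts half the divisor when the dividend is negative, and the ((typeof(x))-1) >= 0 test lets the compiler discard the negative branch entirely for unsigned types. A userspace check, assuming GCC for typeof and statement expressions:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, divisor) ({                \
            typeof(x) __x = x;                              \
            typeof(divisor) __d = divisor;                  \
            (((typeof(x))-1) >= 0 || (__x) >= 0) ?          \
                    (((__x) + ((__d) / 2)) / (__d)) :       \
                    (((__x) - ((__d) / 2)) / (__d));        \
    })

    int main(void)
    {
            printf("%d\n", DIV_ROUND_CLOSEST(7, 2));    /* 4 */
            printf("%d\n", DIV_ROUND_CLOSEST(-7, 2));   /* -4; old macro: -3 */
            return 0;
    }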
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 111aca5e97f3..4b27f9f503e4 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -239,6 +239,7 @@ struct mmc_card {
239#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ 239#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
240#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ 240#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
241#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ 241#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
242#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
242 /* byte mode */ 243 /* byte mode */
243 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ 244 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
244#define MMC_NO_POWER_NOTIFICATION 0 245#define MMC_NO_POWER_NOTIFICATION 0
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5faa8310eec9..2c755243eef2 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -254,10 +254,10 @@ struct pci_dev {
254 u8 revision; /* PCI revision, low byte of class word */ 254 u8 revision; /* PCI revision, low byte of class word */
255 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 255 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
256 u8 pcie_cap; /* PCI-E capability offset */ 256 u8 pcie_cap; /* PCI-E capability offset */
257 u8 pcie_type:4; /* PCI-E device/port type */
258 u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ 257 u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
259 u8 rom_base_reg; /* which config register controls the ROM */ 258 u8 rom_base_reg; /* which config register controls the ROM */
260 u8 pin; /* which interrupt pin this device uses */ 259 u8 pin; /* which interrupt pin this device uses */
260 u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */
261 261
262 struct pci_driver *driver; /* which driver has allocated this device */ 262 struct pci_driver *driver; /* which driver has allocated this device */
263 u64 dma_mask; /* Mask of the bits of bus address this 263 u64 dma_mask; /* Mask of the bits of bus address this
@@ -816,6 +816,39 @@ static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
816 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); 816 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
817} 817}
818 818
819int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
820int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
821int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
822int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
823int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
824 u16 clear, u16 set);
825int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
826 u32 clear, u32 set);
827
828static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
829 u16 set)
830{
831 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
832}
833
834static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
835 u32 set)
836{
837 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
838}
839
840static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
841 u16 clear)
842{
843 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
844}
845
846static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
847 u32 clear)
848{
849 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
850}
851
819/* user-space driven config access */ 852/* user-space driven config access */
820int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 853int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
821int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 854int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
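These accessors replace the open-coded pattern of locating the PCIe capability with pci_pcie_cap() and adding register offsets by hand, which is exactly the conversion performed file by file above. A minimal sketch of the intended usage — example_tune_link() and its mask choices are illustrative, not taken from the patch:

    /* Read Link Status, then do a read-modify-write on Link Control. */
    static int example_tune_link(struct pci_dev *dev)
    {
            u16 lnksta;
            int ret;

            ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
            if (ret)
                    return ret;     /* non-PCIe device or failed access */

            /* Disable ASPM and enable Common Clock in one helper call. */
            return pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                                                      PCI_EXP_LNKCTL_ASPMC,
                                                      PCI_EXP_LNKCTL_CCC);
    }

The set/clear variants are thin wrappers around clear_and_set, so callers never re-implement the read-modify-write cycle themselves.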
@@ -1031,6 +1064,8 @@ int pci_cfg_space_size_ext(struct pci_dev *dev);
1031int pci_cfg_space_size(struct pci_dev *dev); 1064int pci_cfg_space_size(struct pci_dev *dev);
1032unsigned char pci_bus_max_busnr(struct pci_bus *bus); 1065unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1033void pci_setup_bridge(struct pci_bus *bus); 1066void pci_setup_bridge(struct pci_bus *bus);
1067resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1068 unsigned long type);
1034 1069
1035#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 1070#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1036#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) 1071#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1650,6 +1685,15 @@ static inline bool pci_is_pcie(struct pci_dev *dev)
1650 return !!pci_pcie_cap(dev); 1685 return !!pci_pcie_cap(dev);
1651} 1686}
1652 1687
1688/**
1689 * pci_pcie_type - get the PCIe device/port type
1690 * @dev: PCI device
1691 */
1692static inline int pci_pcie_type(const struct pci_dev *dev)
1693{
1694 return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4;
1695}
1696
1653void pci_request_acs(void); 1697void pci_request_acs(void);
1654bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 1698bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
1655bool pci_acs_path_enabled(struct pci_dev *start, 1699bool pci_acs_path_enabled(struct pci_dev *start,
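pci_pcie_type() decodes the pcie_flags_reg value cached by set_pcie_port_type(), so callers no longer pay a config-space read just to learn the port type. A hedged sketch of the gating pattern that recurs throughout this series; is_downstream_facing() is a hypothetical helper:

    static bool is_downstream_facing(struct pci_dev *dev)
    {
            int type;

            if (!pci_is_pcie(dev))
                    return false;

            type = pci_pcie_type(dev);
            return type == PCI_EXP_TYPE_ROOT_PORT ||
                   type == PCI_EXP_TYPE_DOWNSTREAM;
    }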
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 7fb75b143755..3958f70f3202 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -549,6 +549,7 @@
549#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */ 549#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */
550#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */ 550#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
551#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ 551#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
552#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
552#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ 553#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
553 554
554/* Extended Capabilities (PCI-X 2.0 and Express) */ 555/* Extended Capabilities (PCI-X 2.0 and Express) */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bd92431d4c49..4ada3be6e252 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2562 break; 2562 break;
2563 2563
2564 default: 2564 default:
2565 BUG(); 2565 return -EINVAL;
2566 } 2566 }
2567 2567
2568 l = strlen(policy_modes[mode]); 2568 l = strlen(policy_modes[mode]);
diff --git a/net/socket.c b/net/socket.c
index a5471f804d99..edc3c4af9085 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2604 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2604 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2605 set_fs(old_fs); 2605 set_fs(old_fs);
2606 if (!err) 2606 if (!err)
2607 err = compat_put_timeval(up, &ktv); 2607 err = compat_put_timeval(&ktv, up);
2608 2608
2609 return err; 2609 return err;
2610} 2610}
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2620 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2620 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2621 set_fs(old_fs); 2621 set_fs(old_fs);
2622 if (!err) 2622 if (!err)
2623 err = compat_put_timespec(up, &kts); 2623 err = compat_put_timespec(&kts, up);
2624 2624
2625 return err; 2625 return err;
2626} 2626}
diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
index 6bf8e87f1dcf..c3f69ae275d1 100644
--- a/scripts/Makefile.fwinst
+++ b/scripts/Makefile.fwinst
@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
42$(installed-fw-dirs): 42$(installed-fw-dirs):
43 $(call cmd,mkdir) 43 $(call cmd,mkdir)
44 44
45$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %) 45$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
46 $(call cmd,install) 46 $(call cmd,install)
47 47
48PHONY += __fw_install __fw_modinst FORCE 48PHONY += __fw_install __fw_modinst FORCE
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 4629038c9e5a..4235a6361fec 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -211,7 +211,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then
211 211
212 if ! cmp -s System.map .tmp_System.map; then 212 if ! cmp -s System.map .tmp_System.map; then
213 echo >&2 Inconsistent kallsyms data 213 echo >&2 Inconsistent kallsyms data
214 echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround 214 echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
215 cleanup 215 cleanup
216 exit 1 216 exit 1
217 fi 217 fi
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index f560051a949e..f25c24c743f9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec)
1209 kfree(codec); 1209 kfree(codec);
1210} 1210}
1211 1211
1212static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec,
1213 hda_nid_t fg, unsigned int power_state);
1214
1212static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, 1215static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
1213 unsigned int power_state); 1216 unsigned int power_state);
1214 1217
@@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
1317 AC_VERB_GET_SUBSYSTEM_ID, 0); 1320 AC_VERB_GET_SUBSYSTEM_ID, 0);
1318 } 1321 }
1319 1322
1323 codec->epss = snd_hda_codec_get_supported_ps(codec,
1324 codec->afg ? codec->afg : codec->mfg,
1325 AC_PWRST_EPSS);
1326
1320 /* power-up all before initialization */ 1327 /* power-up all before initialization */
1321 hda_set_power_state(codec, 1328 hda_set_power_state(codec,
1322 codec->afg ? codec->afg : codec->mfg, 1329 codec->afg ? codec->afg : codec->mfg,
@@ -3543,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
3543 /* this delay seems necessary to avoid click noise at power-down */ 3550 /* this delay seems necessary to avoid click noise at power-down */
3544 if (power_state == AC_PWRST_D3) { 3551 if (power_state == AC_PWRST_D3) {
3545 /* transition time less than 10ms for power down */ 3552 /* transition time less than 10ms for power down */
3546 bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS); 3553 msleep(codec->epss ? 10 : 100);
3547 msleep(epss ? 10 : 100);
3548 } 3554 }
3549 3555
3550 /* repeat power states setting at most 10 times*/ 3556 /* repeat power states setting at most 10 times*/
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 7fbc1bcaf1a9..e5a7e19a8071 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -862,6 +862,7 @@ struct hda_codec {
862 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ 862 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
863 unsigned int no_jack_detect:1; /* Machine has no jack-detection */ 863 unsigned int no_jack_detect:1; /* Machine has no jack-detection */
864 unsigned int pcm_format_first:1; /* PCM format must be set first */ 864 unsigned int pcm_format_first:1; /* PCM format must be set first */
865 unsigned int epss:1; /* supporting EPSS? */
865#ifdef CONFIG_SND_HDA_POWER_SAVE 866#ifdef CONFIG_SND_HDA_POWER_SAVE
866 unsigned int power_on :1; /* current (global) power-state */ 867 unsigned int power_on :1; /* current (global) power-state */
867 int power_transition; /* power-state in transition */ 868 int power_transition; /* power-state in transition */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index ea5775a1a7db..6f806d3e56bb 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4543,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec,
4543 struct auto_pin_cfg *cfg = &spec->autocfg; 4543 struct auto_pin_cfg *cfg = &spec->autocfg;
4544 int i; 4544 int i;
4545 4545
4546 if (cfg->speaker_outs == 0)
4547 return;
4548
4546 for (i = 0; i < cfg->line_outs; i++) { 4549 for (i = 0; i < cfg->line_outs; i++) {
4547 if (presence) 4550 if (presence)
4548 break; 4551 break;
@@ -5531,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5531 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); 5534 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
5532 } 5535 }
5533 5536
5537 codec->epss = 0; /* longer delay needed for D3 */
5534 codec->no_trigger_sense = 1; 5538 codec->no_trigger_sense = 1;
5535 codec->spec = spec; 5539 codec->spec = spec;
5536 5540
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d5b5c3388e28..4a469f0cb6d4 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 				     struct snd_usb_audio *chip)
 {
 	struct snd_card *card;
-	struct list_head *p;
+	struct list_head *p, *n;
 
 	if (chip == (void *)-1L)
 		return;
@@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 		snd_usb_stream_disconnect(p);
 	}
 	/* release the endpoint resources */
-	list_for_each(p, &chip->ep_list) {
+	list_for_each_safe(p, n, &chip->ep_list) {
 		snd_usb_endpoint_free(p);
 	}
 	/* release the midi resources */
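
The card.c change above is needed because snd_usb_endpoint_free() releases the node the cursor points at; the plain list_for_each() would then load the next pointer from freed memory. list_for_each_safe() caches the successor before the loop body runs. A userspace sketch of the same hazard and fix, using a bare singly linked list instead of the kernel list API:

/* cache the successor before freeing the current node */
#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node *head)
{
	struct node *p = head, *n;

	while (p) {
		n = p->next;	/* read next BEFORE free(), like list_for_each_safe() */
		free(p);
		p = n;		/* reading p->next after free() would be use-after-free */
	}
}

int main(void)
{
	struct node *b = calloc(1, sizeof(*b));
	struct node *a = calloc(1, sizeof(*a));

	if (!a || !b)
		return 1;
	a->next = b;
	free_all(a);
	return 0;
}
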
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c41181202688..d6e2bb49c59c 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep)
  *
  * For implicit feedback, next_packet_size() is unused.
  */
-static int next_packet_size(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
 {
 	unsigned long flags;
 	int ret;
@@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep,
 		ep->retire_data_urb(ep->data_subs, urb);
 }
 
-static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep,
-				       struct snd_urb_ctx *ctx)
-{
-	int i;
-
-	for (i = 0; i < ctx->packets; ++i)
-		ctx->packet_size[i] = next_packet_size(ep);
-}
-
 /*
  * Prepare a PLAYBACK urb for submission to the bus.
  */
@@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb)
 			goto exit_clear;
 		}
 
-		prepare_outbound_urb_sizes(ep, ctx);
 		prepare_outbound_urb(ep, ctx);
 	} else {
 		retire_inbound_urb(ep, ctx);
@@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 /**
  * snd_usb_endpoint_start: start an snd_usb_endpoint
  *
  * @ep: the endpoint to start
+ * @can_sleep: flag indicating whether the operation is executed in
+ *             non-atomic context
  *
  * A call to this function will increment the use count of the endpoint.
  * In case it is not already running, the URBs for this endpoint will be
@@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
  *
  * Returns an error if the URB submission failed, 0 in all other cases.
  */
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
 {
 	int err;
 	unsigned int i;
@@ -821,6 +813,11 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
 	if (++ep->use_count != 1)
 		return 0;
 
+	/* just to be sure */
+	deactivate_urbs(ep, 0, can_sleep);
+	if (can_sleep)
+		wait_clear_urbs(ep);
+
 	ep->active_mask = 0;
 	ep->unlink_mask = 0;
 	ep->phase = 0;
@@ -850,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
 			goto __error;
 
 		if (usb_pipeout(ep->pipe)) {
-			prepare_outbound_urb_sizes(ep, urb->context);
 			prepare_outbound_urb(ep, urb->context);
 		} else {
 			prepare_inbound_urb(ep, urb->context);
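
snd_usb_endpoint_start() above now drains leftover URBs before (re)starting the endpoint, but only performs the blocking wait_clear_urbs() when the caller flagged that it may sleep; as the pcm.c hunks below show, the prepare path passes 1 and the atomic capture trigger passes 0. A schematic sketch of that contract, with illustrative names rather than the real URB helpers:

/* cancel is always safe; waiting is reserved for non-atomic callers */
#include <stdbool.h>
#include <stdio.h>

static void cancel_pending_urbs(void)
{
	puts("cancel in-flight URBs (non-blocking)");
}

static void wait_until_idle(void)
{
	puts("sleep until all URBs have returned");
}

static void endpoint_start(bool can_sleep)
{
	cancel_pending_urbs();
	if (can_sleep)
		wait_until_idle();	/* sleeping here in IRQ context would be a bug */
}

int main(void)
{
	endpoint_start(true);	/* e.g. PCM prepare: process context */
	endpoint_start(false);	/* e.g. capture trigger: atomic context */
	return 0;
}
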
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index ee2723fb174f..cbbbdf226d66 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 				struct audioformat *fmt,
 				struct snd_usb_endpoint *sync_ep);
 
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
 			   int force, int can_sleep, int wait);
 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
@@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
 
 void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
 			     struct snd_usb_endpoint *sender,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 62ec808ed792..fd5e982fc98c 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 	}
 }
 
-static int start_endpoints(struct snd_usb_substream *subs)
+static int start_endpoints(struct snd_usb_substream *subs, int can_sleep)
 {
 	int err;
 
@@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs)
 		snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep);
 
 		ep->data_subs = subs;
-		err = snd_usb_endpoint_start(ep);
+		err = snd_usb_endpoint_start(ep, can_sleep);
 		if (err < 0) {
 			clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
 			return err;
@@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs)
 	    !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) {
 		struct snd_usb_endpoint *ep = subs->sync_endpoint;
 
+		if (subs->data_endpoint->iface != subs->sync_endpoint->iface ||
+		    subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) {
+			err = usb_set_interface(subs->dev,
+						subs->sync_endpoint->iface,
+						subs->sync_endpoint->alt_idx);
+			if (err < 0) {
+				snd_printk(KERN_ERR
+					   "%d:%d:%d: cannot set interface (%d)\n",
+					   subs->dev->devnum,
+					   subs->sync_endpoint->iface,
+					   subs->sync_endpoint->alt_idx, err);
+				return -EIO;
+			}
+		}
+
 		snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep);
 
 		ep->sync_slave = subs->data_endpoint;
-		err = snd_usb_endpoint_start(ep);
+		err = snd_usb_endpoint_start(ep, can_sleep);
 		if (err < 0) {
 			clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
 			return err;
@@ -544,13 +559,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
 	subs->last_frame_number = 0;
 	runtime->delay = 0;
 
-	/* clear the pending deactivation on the target EPs */
-	deactivate_endpoints(subs);
-
 	/* for playback, submit the URBs now; otherwise, the first hwptr_done
 	 * updates for all URBs would happen at the same time when starting */
 	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
-		return start_endpoints(subs);
+		return start_endpoints(subs, 1);
 
 	return 0;
 }
@@ -1032,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
 				  struct urb *urb)
 {
 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+	struct snd_usb_endpoint *ep = subs->data_endpoint;
 	struct snd_urb_ctx *ctx = urb->context;
 	unsigned int counts, frames, bytes;
 	int i, stride, period_elapsed = 0;
@@ -1043,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
 	urb->number_of_packets = 0;
 	spin_lock_irqsave(&subs->lock, flags);
 	for (i = 0; i < ctx->packets; i++) {
-		counts = ctx->packet_size[i];
+		if (ctx->packet_size[i])
+			counts = ctx->packet_size[i];
+		else
+			counts = snd_usb_endpoint_next_packet_size(ep);
+
 		/* set up descriptor */
 		urb->iso_frame_desc[i].offset = frames * stride;
 		urb->iso_frame_desc[i].length = counts * stride;
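
With prepare_outbound_urb_sizes() removed in endpoint.c, the packet size is now decided at fill time: a nonzero ctx->packet_size[i] (precomputed, e.g. by the implicit-feedback path) wins, otherwise the size comes from the rate-feedback logic exported as snd_usb_endpoint_next_packet_size(). A compact sketch of just that selection, with stand-in values instead of real feedback data:

/* precomputed size if present, otherwise feedback-driven size */
#include <stdio.h>

#define PACKETS 4

static unsigned int next_packet_size(void)
{
	return 48;	/* stand-in: nominal 48 frames/ms at 48kHz */
}

int main(void)
{
	unsigned int packet_size[PACKETS] = { 44, 0, 45, 0 };
	unsigned int i, counts;

	for (i = 0; i < PACKETS; i++) {
		counts = packet_size[i] ? packet_size[i] : next_packet_size();
		printf("packet %u: %u frames\n", i, counts);
	}
	return 0;
}
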
@@ -1094,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
 	subs->hwptr_done += bytes;
 	if (subs->hwptr_done >= runtime->buffer_size * stride)
 		subs->hwptr_done -= runtime->buffer_size * stride;
+
+	/* update delay with exact number of samples queued */
+	runtime->delay = subs->last_delay;
 	runtime->delay += frames;
+	subs->last_delay = runtime->delay;
+
+	/* realign last_frame_number */
+	subs->last_frame_number = usb_get_current_frame_number(subs->dev);
+	subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
+
 	spin_unlock_irqrestore(&subs->lock, flags);
 	urb->transfer_buffer_length = bytes;
 	if (period_elapsed)
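
The hunk above makes the queued-frame count authoritative: subs->last_delay accumulates exactly what was queued, and the 1kHz USB frame counter (only its 8 LSBs are meaningful here) is snapshotted so later reads can estimate how much of that queue has already played. Assuming the estimate follows snd_usb_pcm_delay(), i.e. queued frames minus rate * elapsed_ms / 1000, a worked example:

/* estimate remaining delay from the wrapping 8-bit millisecond counter */
#include <stdio.h>

int main(void)
{
	int rate = 48000;				/* Hz */
	int last_delay = 480;				/* 10ms worth of queued frames */
	int last_frame = 0x10, cur_frame = 0x13;	/* 8-bit, 1kHz counter */
	int elapsed_ms = (cur_frame - last_frame) & 0xFF;	/* wrap-safe */
	int est_delay = last_delay - rate * elapsed_ms / 1000;

	/* 3ms elapsed -> 144 frames played, 336 still queued */
	printf("estimated delay: %d frames\n", est_delay);
	return 0;
}
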
@@ -1112,12 +1138,26 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
 	struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
 	int stride = runtime->frame_bits >> 3;
 	int processed = urb->transfer_buffer_length / stride;
+	int est_delay;
 
 	spin_lock_irqsave(&subs->lock, flags);
-	if (processed > runtime->delay)
-		runtime->delay = 0;
+	est_delay = snd_usb_pcm_delay(subs, runtime->rate);
+	/* update delay with exact number of samples played */
+	if (processed > subs->last_delay)
+		subs->last_delay = 0;
 	else
-		runtime->delay -= processed;
+		subs->last_delay -= processed;
+	runtime->delay = subs->last_delay;
+
+	/*
+	 * Report when delay estimate is off by more than 2ms.
+	 * The error should be lower than 2ms since the estimate relies
+	 * on two reads of a counter updated every ms.
+	 */
+	if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+		snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
+			   est_delay, subs->last_delay);
+
 	spin_unlock_irqrestore(&subs->lock, flags);
 }
 
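
The report threshold in retire_playback_urb() above is plain unit arithmetic: abs(est_delay - last_delay) is a frame count, so comparing it times 1000 against rate * 2 asks whether the estimate is off by more than 2ms worth of frames (96 frames at 48kHz). Spelled out with illustrative numbers:

/* error_frames * 1000 > rate * 2  <=>  error > 2ms of audio */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int rate = 48000, est_delay = 400, actual_delay = 300;

	if (abs(est_delay - actual_delay) * 1000 > rate * 2)
		printf("off by %d frames (> 2ms at %dHz)\n",
		       est_delay - actual_delay, rate);	/* 100000 > 96000 */
	return 0;
}
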
@@ -1175,7 +1215,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		err = start_endpoints(subs);
+		err = start_endpoints(subs, 0);
 		if (err < 0)
 			return err;
 