path: root/drivers/pci/msi.c
author		Ashok Raj <ashok.raj@intel.com>	2005-11-09 00:42:33 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2005-11-10 19:09:18 -0500
commit		b4033c1715cb5aa1dcb1a25bdaf71fea908bb3f1 (patch)
tree		cf9ba9ae7999573a507df301faf34170ab08e2c3 /drivers/pci/msi.c
parent		48b19148733b4826eeedfd8be9f19b61c8d010b1 (diff)
[PATCH] PCI: Change MSI to use physical delivery mode always
MSI hardcoded the delivery mode to logical. Recently x86_64 moved to physical mode addressing to support physflat mode. With that mode enabled, I noticed that my ethernet devices using MSI weren't working. msi_address_init() was hardcoded to use logical mode for i386 and x86_64, so when we switched to physical mode, things stopped working. Since we don't use lowest-priority delivery with MSI anyway, the interrupt is always directed to just a single CPU. It is safe and simpler to use physical mode always, even when we use logical delivery mode for IPIs or other ioapic RTEs.

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/pci/msi.c')
-rw-r--r--	drivers/pci/msi.c	20
1 file changed, 12 insertions(+), 8 deletions(-)
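
For context on why a single physical APIC ID is enough: on x86 the MSI lower address dword carries a fixed 0xFEE header in bits 31:20, the destination APIC ID in bits 19:12 (hence the shift of 12 behind MSI_TARGET_CPU_SHIFT), the redirection hint in bit 3, and the destination mode in bit 2 (0 = physical, 1 = logical). Below is a minimal, self-contained sketch of that encoding; the EXAMPLE_* macros and example_msi_lo_address_phys() are illustrative stand-ins, not the kernel's own MSI_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative x86 MSI lower-address layout; names are hypothetical,
 * not the kernel's MSI_* macros. */
#define EXAMPLE_MSI_ADDR_HEADER		0xFEEu	/* bits 31:20 */
#define EXAMPLE_MSI_ADDR_HEADER_SHIFT	20
#define EXAMPLE_MSI_ADDR_DEST_ID_SHIFT	12	/* bits 19:12, destination APIC ID */
#define EXAMPLE_MSI_ADDR_DEST_MODE_PHYS	0u	/* bit 2 clear: physical destination mode */

/* Build the lower address dword that directs the MSI to exactly one CPU,
 * identified by its physical APIC ID -- the scheme this patch switches to. */
static uint32_t example_msi_lo_address_phys(uint8_t apic_id)
{
	return (EXAMPLE_MSI_ADDR_HEADER << EXAMPLE_MSI_ADDR_HEADER_SHIFT) |
	       ((uint32_t)apic_id << EXAMPLE_MSI_ADDR_DEST_ID_SHIFT) |
	       EXAMPLE_MSI_ADDR_DEST_MODE_PHYS;
}

int main(void)
{
	/* APIC ID 3 encodes as 0xFEE03000 */
	printf("0x%08x\n", example_msi_lo_address_phys(3));
	return 0;
}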
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a2033552423c..202b7507a357 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -23,6 +23,8 @@
 #include "pci.h"
 #include "msi.h"
 
+#define MSI_TARGET_CPU		first_cpu(cpu_online_map)
+
 static DEFINE_SPINLOCK(msi_lock);
 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
 static kmem_cache_t* msi_cachep;
@@ -92,6 +94,7 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
 	struct msi_desc *entry;
 	struct msg_address address;
 	unsigned int irq = vector;
+	unsigned int dest_cpu = first_cpu(cpu_mask);
 
 	entry = (struct msi_desc *)msi_desc[vector];
 	if (!entry || !entry->dev)
@@ -108,9 +111,9 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
 		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
 			&address.lo_address.value);
 		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
-		address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
-					MSI_TARGET_CPU_SHIFT);
-		entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
+		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
+					MSI_TARGET_CPU_SHIFT);
+		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
 		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
 			address.lo_address.value);
 		set_native_irq_info(irq, cpu_mask);
@@ -123,9 +126,9 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
 
 		address.lo_address.value = readl(entry->mask_base + offset);
 		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
-		address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
-					MSI_TARGET_CPU_SHIFT);
-		entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
+		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
+					MSI_TARGET_CPU_SHIFT);
+		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
 		writel(address.lo_address.value, entry->mask_base + offset);
 		set_native_irq_info(irq, cpu_mask);
 		break;
@@ -259,14 +262,15 @@ static void msi_data_init(struct msg_data *msi_data,
 static void msi_address_init(struct msg_address *msi_address)
 {
 	unsigned int	dest_id;
+	unsigned long	dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);
 
 	memset(msi_address, 0, sizeof(struct msg_address));
 	msi_address->hi_address = (u32)0;
 	dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
-	msi_address->lo_address.u.dest_mode = MSI_DEST_MODE;
+	msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
 	msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
 	msi_address->lo_address.u.dest_id = dest_id;
-	msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT);
+	msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
 }
 
 static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);