aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/input/xen-kbdfront.c2
-rw-r--r--drivers/isdn/act2000/act2000.h6
-rw-r--r--drivers/isdn/hisax/config.c18
-rw-r--r--drivers/isdn/hisax/hisax.h1
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/pci/Kconfig21
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/bus.c1
-rw-r--r--drivers/pci/dmar.c8
-rw-r--r--drivers/pci/htirq.c22
-rw-r--r--drivers/pci/intr_remapping.c212
-rw-r--r--drivers/pci/msi.c52
-rw-r--r--drivers/pci/xen-pcifront.c1148
-rw-r--r--drivers/video/xen-fbfront.c2
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/biomerge.c13
-rw-r--r--drivers/xen/events.c365
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
21 files changed, 1623 insertions, 264 deletions
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ab735a605cf3..c4e9d817caaa 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1125,6 +1125,8 @@ static void blkback_changed(struct xenbus_device *dev,
1125 case XenbusStateInitialising: 1125 case XenbusStateInitialising:
1126 case XenbusStateInitWait: 1126 case XenbusStateInitWait:
1127 case XenbusStateInitialised: 1127 case XenbusStateInitialised:
1128 case XenbusStateReconfiguring:
1129 case XenbusStateReconfigured:
1128 case XenbusStateUnknown: 1130 case XenbusStateUnknown:
1129 case XenbusStateClosed: 1131 case XenbusStateClosed:
1130 break; 1132 break;
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index ebb11907d402..e0c024db2ca5 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -276,6 +276,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
276 switch (backend_state) { 276 switch (backend_state) {
277 case XenbusStateInitialising: 277 case XenbusStateInitialising:
278 case XenbusStateInitialised: 278 case XenbusStateInitialised:
279 case XenbusStateReconfiguring:
280 case XenbusStateReconfigured:
279 case XenbusStateUnknown: 281 case XenbusStateUnknown:
280 case XenbusStateClosed: 282 case XenbusStateClosed:
281 break; 283 break;
diff --git a/drivers/isdn/act2000/act2000.h b/drivers/isdn/act2000/act2000.h
index d4c50512a1ff..88c9423500d8 100644
--- a/drivers/isdn/act2000/act2000.h
+++ b/drivers/isdn/act2000/act2000.h
@@ -141,9 +141,9 @@ typedef struct irq_data_isa {
141 __u8 rcvhdr[8]; 141 __u8 rcvhdr[8];
142} irq_data_isa; 142} irq_data_isa;
143 143
144typedef union irq_data { 144typedef union act2000_irq_data {
145 irq_data_isa isa; 145 irq_data_isa isa;
146} irq_data; 146} act2000_irq_data;
147 147
148/* 148/*
149 * Per card driver data 149 * Per card driver data
@@ -176,7 +176,7 @@ typedef struct act2000_card {
176 char *status_buf_read; 176 char *status_buf_read;
177 char *status_buf_write; 177 char *status_buf_write;
178 char *status_buf_end; 178 char *status_buf_end;
179 irq_data idat; /* Data used for IRQ handler */ 179 act2000_irq_data idat; /* Data used for IRQ handler */
180 isdn_if interface; /* Interface to upper layer */ 180 isdn_if interface; /* Interface to upper layer */
181 char regname[35]; /* Name used for request_region */ 181 char regname[35]; /* Name used for request_region */
182} act2000_card; 182} act2000_card;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 6f9afcd5ca4e..b133378d4dc9 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -801,6 +801,16 @@ static void closecard(int cardnr)
801 ll_unload(csta); 801 ll_unload(csta);
802} 802}
803 803
804static irqreturn_t card_irq(int intno, void *dev_id)
805{
806 struct IsdnCardState *cs = dev_id;
807 irqreturn_t ret = cs->irq_func(intno, cs);
808
809 if (ret == IRQ_HANDLED)
810 cs->irq_cnt++;
811 return ret;
812}
813
804static int init_card(struct IsdnCardState *cs) 814static int init_card(struct IsdnCardState *cs)
805{ 815{
806 int irq_cnt, cnt = 3, ret; 816 int irq_cnt, cnt = 3, ret;
@@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs)
809 ret = cs->cardmsg(cs, CARD_INIT, NULL); 819 ret = cs->cardmsg(cs, CARD_INIT, NULL);
810 return(ret); 820 return(ret);
811 } 821 }
812 irq_cnt = kstat_irqs(cs->irq); 822 irq_cnt = cs->irq_cnt = 0;
813 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], 823 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
814 cs->irq, irq_cnt); 824 cs->irq, irq_cnt);
815 if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { 825 if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
816 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", 826 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
817 cs->irq); 827 cs->irq);
818 return 1; 828 return 1;
@@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs)
822 /* Timeout 10ms */ 832 /* Timeout 10ms */
823 msleep(10); 833 msleep(10);
824 printk(KERN_INFO "%s: IRQ %d count %d\n", 834 printk(KERN_INFO "%s: IRQ %d count %d\n",
825 CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); 835 CardType[cs->typ], cs->irq, cs->irq_cnt);
826 if (kstat_irqs(cs->irq) == irq_cnt) { 836 if (cs->irq_cnt == irq_cnt) {
827 printk(KERN_WARNING 837 printk(KERN_WARNING
828 "%s: IRQ(%d) getting no interrupts during init %d\n", 838 "%s: IRQ(%d) getting no interrupts during init %d\n",
829 CardType[cs->typ], cs->irq, 4 - cnt); 839 CardType[cs->typ], cs->irq, 4 - cnt);
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 832a87855ffb..32ab3924aa73 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -959,6 +959,7 @@ struct IsdnCardState {
959 u_long event; 959 u_long event;
960 struct work_struct tqueue; 960 struct work_struct tqueue;
961 struct timer_list dbusytimer; 961 struct timer_list dbusytimer;
962 unsigned int irq_cnt;
962#ifdef ERROR_STATISTIC 963#ifdef ERROR_STATISTIC
963 int err_crc; 964 int err_crc;
964 int err_tx; 965 int err_tx;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 097f24d8bceb..b9fda7018cef 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -78,7 +78,7 @@ struct sih {
78 u8 irq_lines; /* number of supported irq lines */ 78 u8 irq_lines; /* number of supported irq lines */
79 79
80 /* SIR ignored -- set interrupt, for testing only */ 80 /* SIR ignored -- set interrupt, for testing only */
81 struct irq_data { 81 struct sih_irq_data {
82 u8 isr_offset; 82 u8 isr_offset;
83 u8 imr_offset; 83 u8 imr_offset;
84 } mask[2]; 84 } mask[2];
@@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
810 twl4030_irq_chip = dummy_irq_chip; 810 twl4030_irq_chip = dummy_irq_chip;
811 twl4030_irq_chip.name = "twl4030"; 811 twl4030_irq_chip.name = "twl4030";
812 812
813 twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; 813 twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
814 814
815 for (i = irq_base; i < irq_end; i++) { 815 for (i = irq_base; i < irq_end; i++) {
816 set_irq_chip_and_handler(i, &twl4030_irq_chip, 816 set_irq_chip_and_handler(i, &twl4030_irq_chip,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b50fedcef8ac..cb6e112989d8 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1610,6 +1610,8 @@ static void backend_changed(struct xenbus_device *dev,
1610 switch (backend_state) { 1610 switch (backend_state) {
1611 case XenbusStateInitialising: 1611 case XenbusStateInitialising:
1612 case XenbusStateInitialised: 1612 case XenbusStateInitialised:
1613 case XenbusStateReconfiguring:
1614 case XenbusStateReconfigured:
1613 case XenbusStateConnected: 1615 case XenbusStateConnected:
1614 case XenbusStateUnknown: 1616 case XenbusStateUnknown:
1615 case XenbusStateClosed: 1617 case XenbusStateClosed:
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 34ef70d562b2..5b1630e4e9e3 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -40,6 +40,27 @@ config PCI_STUB
40 40
41 When in doubt, say N. 41 When in doubt, say N.
42 42
43config XEN_PCIDEV_FRONTEND
44 tristate "Xen PCI Frontend"
45 depends on PCI && X86 && XEN
46 select HOTPLUG
47 select PCI_XEN
48 default y
49 help
50 The PCI device frontend driver allows the kernel to import arbitrary
51 PCI devices from a PCI backend to support PCI driver domains.
52
53config XEN_PCIDEV_FE_DEBUG
54 bool "Xen PCI Frontend debugging"
55 depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG
56 help
57 Say Y here if you want the Xen PCI frontend to produce a bunch of debug
58 messages to the system log. Select this if you are having a
59 problem with Xen PCI frontend support and want to see more of what is
60 going on.
61
62 When in doubt, say N.
63
43config HT_IRQ 64config HT_IRQ
44 bool "Interrupts on hypertransport devices" 65 bool "Interrupts on hypertransport devices"
45 default y 66 default y
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index dc1aa0922868..d5e27050c4e3 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -65,6 +65,8 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
65 65
66obj-$(CONFIG_PCI_STUB) += pci-stub.o 66obj-$(CONFIG_PCI_STUB) += pci-stub.o
67 67
68obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
69
68ifeq ($(CONFIG_PCI_DEBUG),y) 70ifeq ($(CONFIG_PCI_DEBUG),y)
69EXTRA_CFLAGS += -DDEBUG 71EXTRA_CFLAGS += -DDEBUG
70endif 72endif
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 7f0af0e9b826..69546e9213dd 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -299,6 +299,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
299 } 299 }
300 up_read(&pci_bus_sem); 300 up_read(&pci_bus_sem);
301} 301}
302EXPORT_SYMBOL_GPL(pci_walk_bus);
302 303
303EXPORT_SYMBOL(pci_bus_alloc_resource); 304EXPORT_SYMBOL(pci_bus_alloc_resource);
304EXPORT_SYMBOL_GPL(pci_bus_add_device); 305EXPORT_SYMBOL_GPL(pci_bus_add_device);
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 0a19708074c2..3de3a436a432 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -1221,9 +1221,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1221 } 1221 }
1222} 1222}
1223 1223
1224void dmar_msi_unmask(unsigned int irq) 1224void dmar_msi_unmask(struct irq_data *data)
1225{ 1225{
1226 struct intel_iommu *iommu = get_irq_data(irq); 1226 struct intel_iommu *iommu = irq_data_get_irq_data(data);
1227 unsigned long flag; 1227 unsigned long flag;
1228 1228
1229 /* unmask it */ 1229 /* unmask it */
@@ -1234,10 +1234,10 @@ void dmar_msi_unmask(unsigned int irq)
1234 spin_unlock_irqrestore(&iommu->register_lock, flag); 1234 spin_unlock_irqrestore(&iommu->register_lock, flag);
1235} 1235}
1236 1236
1237void dmar_msi_mask(unsigned int irq) 1237void dmar_msi_mask(struct irq_data *data)
1238{ 1238{
1239 unsigned long flag; 1239 unsigned long flag;
1240 struct intel_iommu *iommu = get_irq_data(irq); 1240 struct intel_iommu *iommu = irq_data_get_irq_data(data);
1241 1241
1242 /* mask it */ 1242 /* mask it */
1243 spin_lock_irqsave(&iommu->register_lock, flag); 1243 spin_lock_irqsave(&iommu->register_lock, flag);
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 98abf8b91294..834842aa5bbf 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
57 *msg = cfg->msg; 57 *msg = cfg->msg;
58} 58}
59 59
60void mask_ht_irq(unsigned int irq) 60void mask_ht_irq(struct irq_data *data)
61{ 61{
62 struct ht_irq_cfg *cfg; 62 struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
63 struct ht_irq_msg msg; 63 struct ht_irq_msg msg = cfg->msg;
64
65 cfg = get_irq_data(irq);
66 64
67 msg = cfg->msg;
68 msg.address_lo |= 1; 65 msg.address_lo |= 1;
69 write_ht_irq_msg(irq, &msg); 66 write_ht_irq_msg(data->irq, &msg);
70} 67}
71 68
72void unmask_ht_irq(unsigned int irq) 69void unmask_ht_irq(struct irq_data *data)
73{ 70{
74 struct ht_irq_cfg *cfg; 71 struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
75 struct ht_irq_msg msg; 72 struct ht_irq_msg msg = cfg->msg;
76
77 cfg = get_irq_data(irq);
78 73
79 msg = cfg->msg;
80 msg.address_lo &= ~1; 74 msg.address_lo &= ~1;
81 write_ht_irq_msg(irq, &msg); 75 write_ht_irq_msg(data->irq, &msg);
82} 76}
83 77
84/** 78/**
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index fd1d2867cdcc..ec87cd66f3eb 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -46,109 +46,24 @@ static __init int setup_intremap(char *str)
46} 46}
47early_param("intremap", setup_intremap); 47early_param("intremap", setup_intremap);
48 48
49struct irq_2_iommu {
50 struct intel_iommu *iommu;
51 u16 irte_index;
52 u16 sub_handle;
53 u8 irte_mask;
54};
55
56#ifdef CONFIG_GENERIC_HARDIRQS
57static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
58{
59 struct irq_2_iommu *iommu;
60
61 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
62 printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
63
64 return iommu;
65}
66
67static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
68{
69 struct irq_desc *desc;
70
71 desc = irq_to_desc(irq);
72
73 if (WARN_ON_ONCE(!desc))
74 return NULL;
75
76 return desc->irq_2_iommu;
77}
78
79static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
80{
81 struct irq_desc *desc;
82 struct irq_2_iommu *irq_iommu;
83
84 desc = irq_to_desc(irq);
85 if (!desc) {
86 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
87 return NULL;
88 }
89
90 irq_iommu = desc->irq_2_iommu;
91
92 if (!irq_iommu)
93 desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
94
95 return desc->irq_2_iommu;
96}
97
98#else /* !CONFIG_SPARSE_IRQ */
99
100static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
101
102static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
103{
104 if (irq < nr_irqs)
105 return &irq_2_iommuX[irq];
106
107 return NULL;
108}
109static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
110{
111 return irq_2_iommu(irq);
112}
113#endif
114
115static DEFINE_SPINLOCK(irq_2_ir_lock); 49static DEFINE_SPINLOCK(irq_2_ir_lock);
116 50
117static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) 51static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
118{
119 struct irq_2_iommu *irq_iommu;
120
121 irq_iommu = irq_2_iommu(irq);
122
123 if (!irq_iommu)
124 return NULL;
125
126 if (!irq_iommu->iommu)
127 return NULL;
128
129 return irq_iommu;
130}
131
132int irq_remapped(int irq)
133{ 52{
134 return valid_irq_2_iommu(irq) != NULL; 53 struct irq_cfg *cfg = get_irq_chip_data(irq);
54 return cfg ? &cfg->irq_2_iommu : NULL;
135} 55}
136 56
137int get_irte(int irq, struct irte *entry) 57int get_irte(int irq, struct irte *entry)
138{ 58{
139 int index; 59 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
140 struct irq_2_iommu *irq_iommu;
141 unsigned long flags; 60 unsigned long flags;
61 int index;
142 62
143 if (!entry) 63 if (!entry || !irq_iommu)
144 return -1; 64 return -1;
145 65
146 spin_lock_irqsave(&irq_2_ir_lock, flags); 66 spin_lock_irqsave(&irq_2_ir_lock, flags);
147 irq_iommu = valid_irq_2_iommu(irq);
148 if (!irq_iommu) {
149 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
150 return -1;
151 }
152 67
153 index = irq_iommu->irte_index + irq_iommu->sub_handle; 68 index = irq_iommu->irte_index + irq_iommu->sub_handle;
154 *entry = *(irq_iommu->iommu->ir_table->base + index); 69 *entry = *(irq_iommu->iommu->ir_table->base + index);
@@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry)
160int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) 75int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
161{ 76{
162 struct ir_table *table = iommu->ir_table; 77 struct ir_table *table = iommu->ir_table;
163 struct irq_2_iommu *irq_iommu; 78 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
164 u16 index, start_index; 79 u16 index, start_index;
165 unsigned int mask = 0; 80 unsigned int mask = 0;
166 unsigned long flags; 81 unsigned long flags;
167 int i; 82 int i;
168 83
169 if (!count) 84 if (!count || !irq_iommu)
170 return -1;
171
172#ifndef CONFIG_SPARSE_IRQ
173 /* protect irq_2_iommu_alloc later */
174 if (irq >= nr_irqs)
175 return -1; 85 return -1;
176#endif
177 86
178 /* 87 /*
179 * start the IRTE search from index 0. 88 * start the IRTE search from index 0.
@@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
214 for (i = index; i < index + count; i++) 123 for (i = index; i < index + count; i++)
215 table->base[i].present = 1; 124 table->base[i].present = 1;
216 125
217 irq_iommu = irq_2_iommu_alloc(irq);
218 if (!irq_iommu) {
219 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
220 printk(KERN_ERR "can't allocate irq_2_iommu\n");
221 return -1;
222 }
223
224 irq_iommu->iommu = iommu; 126 irq_iommu->iommu = iommu;
225 irq_iommu->irte_index = index; 127 irq_iommu->irte_index = index;
226 irq_iommu->sub_handle = 0; 128 irq_iommu->sub_handle = 0;
@@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
244 146
245int map_irq_to_irte_handle(int irq, u16 *sub_handle) 147int map_irq_to_irte_handle(int irq, u16 *sub_handle)
246{ 148{
247 int index; 149 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
248 struct irq_2_iommu *irq_iommu;
249 unsigned long flags; 150 unsigned long flags;
151 int index;
250 152
251 spin_lock_irqsave(&irq_2_ir_lock, flags); 153 if (!irq_iommu)
252 irq_iommu = valid_irq_2_iommu(irq);
253 if (!irq_iommu) {
254 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
255 return -1; 154 return -1;
256 }
257 155
156 spin_lock_irqsave(&irq_2_ir_lock, flags);
258 *sub_handle = irq_iommu->sub_handle; 157 *sub_handle = irq_iommu->sub_handle;
259 index = irq_iommu->irte_index; 158 index = irq_iommu->irte_index;
260 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 159 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
263 162
264int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 163int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
265{ 164{
266 struct irq_2_iommu *irq_iommu; 165 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
267 unsigned long flags; 166 unsigned long flags;
268 167
269 spin_lock_irqsave(&irq_2_ir_lock, flags); 168 if (!irq_iommu)
270
271 irq_iommu = irq_2_iommu_alloc(irq);
272
273 if (!irq_iommu) {
274 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
275 printk(KERN_ERR "can't allocate irq_2_iommu\n");
276 return -1; 169 return -1;
277 } 170
171 spin_lock_irqsave(&irq_2_ir_lock, flags);
278 172
279 irq_iommu->iommu = iommu; 173 irq_iommu->iommu = iommu;
280 irq_iommu->irte_index = index; 174 irq_iommu->irte_index = index;
@@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
286 return 0; 180 return 0;
287} 181}
288 182
289int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
290{
291 struct irq_2_iommu *irq_iommu;
292 unsigned long flags;
293
294 spin_lock_irqsave(&irq_2_ir_lock, flags);
295 irq_iommu = valid_irq_2_iommu(irq);
296 if (!irq_iommu) {
297 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
298 return -1;
299 }
300
301 irq_iommu->iommu = NULL;
302 irq_iommu->irte_index = 0;
303 irq_iommu->sub_handle = 0;
304 irq_2_iommu(irq)->irte_mask = 0;
305
306 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
307
308 return 0;
309}
310
311int modify_irte(int irq, struct irte *irte_modified) 183int modify_irte(int irq, struct irte *irte_modified)
312{ 184{
313 int rc; 185 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
314 int index;
315 struct irte *irte;
316 struct intel_iommu *iommu; 186 struct intel_iommu *iommu;
317 struct irq_2_iommu *irq_iommu;
318 unsigned long flags; 187 unsigned long flags;
188 struct irte *irte;
189 int rc, index;
319 190
320 spin_lock_irqsave(&irq_2_ir_lock, flags); 191 if (!irq_iommu)
321 irq_iommu = valid_irq_2_iommu(irq);
322 if (!irq_iommu) {
323 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
324 return -1; 192 return -1;
325 } 193
194 spin_lock_irqsave(&irq_2_ir_lock, flags);
326 195
327 iommu = irq_iommu->iommu; 196 iommu = irq_iommu->iommu;
328 197
@@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified)
339 return rc; 208 return rc;
340} 209}
341 210
342int flush_irte(int irq)
343{
344 int rc;
345 int index;
346 struct intel_iommu *iommu;
347 struct irq_2_iommu *irq_iommu;
348 unsigned long flags;
349
350 spin_lock_irqsave(&irq_2_ir_lock, flags);
351 irq_iommu = valid_irq_2_iommu(irq);
352 if (!irq_iommu) {
353 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
354 return -1;
355 }
356
357 iommu = irq_iommu->iommu;
358
359 index = irq_iommu->irte_index + irq_iommu->sub_handle;
360
361 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
362 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
363
364 return rc;
365}
366
367struct intel_iommu *map_hpet_to_ir(u8 hpet_id) 211struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
368{ 212{
369 int i; 213 int i;
@@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
420 264
421int free_irte(int irq) 265int free_irte(int irq)
422{ 266{
423 int rc = 0; 267 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
424 struct irq_2_iommu *irq_iommu;
425 unsigned long flags; 268 unsigned long flags;
269 int rc;
426 270
427 spin_lock_irqsave(&irq_2_ir_lock, flags); 271 if (!irq_iommu)
428 irq_iommu = valid_irq_2_iommu(irq);
429 if (!irq_iommu) {
430 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
431 return -1; 272 return -1;
432 } 273
274 spin_lock_irqsave(&irq_2_ir_lock, flags);
433 275
434 rc = clear_entries(irq_iommu); 276 rc = clear_entries(irq_iommu);
435 277
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 69b7be33b3a2..7c24dcef2989 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -35,7 +35,12 @@ int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
35#endif 35#endif
36 36
37#ifndef arch_setup_msi_irqs 37#ifndef arch_setup_msi_irqs
38int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 38# define arch_setup_msi_irqs default_setup_msi_irqs
39# define HAVE_DEFAULT_MSI_SETUP_IRQS
40#endif
41
42#ifdef HAVE_DEFAULT_MSI_SETUP_IRQS
43int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
39{ 44{
40 struct msi_desc *entry; 45 struct msi_desc *entry;
41 int ret; 46 int ret;
@@ -60,7 +65,12 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
60#endif 65#endif
61 66
62#ifndef arch_teardown_msi_irqs 67#ifndef arch_teardown_msi_irqs
63void arch_teardown_msi_irqs(struct pci_dev *dev) 68# define arch_teardown_msi_irqs default_teardown_msi_irqs
69# define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
70#endif
71
72#ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS
73void default_teardown_msi_irqs(struct pci_dev *dev)
64{ 74{
65 struct msi_desc *entry; 75 struct msi_desc *entry;
66 76
@@ -170,33 +180,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
170 desc->masked = __msix_mask_irq(desc, flag); 180 desc->masked = __msix_mask_irq(desc, flag);
171} 181}
172 182
173static void msi_set_mask_bit(unsigned irq, u32 flag) 183static void msi_set_mask_bit(struct irq_data *data, u32 flag)
174{ 184{
175 struct msi_desc *desc = get_irq_msi(irq); 185 struct msi_desc *desc = irq_data_get_msi(data);
176 186
177 if (desc->msi_attrib.is_msix) { 187 if (desc->msi_attrib.is_msix) {
178 msix_mask_irq(desc, flag); 188 msix_mask_irq(desc, flag);
179 readl(desc->mask_base); /* Flush write to device */ 189 readl(desc->mask_base); /* Flush write to device */
180 } else { 190 } else {
181 unsigned offset = irq - desc->dev->irq; 191 unsigned offset = data->irq - desc->dev->irq;
182 msi_mask_irq(desc, 1 << offset, flag << offset); 192 msi_mask_irq(desc, 1 << offset, flag << offset);
183 } 193 }
184} 194}
185 195
186void mask_msi_irq(unsigned int irq) 196void mask_msi_irq(struct irq_data *data)
187{ 197{
188 msi_set_mask_bit(irq, 1); 198 msi_set_mask_bit(data, 1);
189} 199}
190 200
191void unmask_msi_irq(unsigned int irq) 201void unmask_msi_irq(struct irq_data *data)
192{ 202{
193 msi_set_mask_bit(irq, 0); 203 msi_set_mask_bit(data, 0);
194} 204}
195 205
196void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 206void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
197{ 207{
198 struct msi_desc *entry = get_irq_desc_msi(desc);
199
200 BUG_ON(entry->dev->current_state != PCI_D0); 208 BUG_ON(entry->dev->current_state != PCI_D0);
201 209
202 if (entry->msi_attrib.is_msix) { 210 if (entry->msi_attrib.is_msix) {
@@ -227,15 +235,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
227 235
228void read_msi_msg(unsigned int irq, struct msi_msg *msg) 236void read_msi_msg(unsigned int irq, struct msi_msg *msg)
229{ 237{
230 struct irq_desc *desc = irq_to_desc(irq); 238 struct msi_desc *entry = get_irq_msi(irq);
231 239
232 read_msi_msg_desc(desc, msg); 240 __read_msi_msg(entry, msg);
233} 241}
234 242
235void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 243void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
236{ 244{
237 struct msi_desc *entry = get_irq_desc_msi(desc);
238
239 /* Assert that the cache is valid, assuming that 245 /* Assert that the cache is valid, assuming that
240 * valid messages are not all-zeroes. */ 246 * valid messages are not all-zeroes. */
241 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | 247 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
@@ -246,15 +252,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
246 252
247void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) 253void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
248{ 254{
249 struct irq_desc *desc = irq_to_desc(irq); 255 struct msi_desc *entry = get_irq_msi(irq);
250 256
251 get_cached_msi_msg_desc(desc, msg); 257 __get_cached_msi_msg(entry, msg);
252} 258}
253 259
254void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 260void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
255{ 261{
256 struct msi_desc *entry = get_irq_desc_msi(desc);
257
258 if (entry->dev->current_state != PCI_D0) { 262 if (entry->dev->current_state != PCI_D0) {
259 /* Don't touch the hardware now */ 263 /* Don't touch the hardware now */
260 } else if (entry->msi_attrib.is_msix) { 264 } else if (entry->msi_attrib.is_msix) {
@@ -292,9 +296,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
292 296
293void write_msi_msg(unsigned int irq, struct msi_msg *msg) 297void write_msi_msg(unsigned int irq, struct msi_msg *msg)
294{ 298{
295 struct irq_desc *desc = irq_to_desc(irq); 299 struct msi_desc *entry = get_irq_msi(irq);
296 300
297 write_msi_msg_desc(desc, msg); 301 __write_msi_msg(entry, msg);
298} 302}
299 303
300static void free_msi_irqs(struct pci_dev *dev) 304static void free_msi_irqs(struct pci_dev *dev)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
new file mode 100644
index 000000000000..a87c4985326e
--- /dev/null
+++ b/drivers/pci/xen-pcifront.c
@@ -0,0 +1,1148 @@
1/*
2 * Xen PCI Frontend.
3 *
4 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
5 */
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/mm.h>
9#include <xen/xenbus.h>
10#include <xen/events.h>
11#include <xen/grant_table.h>
12#include <xen/page.h>
13#include <linux/spinlock.h>
14#include <linux/pci.h>
15#include <linux/msi.h>
16#include <xen/xenbus.h>
17#include <xen/interface/io/pciif.h>
18#include <asm/xen/pci.h>
19#include <linux/interrupt.h>
20#include <asm/atomic.h>
21#include <linux/workqueue.h>
22#include <linux/bitops.h>
23#include <linux/time.h>
24
/* Sentinels meaning "not yet connected to the backend". */
#define INVALID_GRANT_REF (0)
#define INVALID_EVTCHN    (-1)

/* One PCI root bus exported by pciback; kept on pcifront_device->root_buses. */
struct pci_bus_entry {
	struct list_head list;
	struct pci_bus *bus;
};

/* pcifront_device->flags bit: an AER service work item is in flight. */
#define _PDEVB_op_active (0)
#define PDEVB_op_active (1 << (_PDEVB_op_active))

/* Per-xenbus-device frontend state (one instance per "pci" xenbus node). */
struct pcifront_device {
	struct xenbus_device *xdev;
	struct list_head root_buses;	/* of struct pci_bus_entry */

	int evtchn;	/* event channel shared with pciback */
	int gnt_ref;	/* grant reference of the sh_info page */

	int irq;	/* Linux irq bound to evtchn (-1 if unbound) */

	/* Lock this when doing any operations in sh_info */
	spinlock_t sh_info_lock;
	struct xen_pci_sharedinfo *sh_info;	/* page shared with pciback */
	struct work_struct op_work;	/* services backend AER requests */
	unsigned long flags;	/* PDEVB_* bits */

};

/* Installed as pci_bus->sysdata on every frontend-created bus. */
struct pcifront_sd {
	int domain;
	struct pcifront_device *pdev;
};

/* Map a frontend bus' sysdata back to its owning pcifront_device. */
static inline struct pcifront_device *
pcifront_get_pdev(struct pcifront_sd *sd)
{
	return sd->pdev;
}

/* Initialize bus sysdata; note 'bus' is accepted but currently unused. */
static inline void pcifront_init_sd(struct pcifront_sd *sd,
				    unsigned int domain, unsigned int bus,
				    struct pcifront_device *pdev)
{
	sd->domain = domain;
	sd->pdev = pdev;
}

/* Protects pcifront_dev: only one frontend instance may be installed. */
static DEFINE_SPINLOCK(pcifront_dev_lock);
static struct pcifront_device *pcifront_dev;

/* Module parameter: when non-zero, log every config-space access. */
static int verbose_request;
module_param(verbose_request, int, 0644);
78static int errno_to_pcibios_err(int errno)
79{
80 switch (errno) {
81 case XEN_PCI_ERR_success:
82 return PCIBIOS_SUCCESSFUL;
83
84 case XEN_PCI_ERR_dev_not_found:
85 return PCIBIOS_DEVICE_NOT_FOUND;
86
87 case XEN_PCI_ERR_invalid_offset:
88 case XEN_PCI_ERR_op_failed:
89 return PCIBIOS_BAD_REGISTER_NUMBER;
90
91 case XEN_PCI_ERR_not_implemented:
92 return PCIBIOS_FUNC_NOT_SUPPORTED;
93
94 case XEN_PCI_ERR_access_denied:
95 return PCIBIOS_SET_FAILED;
96 }
97 return errno;
98}
99
/*
 * Queue the AER service work if the backend has posted a request
 * (_XEN_PCIB_active set in the shared page) and no work item is already
 * in flight.  test_and_set_bit makes the "already scheduled" check and
 * the claim atomic, so at most one op_work runs at a time.
 */
static inline void schedule_pcifront_aer_op(struct pcifront_device *pdev)
{
	if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
	    && !test_and_set_bit(_PDEVB_op_active, &pdev->flags)) {
		dev_dbg(&pdev->xdev->dev, "schedule aer frontend job\n");
		schedule_work(&pdev->op_work);
	}
}
108
/*
 * Issue one operation to pciback through the shared-info page and wait
 * for its completion by polling the shared event-channel irq.
 *
 * Serialized by pdev->sh_info_lock: only one op may occupy sh_info->op
 * at a time.  Returns the backend's XEN_PCI_ERR_* result, or
 * XEN_PCI_ERR_dev_not_found if the backend does not answer within ~2s.
 */
static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
{
	int err = 0;
	struct xen_pci_op *active_op = &pdev->sh_info->op;
	unsigned long irq_flags;
	evtchn_port_t port = pdev->evtchn;
	unsigned irq = pdev->irq;
	s64 ns, ns_timeout;
	struct timeval tv;

	spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);

	memcpy(active_op, op, sizeof(struct xen_pci_op));

	/* Go: op body must be visible before the active flag is raised. */
	wmb();
	set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_evtchn(port);

	/*
	 * We set a poll timeout of 3 seconds but give up on return after
	 * 2 seconds. It is better to time out too late rather than too early
	 * (in the latter case we end up continually re-executing poll() with a
	 * timeout in the past). 1s difference gives plenty of slack for error.
	 */
	do_gettimeofday(&tv);
	ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;

	xen_clear_irq_pending(irq);

	/* Backend clears _XEN_PCIF_active when the op is complete. */
	while (test_bit(_XEN_PCIF_active,
			(unsigned long *)&pdev->sh_info->flags)) {
		xen_poll_irq_timeout(irq, jiffies + 3*HZ);
		xen_clear_irq_pending(irq);
		do_gettimeofday(&tv);
		ns = timeval_to_ns(&tv);
		if (ns > ns_timeout) {
			dev_err(&pdev->xdev->dev,
				"pciback not responding!!!\n");
			clear_bit(_XEN_PCIF_active,
				  (unsigned long *)&pdev->sh_info->flags);
			err = XEN_PCI_ERR_dev_not_found;
			goto out;
		}
	}

	/*
	 * We might lose backend service request since we
	 * reuse same evtchn with pci_conf backend response. So re-schedule
	 * aer pcifront service.
	 */
	if (test_bit(_XEN_PCIB_active,
			(unsigned long *)&pdev->sh_info->flags)) {
		dev_err(&pdev->xdev->dev,
			"schedule aer pcifront service\n");
		schedule_pcifront_aer_op(pdev);
	}

	/* Copy the completed op (including result fields) back to caller. */
	memcpy(op, active_op, sizeof(struct xen_pci_op));

	err = op->err;
out:
	spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
	return err;
}
174
175/* Access to this function is spinlocked in drivers/pci/access.c */
176static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
177 int where, int size, u32 *val)
178{
179 int err = 0;
180 struct xen_pci_op op = {
181 .cmd = XEN_PCI_OP_conf_read,
182 .domain = pci_domain_nr(bus),
183 .bus = bus->number,
184 .devfn = devfn,
185 .offset = where,
186 .size = size,
187 };
188 struct pcifront_sd *sd = bus->sysdata;
189 struct pcifront_device *pdev = pcifront_get_pdev(sd);
190
191 if (verbose_request)
192 dev_info(&pdev->xdev->dev,
193 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
194 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
195 PCI_FUNC(devfn), where, size);
196
197 err = do_pci_op(pdev, &op);
198
199 if (likely(!err)) {
200 if (verbose_request)
201 dev_info(&pdev->xdev->dev, "read got back value %x\n",
202 op.value);
203
204 *val = op.value;
205 } else if (err == -ENODEV) {
206 /* No device here, pretend that it just returned 0 */
207 err = 0;
208 *val = 0;
209 }
210
211 return errno_to_pcibios_err(err);
212}
213
214/* Access to this function is spinlocked in drivers/pci/access.c */
215static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
216 int where, int size, u32 val)
217{
218 struct xen_pci_op op = {
219 .cmd = XEN_PCI_OP_conf_write,
220 .domain = pci_domain_nr(bus),
221 .bus = bus->number,
222 .devfn = devfn,
223 .offset = where,
224 .size = size,
225 .value = val,
226 };
227 struct pcifront_sd *sd = bus->sysdata;
228 struct pcifront_device *pdev = pcifront_get_pdev(sd);
229
230 if (verbose_request)
231 dev_info(&pdev->xdev->dev,
232 "write dev=%04x:%02x:%02x.%01x - "
233 "offset %x size %d val %x\n",
234 pci_domain_nr(bus), bus->number,
235 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
236
237 return errno_to_pcibios_err(do_pci_op(pdev, &op));
238}
239
240struct pci_ops pcifront_bus_ops = {
241 .read = pcifront_bus_read,
242 .write = pcifront_bus_write,
243};
244
#ifdef CONFIG_PCI_MSI
/*
 * Ask pciback to enable MSI-X on 'dev' for the entries currently on
 * dev->msi_list.  On success the backend-assigned vectors are written
 * into *vector (assumed to point at an array of at least nvec ints --
 * TODO confirm against the caller in arch Xen MSI code).
 * Returns 0, the backend's positive op.value on refusal, or a negative
 * errno from do_pci_op().
 */
static int pci_frontend_enable_msix(struct pci_dev *dev,
				    int **vector, int nvec)
{
	int err;
	int i;
	struct xen_pci_op op = {
		.cmd = XEN_PCI_OP_enable_msix,
		.domain = pci_domain_nr(dev->bus),
		.bus = dev->bus->number,
		.devfn = dev->devfn,
		.value = nvec,
	};
	struct pcifront_sd *sd = dev->bus->sysdata;
	struct pcifront_device *pdev = pcifront_get_pdev(sd);
	struct msi_desc *entry;

	/* The shared page only carries SH_INFO_MAX_VEC msix entries. */
	if (nvec > SH_INFO_MAX_VEC) {
		dev_err(&dev->dev, "too much vector for pci frontend: %x."
				   " Increase SH_INFO_MAX_VEC.\n", nvec);
		return -EINVAL;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
		/* Vector is useless at this point. */
		op.msix_entries[i].vector = -1;
		i++;
	}

	err = do_pci_op(pdev, &op);

	if (likely(!err)) {
		if (likely(!op.value)) {
			/* we get the result */
			for (i = 0; i < nvec; i++)
				*(*vector+i) = op.msix_entries[i].vector;
			return 0;
		} else {
			/* Backend declined: propagate its status code. */
			printk(KERN_DEBUG "enable msix get value %x\n",
				op.value);
			return op.value;
		}
	} else {
		dev_err(&dev->dev, "enable msix get err %x\n", err);
		return err;
	}
}
294
295static void pci_frontend_disable_msix(struct pci_dev *dev)
296{
297 int err;
298 struct xen_pci_op op = {
299 .cmd = XEN_PCI_OP_disable_msix,
300 .domain = pci_domain_nr(dev->bus),
301 .bus = dev->bus->number,
302 .devfn = dev->devfn,
303 };
304 struct pcifront_sd *sd = dev->bus->sysdata;
305 struct pcifront_device *pdev = pcifront_get_pdev(sd);
306
307 err = do_pci_op(pdev, &op);
308
309 /* What should do for error ? */
310 if (err)
311 dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
312}
313
314static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
315{
316 int err;
317 struct xen_pci_op op = {
318 .cmd = XEN_PCI_OP_enable_msi,
319 .domain = pci_domain_nr(dev->bus),
320 .bus = dev->bus->number,
321 .devfn = dev->devfn,
322 };
323 struct pcifront_sd *sd = dev->bus->sysdata;
324 struct pcifront_device *pdev = pcifront_get_pdev(sd);
325
326 err = do_pci_op(pdev, &op);
327 if (likely(!err)) {
328 *(*vector) = op.value;
329 } else {
330 dev_err(&dev->dev, "pci frontend enable msi failed for dev "
331 "%x:%x\n", op.bus, op.devfn);
332 err = -EINVAL;
333 }
334 return err;
335}
336
/*
 * Tell pciback to disable MSI on 'dev'.  There is no recovery path at
 * this layer, so both failure modes are only logged.
 */
static void pci_frontend_disable_msi(struct pci_dev *dev)
{
	int err;
	struct xen_pci_op op = {
		.cmd = XEN_PCI_OP_disable_msi,
		.domain = pci_domain_nr(dev->bus),
		.bus = dev->bus->number,
		.devfn = dev->devfn,
	};
	struct pcifront_sd *sd = dev->bus->sysdata;
	struct pcifront_device *pdev = pcifront_get_pdev(sd);

	err = do_pci_op(pdev, &op);
	if (err == XEN_PCI_ERR_dev_not_found) {
		/* XXX No response from backend, what shall we do? */
		printk(KERN_DEBUG "get no response from backend for disable MSI\n");
		return;
	}
	if (err)
		/* how can pciback notify us fail? */
		printk(KERN_DEBUG "get fake response frombackend\n");
}
359
/* MSI/MSI-X ops installed into the arch Xen PCI layer (see registrar). */
static struct xen_pci_frontend_ops pci_frontend_ops = {
	.enable_msi = pci_frontend_enable_msi,
	.disable_msi = pci_frontend_disable_msi,
	.enable_msix = pci_frontend_enable_msix,
	.disable_msix = pci_frontend_disable_msix,
};
366
367static void pci_frontend_registrar(int enable)
368{
369 if (enable)
370 xen_pci_frontend = &pci_frontend_ops;
371 else
372 xen_pci_frontend = NULL;
373};
374#else
375static inline void pci_frontend_registrar(int enable) { };
376#endif /* CONFIG_PCI_MSI */
377
378/* Claim resources for the PCI frontend as-is, backend won't allow changes */
379static int pcifront_claim_resource(struct pci_dev *dev, void *data)
380{
381 struct pcifront_device *pdev = data;
382 int i;
383 struct resource *r;
384
385 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
386 r = &dev->resource[i];
387
388 if (!r->parent && r->start && r->flags) {
389 dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
390 pci_name(dev), i);
391 if (pci_claim_resource(dev, i)) {
392 dev_err(&pdev->xdev->dev, "Could not claim "
393 "resource %s/%d! Device offline. Try "
394 "giving less than 4GB to domain.\n",
395 pci_name(dev), i);
396 }
397 }
398 }
399
400 return 0;
401}
402
403static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
404 unsigned int domain, unsigned int bus,
405 struct pci_bus *b)
406{
407 struct pci_dev *d;
408 unsigned int devfn;
409
410 /* Scan the bus for functions and add.
411 * We omit handling of PCI bridge attachment because pciback prevents
412 * bridges from being exported.
413 */
414 for (devfn = 0; devfn < 0x100; devfn++) {
415 d = pci_get_slot(b, devfn);
416 if (d) {
417 /* Device is already known. */
418 pci_dev_put(d);
419 continue;
420 }
421
422 d = pci_scan_single_device(b, devfn);
423 if (d)
424 dev_info(&pdev->xdev->dev, "New device on "
425 "%04x:%02x:%02x.%02x found.\n", domain, bus,
426 PCI_SLOT(devfn), PCI_FUNC(devfn));
427 }
428
429 return 0;
430}
431
/*
 * Create and populate one root bus (domain:bus) exported by pciback:
 * allocate sysdata, scan every devfn, claim resources as-is, and then
 * register the devices with the driver core.
 */
static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
					unsigned int domain, unsigned int bus)
{
	struct pci_bus *b;
	struct pcifront_sd *sd = NULL;
	struct pci_bus_entry *bus_entry = NULL;
	int err = 0;

#ifndef CONFIG_PCI_DOMAINS
	if (domain != 0) {
		dev_err(&pdev->xdev->dev,
			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
		dev_err(&pdev->xdev->dev,
			"Please compile with CONFIG_PCI_DOMAINS\n");
		err = -EINVAL;
		goto err_out;
	}
#endif

	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
		 domain, bus);

	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
	if (!bus_entry || !sd) {
		err = -ENOMEM;
		goto err_out;
	}
	pcifront_init_sd(sd, domain, bus, pdev);

	b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
				  &pcifront_bus_ops, sd);
	if (!b) {
		dev_err(&pdev->xdev->dev,
			"Error creating PCI Frontend Bus!\n");
		err = -ENOMEM;
		goto err_out;
	}

	bus_entry->bus = b;

	/* Track the bus so pcifront_free_roots() can tear it down later. */
	list_add(&bus_entry->list, &pdev->root_buses);

	/* pci_scan_bus_parented skips devices which do not have a
	 * devfn==0. The pcifront_scan_bus enumerates all devfn. */
	err = pcifront_scan_bus(pdev, domain, bus, b);

	/* Claim resources before going "live" with our devices */
	pci_walk_bus(b, pcifront_claim_resource, pdev);

	/* Create SysFS and notify udev of the devices. Aka: "going live" */
	pci_bus_add_devices(b);

	return err;

err_out:
	/* kfree(NULL) is a no-op, so partial allocation is handled here. */
	kfree(bus_entry);
	kfree(sd);

	return err;
}
493
494static int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
495 unsigned int domain, unsigned int bus)
496{
497 int err;
498 struct pci_bus *b;
499
500#ifndef CONFIG_PCI_DOMAINS
501 if (domain != 0) {
502 dev_err(&pdev->xdev->dev,
503 "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
504 dev_err(&pdev->xdev->dev,
505 "Please compile with CONFIG_PCI_DOMAINS\n");
506 return -EINVAL;
507 }
508#endif
509
510 dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
511 domain, bus);
512
513 b = pci_find_bus(domain, bus);
514 if (!b)
515 /* If the bus is unknown, create it. */
516 return pcifront_scan_root(pdev, domain, bus);
517
518 err = pcifront_scan_bus(pdev, domain, bus, b);
519
520 /* Claim resources before going "live" with our devices */
521 pci_walk_bus(b, pcifront_claim_resource, pdev);
522
523 /* Create SysFS and notify udev of the devices. Aka: "going live" */
524 pci_bus_add_devices(b);
525
526 return err;
527}
528
529static void free_root_bus_devs(struct pci_bus *bus)
530{
531 struct pci_dev *dev;
532
533 while (!list_empty(&bus->devices)) {
534 dev = container_of(bus->devices.next, struct pci_dev,
535 bus_list);
536 dev_dbg(&dev->dev, "removing device\n");
537 pci_remove_bus_device(dev);
538 }
539}
540
/*
 * Tear down every root bus created by pcifront_scan_root(): remove the
 * devices, free the sysdata we allocated, and unregister the bus.
 */
static void pcifront_free_roots(struct pcifront_device *pdev)
{
	struct pci_bus_entry *bus_entry, *t;

	dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");

	list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
		list_del(&bus_entry->list);

		free_root_bus_devs(bus_entry->bus);

		/* sysdata is the pcifront_sd kmalloc'd in scan_root */
		kfree(bus_entry->bus->sysdata);

		device_unregister(bus_entry->bus->bridge);
		pci_remove_bus(bus_entry->bus);

		kfree(bus_entry);
	}
}
560
561static pci_ers_result_t pcifront_common_process(int cmd,
562 struct pcifront_device *pdev,
563 pci_channel_state_t state)
564{
565 pci_ers_result_t result;
566 struct pci_driver *pdrv;
567 int bus = pdev->sh_info->aer_op.bus;
568 int devfn = pdev->sh_info->aer_op.devfn;
569 struct pci_dev *pcidev;
570 int flag = 0;
571
572 dev_dbg(&pdev->xdev->dev,
573 "pcifront AER process: cmd %x (bus:%x, devfn%x)",
574 cmd, bus, devfn);
575 result = PCI_ERS_RESULT_NONE;
576
577 pcidev = pci_get_bus_and_slot(bus, devfn);
578 if (!pcidev || !pcidev->driver) {
579 dev_err(&pcidev->dev,
580 "device or driver is NULL\n");
581 return result;
582 }
583 pdrv = pcidev->driver;
584
585 if (get_driver(&pdrv->driver)) {
586 if (pdrv->err_handler && pdrv->err_handler->error_detected) {
587 dev_dbg(&pcidev->dev,
588 "trying to call AER service\n");
589 if (pcidev) {
590 flag = 1;
591 switch (cmd) {
592 case XEN_PCI_OP_aer_detected:
593 result = pdrv->err_handler->
594 error_detected(pcidev, state);
595 break;
596 case XEN_PCI_OP_aer_mmio:
597 result = pdrv->err_handler->
598 mmio_enabled(pcidev);
599 break;
600 case XEN_PCI_OP_aer_slotreset:
601 result = pdrv->err_handler->
602 slot_reset(pcidev);
603 break;
604 case XEN_PCI_OP_aer_resume:
605 pdrv->err_handler->resume(pcidev);
606 break;
607 default:
608 dev_err(&pdev->xdev->dev,
609 "bad request in aer recovery "
610 "operation!\n");
611
612 }
613 }
614 }
615 put_driver(&pdrv->driver);
616 }
617 if (!flag)
618 result = PCI_ERS_RESULT_NONE;
619
620 return result;
621}
622
623
/*
 * Workqueue handler for one AER request from pciback: run the requested
 * recovery step, publish the result in the shared page, and notify the
 * backend over the event channel.
 */
static void pcifront_do_aer(struct work_struct *data)
{
	struct pcifront_device *pdev =
		container_of(data, struct pcifront_device, op_work);
	int cmd = pdev->sh_info->aer_op.cmd;
	pci_channel_state_t state =
		(pci_channel_state_t)pdev->sh_info->aer_op.err;

	/*If a pci_conf op is in progress,
	we have to wait until it is done before service aer op*/
	dev_dbg(&pdev->xdev->dev,
		"pcifront service aer bus %x devfn %x\n",
		pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn);

	pdev->sh_info->aer_op.err = pcifront_common_process(cmd, pdev, state);

	/* Post the operation to the guest. */
	wmb();	/* result must be visible before _XEN_PCIB_active clears */
	clear_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_evtchn(pdev->evtchn);

	/*in case of we lost an aer request in four lines time_window*/
	smp_mb__before_clear_bit();
	clear_bit(_PDEVB_op_active, &pdev->flags);
	smp_mb__after_clear_bit();

	/* Re-check for a request that raced with the window above. */
	schedule_pcifront_aer_op(pdev);

}
653
654static irqreturn_t pcifront_handler_aer(int irq, void *dev)
655{
656 struct pcifront_device *pdev = dev;
657 schedule_pcifront_aer_op(pdev);
658 return IRQ_HANDLED;
659}
660static int pcifront_connect(struct pcifront_device *pdev)
661{
662 int err = 0;
663
664 spin_lock(&pcifront_dev_lock);
665
666 if (!pcifront_dev) {
667 dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
668 pcifront_dev = pdev;
669 } else {
670 dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
671 err = -EEXIST;
672 }
673
674 spin_unlock(&pcifront_dev_lock);
675
676 return err;
677}
678
679static void pcifront_disconnect(struct pcifront_device *pdev)
680{
681 spin_lock(&pcifront_dev_lock);
682
683 if (pdev == pcifront_dev) {
684 dev_info(&pdev->xdev->dev,
685 "Disconnecting PCI Frontend Buses\n");
686 pcifront_dev = NULL;
687 }
688
689 spin_unlock(&pcifront_dev_lock);
690}
/*
 * Allocate and initialize the frontend state for xenbus device 'xdev',
 * including the page shared with pciback.  Returns NULL on allocation
 * failure.  Paired with free_pdev().
 */
static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
{
	struct pcifront_device *pdev;

	pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
	if (pdev == NULL)
		goto out;

	pdev->sh_info =
		(struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
	if (pdev->sh_info == NULL) {
		kfree(pdev);
		pdev = NULL;
		goto out;
	}
	pdev->sh_info->flags = 0;

	/*Flag for registering PV AER handler*/
	set_bit(_XEN_PCIB_AERHANDLER, (void *)&pdev->sh_info->flags);

	dev_set_drvdata(&xdev->dev, pdev);
	pdev->xdev = xdev;

	INIT_LIST_HEAD(&pdev->root_buses);

	spin_lock_init(&pdev->sh_info_lock);

	/* Not connected yet; filled in by pcifront_publish_info(). */
	pdev->evtchn = INVALID_EVTCHN;
	pdev->gnt_ref = INVALID_GRANT_REF;
	pdev->irq = -1;

	INIT_WORK(&pdev->op_work, pcifront_do_aer);

	dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
		pdev, pdev->sh_info);
out:
	return pdev;
}
729
/*
 * Undo alloc_pdev()/pcifront_publish_info(): tear down root buses,
 * quiesce the AER work, then release irq, event channel and the page
 * shared with the backend (in that order).
 */
static void free_pdev(struct pcifront_device *pdev)
{
	dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);

	pcifront_free_roots(pdev);

	/*For PCIE_AER error handling job*/
	flush_scheduled_work();

	if (pdev->irq >= 0)
		unbind_from_irqhandler(pdev->irq, pdev);

	if (pdev->evtchn != INVALID_EVTCHN)
		xenbus_free_evtchn(pdev->xdev, pdev->evtchn);

	/* If granted, gnttab reclaims the page; otherwise free it here. */
	if (pdev->gnt_ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */,
					  (unsigned long)pdev->sh_info);
	else
		free_page((unsigned long)pdev->sh_info);

	dev_set_drvdata(&pdev->xdev->dev, NULL);

	kfree(pdev);
}
755
/*
 * Grant the shared page, allocate the event channel, bind the AER irq
 * handler, and publish grant ref + event channel + magic to xenstore in
 * one transaction.  On success the frontend moves to Initialised.
 * On failure, resources already acquired are released by free_pdev().
 */
static int pcifront_publish_info(struct pcifront_device *pdev)
{
	int err = 0;
	struct xenbus_transaction trans;

	err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
	if (err < 0)
		goto out;

	pdev->gnt_ref = err;	/* xenbus_grant_ring returns the grant ref */

	err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
	if (err)
		goto out;

	err = bind_evtchn_to_irqhandler(pdev->evtchn, pcifront_handler_aer,
					0, "pcifront", pdev);

	/* NOTE(review): direct return rather than 'goto out' -- equivalent
	 * here since 'out' only returns err, but inconsistent in style. */
	if (err < 0)
		return err;

	pdev->irq = err;

do_publish:
	err = xenbus_transaction_start(&trans);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error writing configuration for backend "
				 "(start transaction)");
		goto out;
	}

	err = xenbus_printf(trans, pdev->xdev->nodename,
			    "pci-op-ref", "%u", pdev->gnt_ref);
	if (!err)
		err = xenbus_printf(trans, pdev->xdev->nodename,
				    "event-channel", "%u", pdev->evtchn);
	if (!err)
		err = xenbus_printf(trans, pdev->xdev->nodename,
				    "magic", XEN_PCI_MAGIC);

	if (err) {
		/* Abort the transaction on any write failure. */
		xenbus_transaction_end(trans, 1);
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error writing configuration for backend");
		goto out;
	} else {
		err = xenbus_transaction_end(trans, 0);
		if (err == -EAGAIN)
			goto do_publish;	/* raced; retry transaction */
		else if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error completing transaction "
					 "for backend");
			goto out;
		}
	}

	xenbus_switch_state(pdev->xdev, XenbusStateInitialised);

	dev_dbg(&pdev->xdev->dev, "publishing successful!\n");

out:
	return err;
}
821
/*
 * Backend reached Connected: read the list of exported root buses from
 * xenstore, scan each one, and move this frontend to Connected too.
 * Runs at most once (requires our state to still be Initialised).
 */
static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	char str[64];
	unsigned int domain, bus;


	/* Only connect once */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateInitialised)
		goto out;

	err = pcifront_connect(pdev);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error connecting PCI Frontend");
		goto out;
	}

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* Older backend without root_num: assume one 0000:00 root. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_scan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		/* Each root-%d node holds "domain:bus" in hex. */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_scan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	return err;
}
888
889static int pcifront_try_disconnect(struct pcifront_device *pdev)
890{
891 int err = 0;
892 enum xenbus_state prev_state;
893
894
895 prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
896
897 if (prev_state >= XenbusStateClosing)
898 goto out;
899
900 if (prev_state == XenbusStateConnected) {
901 pcifront_free_roots(pdev);
902 pcifront_disconnect(pdev);
903 }
904
905 err = xenbus_switch_state(pdev->xdev, XenbusStateClosed);
906
907out:
908
909 return err;
910}
911
/*
 * Backend reached Reconfigured: re-read the root list from xenstore and
 * rescan each bus to pick up newly attached devices, then return this
 * frontend to Connected.  Only runs while we are in Reconfiguring.
 */
static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	unsigned int domain, bus;
	char str[64];

	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateReconfiguring)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* Older backend without root_num: assume one 0000:00 root. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_rescan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		/* Each root-%d node holds "domain:bus" in hex. */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_rescan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	return err;
}
969
/*
 * Backend entered Reconfiguring: walk its device list in xenstore,
 * remove every device whose state-%d node says Closing, and then ack by
 * switching this frontend to Reconfiguring.
 */
static int pcifront_detach_devices(struct pcifront_device *pdev)
{
	int err = 0;
	int i, num_devs;
	unsigned int domain, bus, slot, func;
	struct pci_bus *pci_bus;
	struct pci_dev *pci_dev;
	char str[64];

	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateConnected)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
			   &num_devs);
	if (err != 1) {
		if (err >= 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI devices");
		goto out;
	}

	/* Find devices being detached and remove them. */
	for (i = 0; i < num_devs; i++) {
		int l, state;
		l = snprintf(str, sizeof(str), "state-%d", i);
		if (unlikely(l >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
				   &state);
		if (err != 1)
			state = XenbusStateUnknown;

		/* Only devices the backend marked Closing are removed. */
		if (state != XenbusStateClosing)
			continue;

		/* Remove device. */
		l = snprintf(str, sizeof(str), "vdev-%d", i);
		if (unlikely(l >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
		if (err != 4) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI device %d", i);
			goto out;
		}

		pci_bus = pci_find_bus(domain, bus);
		if (!pci_bus) {
			dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
				domain, bus);
			continue;
		}
		pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
		if (!pci_dev) {
			dev_dbg(&pdev->xdev->dev,
				"Cannot get PCI device %04x:%02x:%02x.%02x\n",
				domain, bus, slot, func);
			continue;
		}
		pci_remove_bus_device(pci_dev);
		/* Drop the reference taken by pci_get_slot(). */
		pci_dev_put(pci_dev);

		dev_dbg(&pdev->xdev->dev,
			"PCI device %04x:%02x:%02x.%02x removed.\n",
			domain, bus, slot, func);
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);

out:
	return err;
}
1051
/*
 * xenbus otherend_changed callback: drive the frontend state machine in
 * response to pciback state transitions.
 */
static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
						  enum xenbus_state be_state)
{
	struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);

	switch (be_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		pcifront_try_connect(pdev);
		break;

	case XenbusStateClosing:
		dev_warn(&xdev->dev, "backend going away!\n");
		pcifront_try_disconnect(pdev);
		break;

	/* Backend is hot-unplugging devices: remove ours, then ack. */
	case XenbusStateReconfiguring:
		pcifront_detach_devices(pdev);
		break;

	/* Backend finished reconfiguring: rescan for remaining/new devices. */
	case XenbusStateReconfigured:
		pcifront_attach_devices(pdev);
		break;
	}
}
1083
1084static int pcifront_xenbus_probe(struct xenbus_device *xdev,
1085 const struct xenbus_device_id *id)
1086{
1087 int err = 0;
1088 struct pcifront_device *pdev = alloc_pdev(xdev);
1089
1090 if (pdev == NULL) {
1091 err = -ENOMEM;
1092 xenbus_dev_fatal(xdev, err,
1093 "Error allocating pcifront_device struct");
1094 goto out;
1095 }
1096
1097 err = pcifront_publish_info(pdev);
1098 if (err)
1099 free_pdev(pdev);
1100
1101out:
1102 return err;
1103}
1104
1105static int pcifront_xenbus_remove(struct xenbus_device *xdev)
1106{
1107 struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
1108 if (pdev)
1109 free_pdev(pdev);
1110
1111 return 0;
1112}
1113
/* xenbus device type this driver binds to ("pci" frontend nodes). */
static const struct xenbus_device_id xenpci_ids[] = {
	{"pci"},
	{""},	/* terminator */
};

static struct xenbus_driver xenbus_pcifront_driver = {
	.name = "pcifront",
	.owner = THIS_MODULE,
	.ids = xenpci_ids,
	.probe = pcifront_xenbus_probe,
	.remove = pcifront_xenbus_remove,
	.otherend_changed = pcifront_backend_changed,
};
1127
/*
 * Module init: only meaningful in a PV guest that is not dom0 (dom0
 * owns the real PCI hardware).  Registers the MSI ops and the xenbus
 * frontend driver.
 */
static int __init pcifront_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	pci_frontend_registrar(1 /* enable */);

	return xenbus_register_frontend(&xenbus_pcifront_driver);
}
1137
/* Module exit: unregister the xenbus driver and drop the MSI ops. */
static void __exit pcifront_cleanup(void)
{
	xenbus_unregister_driver(&xenbus_pcifront_driver);
	pci_frontend_registrar(0 /* disable */);
}
module_init(pcifront_init);
module_exit(pcifront_cleanup);

MODULE_DESCRIPTION("Xen PCI passthrough frontend.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:pci");
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 7c7f42a12796..428d273be727 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -631,6 +631,8 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
631 switch (backend_state) { 631 switch (backend_state) {
632 case XenbusStateInitialising: 632 case XenbusStateInitialising:
633 case XenbusStateInitialised: 633 case XenbusStateInitialised:
634 case XenbusStateReconfiguring:
635 case XenbusStateReconfigured:
634 case XenbusStateUnknown: 636 case XenbusStateUnknown:
635 case XenbusStateClosed: 637 case XenbusStateClosed:
636 break; 638 break;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 60d71e9abe9f..6e6180ccd726 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -74,6 +74,7 @@ config XEN_PLATFORM_PCI
74 74
75config SWIOTLB_XEN 75config SWIOTLB_XEN
76 def_bool y 76 def_bool y
77 depends on SWIOTLB 77 depends on PCI
78 select SWIOTLB
78 79
79endmenu 80endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fcaf838f54be..b97864551718 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,6 +4,7 @@ obj-y += xenbus/
4nostackp := $(call cc-option, -fno-stack-protector) 4nostackp := $(call cc-option, -fno-stack-protector)
5CFLAGS_features.o := $(nostackp) 5CFLAGS_features.o := $(nostackp)
6 6
7obj-$(CONFIG_BLOCK) += biomerge.o
7obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 8obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
8obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 9obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
9obj-$(CONFIG_XEN_BALLOON) += balloon.o 10obj-$(CONFIG_XEN_BALLOON) += balloon.o
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
new file mode 100644
index 000000000000..ba6eda4b5143
--- /dev/null
+++ b/drivers/xen/biomerge.c
@@ -0,0 +1,13 @@
1#include <linux/bio.h>
2#include <linux/io.h>
3#include <xen/page.h>
4
5bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
6 const struct bio_vec *vec2)
7{
8 unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
9 unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
10
11 return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
12 ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
13}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 13365ba35218..3df53de6b43a 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -16,7 +16,7 @@
16 * (typically dom0). 16 * (typically dom0).
17 * 2. VIRQs, typically used for timers. These are per-cpu events. 17 * 2. VIRQs, typically used for timers. These are per-cpu events.
18 * 3. IPIs. 18 * 3. IPIs.
19 * 4. Hardware interrupts. Not supported at present. 19 * 4. PIRQs - Hardware interrupts.
20 * 20 *
21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
22 */ 22 */
@@ -28,11 +28,13 @@
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/bootmem.h> 29#include <linux/bootmem.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/irqnr.h>
31 32
32#include <asm/desc.h> 33#include <asm/desc.h>
33#include <asm/ptrace.h> 34#include <asm/ptrace.h>
34#include <asm/irq.h> 35#include <asm/irq.h>
35#include <asm/idle.h> 36#include <asm/idle.h>
37#include <asm/io_apic.h>
36#include <asm/sync_bitops.h> 38#include <asm/sync_bitops.h>
37#include <asm/xen/hypercall.h> 39#include <asm/xen/hypercall.h>
38#include <asm/xen/hypervisor.h> 40#include <asm/xen/hypervisor.h>
@@ -89,20 +91,26 @@ struct irq_info
89 enum ipi_vector ipi; 91 enum ipi_vector ipi;
90 struct { 92 struct {
91 unsigned short gsi; 93 unsigned short gsi;
92 unsigned short vector; 94 unsigned char vector;
95 unsigned char flags;
93 } pirq; 96 } pirq;
94 } u; 97 } u;
95}; 98};
99#define PIRQ_NEEDS_EOI (1 << 0)
100#define PIRQ_SHAREABLE (1 << 1)
96 101
97static struct irq_info irq_info[NR_IRQS]; 102static struct irq_info *irq_info;
98 103
99static int evtchn_to_irq[NR_EVENT_CHANNELS] = { 104static int *evtchn_to_irq;
100 [0 ... NR_EVENT_CHANNELS-1] = -1
101};
102struct cpu_evtchn_s { 105struct cpu_evtchn_s {
103 unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG]; 106 unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
104}; 107};
105static struct cpu_evtchn_s *cpu_evtchn_mask_p; 108
109static __initdata struct cpu_evtchn_s init_evtchn_mask = {
110 .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
111};
112static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
113
106static inline unsigned long *cpu_evtchn_mask(int cpu) 114static inline unsigned long *cpu_evtchn_mask(int cpu)
107{ 115{
108 return cpu_evtchn_mask_p[cpu].bits; 116 return cpu_evtchn_mask_p[cpu].bits;
@@ -113,6 +121,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
113 121
114static struct irq_chip xen_dynamic_chip; 122static struct irq_chip xen_dynamic_chip;
115static struct irq_chip xen_percpu_chip; 123static struct irq_chip xen_percpu_chip;
124static struct irq_chip xen_pirq_chip;
116 125
117/* Constructor for packed IRQ information. */ 126/* Constructor for packed IRQ information. */
118static struct irq_info mk_unbound_info(void) 127static struct irq_info mk_unbound_info(void)
@@ -225,6 +234,15 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
225 return ret; 234 return ret;
226} 235}
227 236
237static bool pirq_needs_eoi(unsigned irq)
238{
239 struct irq_info *info = info_for_irq(irq);
240
241 BUG_ON(info->type != IRQT_PIRQ);
242
243 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
244}
245
228static inline unsigned long active_evtchns(unsigned int cpu, 246static inline unsigned long active_evtchns(unsigned int cpu,
229 struct shared_info *sh, 247 struct shared_info *sh,
230 unsigned int idx) 248 unsigned int idx)
@@ -336,36 +354,297 @@ static void unmask_evtchn(int port)
336 put_cpu(); 354 put_cpu();
337} 355}
338 356
357static int get_nr_hw_irqs(void)
358{
359 int ret = 1;
360
361#ifdef CONFIG_X86_IO_APIC
362 ret = get_nr_irqs_gsi();
363#endif
364
365 return ret;
366}
367
339static int find_unbound_irq(void) 368static int find_unbound_irq(void)
340{ 369{
341 int irq; 370 struct irq_data *data;
342 struct irq_desc *desc; 371 int irq, res;
372 int start = get_nr_hw_irqs();
343 373
344 for (irq = 0; irq < nr_irqs; irq++) { 374 if (start == nr_irqs)
345 desc = irq_to_desc(irq); 375 goto no_irqs;
376
377 /* nr_irqs is a magic value. Must not use it.*/
378 for (irq = nr_irqs-1; irq > start; irq--) {
379 data = irq_get_irq_data(irq);
346 /* only 0->15 have init'd desc; handle irq > 16 */ 380 /* only 0->15 have init'd desc; handle irq > 16 */
347 if (desc == NULL) 381 if (!data)
348 break; 382 break;
349 if (desc->chip == &no_irq_chip) 383 if (data->chip == &no_irq_chip)
350 break; 384 break;
351 if (desc->chip != &xen_dynamic_chip) 385 if (data->chip != &xen_dynamic_chip)
352 continue; 386 continue;
353 if (irq_info[irq].type == IRQT_UNBOUND) 387 if (irq_info[irq].type == IRQT_UNBOUND)
354 break; 388 return irq;
355 } 389 }
356 390
357 if (irq == nr_irqs) 391 if (irq == start)
358 panic("No available IRQ to bind to: increase nr_irqs!\n"); 392 goto no_irqs;
393
394 res = irq_alloc_desc_at(irq, 0);
359 395
360 desc = irq_to_desc_alloc_node(irq, 0); 396 if (WARN_ON(res != irq))
361 if (WARN_ON(desc == NULL))
362 return -1; 397 return -1;
363 398
364 dynamic_irq_init_keep_chip_data(irq); 399 return irq;
400
401no_irqs:
402 panic("No available IRQ to bind to: increase nr_irqs!\n");
403}
404
405static bool identity_mapped_irq(unsigned irq)
406{
407 /* identity map all the hardware irqs */
408 return irq < get_nr_hw_irqs();
409}
410
411static void pirq_unmask_notify(int irq)
412{
413 struct physdev_eoi eoi = { .irq = irq };
414
415 if (unlikely(pirq_needs_eoi(irq))) {
416 int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
417 WARN_ON(rc);
418 }
419}
420
421static void pirq_query_unmask(int irq)
422{
423 struct physdev_irq_status_query irq_status;
424 struct irq_info *info = info_for_irq(irq);
425
426 BUG_ON(info->type != IRQT_PIRQ);
427
428 irq_status.irq = irq;
429 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
430 irq_status.flags = 0;
431
432 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
433 if (irq_status.flags & XENIRQSTAT_needs_eoi)
434 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
435}
436
437static bool probing_irq(int irq)
438{
439 struct irq_desc *desc = irq_to_desc(irq);
440
441 return desc && desc->action == NULL;
442}
443
444static unsigned int startup_pirq(unsigned int irq)
445{
446 struct evtchn_bind_pirq bind_pirq;
447 struct irq_info *info = info_for_irq(irq);
448 int evtchn = evtchn_from_irq(irq);
449 int rc;
450
451 BUG_ON(info->type != IRQT_PIRQ);
452
453 if (VALID_EVTCHN(evtchn))
454 goto out;
455
456 bind_pirq.pirq = irq;
457 /* NB. We are happy to share unless we are probing. */
458 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
459 BIND_PIRQ__WILL_SHARE : 0;
460 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
461 if (rc != 0) {
462 if (!probing_irq(irq))
463 printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
464 irq);
465 return 0;
466 }
467 evtchn = bind_pirq.port;
468
469 pirq_query_unmask(irq);
470
471 evtchn_to_irq[evtchn] = irq;
472 bind_evtchn_to_cpu(evtchn, 0);
473 info->evtchn = evtchn;
474
475out:
476 unmask_evtchn(evtchn);
477 pirq_unmask_notify(irq);
478
479 return 0;
480}
481
482static void shutdown_pirq(unsigned int irq)
483{
484 struct evtchn_close close;
485 struct irq_info *info = info_for_irq(irq);
486 int evtchn = evtchn_from_irq(irq);
487
488 BUG_ON(info->type != IRQT_PIRQ);
489
490 if (!VALID_EVTCHN(evtchn))
491 return;
492
493 mask_evtchn(evtchn);
494
495 close.port = evtchn;
496 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
497 BUG();
498
499 bind_evtchn_to_cpu(evtchn, 0);
500 evtchn_to_irq[evtchn] = -1;
501 info->evtchn = 0;
502}
503
504static void enable_pirq(unsigned int irq)
505{
506 startup_pirq(irq);
507}
508
509static void disable_pirq(unsigned int irq)
510{
511}
512
513static void ack_pirq(unsigned int irq)
514{
515 int evtchn = evtchn_from_irq(irq);
516
517 move_native_irq(irq);
518
519 if (VALID_EVTCHN(evtchn)) {
520 mask_evtchn(evtchn);
521 clear_evtchn(evtchn);
522 }
523}
524
525static void end_pirq(unsigned int irq)
526{
527 int evtchn = evtchn_from_irq(irq);
528 struct irq_desc *desc = irq_to_desc(irq);
529
530 if (WARN_ON(!desc))
531 return;
532
533 if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
534 (IRQ_DISABLED|IRQ_PENDING)) {
535 shutdown_pirq(irq);
536 } else if (VALID_EVTCHN(evtchn)) {
537 unmask_evtchn(evtchn);
538 pirq_unmask_notify(irq);
539 }
540}
541
542static int find_irq_by_gsi(unsigned gsi)
543{
544 int irq;
545
546 for (irq = 0; irq < nr_irqs; irq++) {
547 struct irq_info *info = info_for_irq(irq);
548
549 if (info == NULL || info->type != IRQT_PIRQ)
550 continue;
551
552 if (gsi_from_irq(irq) == gsi)
553 return irq;
554 }
555
556 return -1;
557}
558
559/* xen_allocate_irq might allocate irqs from the top down, as a
560 * consequence don't assume that the irq number returned has a low value
561 * or can be used as a pirq number unless you know otherwise.
562 *
563 * One notable exception is when xen_allocate_irq is called passing an
564 * hardware gsi as argument, in that case the irq number returned
565 * matches the gsi number passed as first argument.
566
567 * Note: We don't assign an
568 * event channel until the irq actually started up. Return an
569 * existing irq if we've already got one for the gsi.
570 */
571int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
572{
573 int irq;
574 struct physdev_irq irq_op;
575
576 spin_lock(&irq_mapping_update_lock);
577
578 irq = find_irq_by_gsi(gsi);
579 if (irq != -1) {
580 printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
581 irq, gsi);
582 goto out; /* XXX need refcount? */
583 }
584
585 /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
586 * we are using the !xen_initial_domain() to drop in the function.*/
587 if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
588 irq = gsi;
589 irq_alloc_desc_at(irq, 0);
590 } else
591 irq = find_unbound_irq();
592
593 set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
594 handle_level_irq, name);
595
596 irq_op.irq = irq;
597 irq_op.vector = 0;
598
599 /* Only the privileged domain can do this. For non-priv, the pcifront
600 * driver provides a PCI bus that does the call to do exactly
601 * this in the priv domain. */
602 if (xen_initial_domain() &&
603 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
604 irq_free_desc(irq);
605 irq = -ENOSPC;
606 goto out;
607 }
608
609 irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);
610 irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
611
612out:
613 spin_unlock(&irq_mapping_update_lock);
365 614
366 return irq; 615 return irq;
367} 616}
368 617
618int xen_destroy_irq(int irq)
619{
620 struct irq_desc *desc;
621 int rc = -ENOENT;
622
623 spin_lock(&irq_mapping_update_lock);
624
625 desc = irq_to_desc(irq);
626 if (!desc)
627 goto out;
628
629 irq_info[irq] = mk_unbound_info();
630
631 irq_free_desc(irq);
632
633out:
634 spin_unlock(&irq_mapping_update_lock);
635 return rc;
636}
637
638int xen_vector_from_irq(unsigned irq)
639{
640 return vector_from_irq(irq);
641}
642
643int xen_gsi_from_irq(unsigned irq)
644{
645 return gsi_from_irq(irq);
646}
647
369int bind_evtchn_to_irq(unsigned int evtchn) 648int bind_evtchn_to_irq(unsigned int evtchn)
370{ 649{
371 int irq; 650 int irq;
@@ -495,7 +774,7 @@ static void unbind_from_irq(unsigned int irq)
495 if (irq_info[irq].type != IRQT_UNBOUND) { 774 if (irq_info[irq].type != IRQT_UNBOUND) {
496 irq_info[irq] = mk_unbound_info(); 775 irq_info[irq] = mk_unbound_info();
497 776
498 dynamic_irq_cleanup(irq); 777 irq_free_desc(irq);
499 } 778 }
500 779
501 spin_unlock(&irq_mapping_update_lock); 780 spin_unlock(&irq_mapping_update_lock);
@@ -892,7 +1171,7 @@ void xen_clear_irq_pending(int irq)
892 if (VALID_EVTCHN(evtchn)) 1171 if (VALID_EVTCHN(evtchn))
893 clear_evtchn(evtchn); 1172 clear_evtchn(evtchn);
894} 1173}
895 1174EXPORT_SYMBOL(xen_clear_irq_pending);
896void xen_set_irq_pending(int irq) 1175void xen_set_irq_pending(int irq)
897{ 1176{
898 int evtchn = evtchn_from_irq(irq); 1177 int evtchn = evtchn_from_irq(irq);
@@ -912,9 +1191,9 @@ bool xen_test_irq_pending(int irq)
912 return ret; 1191 return ret;
913} 1192}
914 1193
915/* Poll waiting for an irq to become pending. In the usual case, the 1194/* Poll waiting for an irq to become pending with timeout. In the usual case,
916 irq will be disabled so it won't deliver an interrupt. */ 1195 * the irq will be disabled so it won't deliver an interrupt. */
917void xen_poll_irq(int irq) 1196void xen_poll_irq_timeout(int irq, u64 timeout)
918{ 1197{
919 evtchn_port_t evtchn = evtchn_from_irq(irq); 1198 evtchn_port_t evtchn = evtchn_from_irq(irq);
920 1199
@@ -922,13 +1201,20 @@ void xen_poll_irq(int irq)
922 struct sched_poll poll; 1201 struct sched_poll poll;
923 1202
924 poll.nr_ports = 1; 1203 poll.nr_ports = 1;
925 poll.timeout = 0; 1204 poll.timeout = timeout;
926 set_xen_guest_handle(poll.ports, &evtchn); 1205 set_xen_guest_handle(poll.ports, &evtchn);
927 1206
928 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) 1207 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
929 BUG(); 1208 BUG();
930 } 1209 }
931} 1210}
1211EXPORT_SYMBOL(xen_poll_irq_timeout);
1212/* Poll waiting for an irq to become pending. In the usual case, the
1213 * irq will be disabled so it won't deliver an interrupt. */
1214void xen_poll_irq(int irq)
1215{
1216 xen_poll_irq_timeout(irq, 0 /* no timeout */);
1217}
932 1218
933void xen_irq_resume(void) 1219void xen_irq_resume(void)
934{ 1220{
@@ -965,6 +1251,26 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
965 .retrigger = retrigger_dynirq, 1251 .retrigger = retrigger_dynirq,
966}; 1252};
967 1253
1254static struct irq_chip xen_pirq_chip __read_mostly = {
1255 .name = "xen-pirq",
1256
1257 .startup = startup_pirq,
1258 .shutdown = shutdown_pirq,
1259
1260 .enable = enable_pirq,
1261 .unmask = enable_pirq,
1262
1263 .disable = disable_pirq,
1264 .mask = disable_pirq,
1265
1266 .ack = ack_pirq,
1267 .end = end_pirq,
1268
1269 .set_affinity = set_affinity_irq,
1270
1271 .retrigger = retrigger_dynirq,
1272};
1273
968static struct irq_chip xen_percpu_chip __read_mostly = { 1274static struct irq_chip xen_percpu_chip __read_mostly = {
969 .name = "xen-percpu", 1275 .name = "xen-percpu",
970 1276
@@ -1019,7 +1325,12 @@ void __init xen_init_IRQ(void)
1019 1325
1020 cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), 1326 cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
1021 GFP_KERNEL); 1327 GFP_KERNEL);
1022 BUG_ON(cpu_evtchn_mask_p == NULL); 1328 irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
1329
1330 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
1331 GFP_KERNEL);
1332 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1333 evtchn_to_irq[i] = -1;
1023 1334
1024 init_evtchn_cpu_bindings(); 1335 init_evtchn_cpu_bindings();
1025 1336
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 7e49527189b6..cdacf923e073 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -50,6 +50,8 @@ const char *xenbus_strstate(enum xenbus_state state)
50 [ XenbusStateConnected ] = "Connected", 50 [ XenbusStateConnected ] = "Connected",
51 [ XenbusStateClosing ] = "Closing", 51 [ XenbusStateClosing ] = "Closing",
52 [ XenbusStateClosed ] = "Closed", 52 [ XenbusStateClosed ] = "Closed",
53 [XenbusStateReconfiguring] = "Reconfiguring",
54 [XenbusStateReconfigured] = "Reconfigured",
53 }; 55 };
54 return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; 56 return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
55} 57}