path: root/drivers/pci
author     Matthew Wilcox <matthew@wil.cx>            2006-10-19 11:41:28 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>        2006-12-01 17:36:58 -0500
commit     7ea7e98fd8d02351c43ef4ab35d70f3aaa26c31d (patch)
tree       cb37b18402c2b82cc227ad6bd1ab3d57cf677ff3 /drivers/pci
parent     50bf14b3ff05fb6e10688021b96f95d30a300f8d (diff)
PCI: Block on access to temporarily unavailable pci device
The existing implementation of pci_block_user_cfg_access() was recently criticised for providing out of date information and for returning errors on write, which applications won't be expecting.

This reimplementation uses a global wait queue and a bit per device. I've open-coded prepare_to_wait() / finish_wait() as I could optimise it significantly by knowing that the pci_lock protected us at all points.

It looked a bit funny to be doing a spin_unlock_irqsave(); schedule(), so I used spin_lock_irq() for the _user versions of pci_read_config and pci_write_config. Not carrying a flags pointer around made the code much less nasty.

Attempts to block an already blocked device hit a BUG() and attempts to unblock an already unblocked device hit a WARN(). If we need to block access to a device from userspace, it's because it's unsafe for even another bit of the kernel to access the device. An attempt to block a device for a second time means we're about to access the device to perform some other operation, which could provoke undefined behaviour from the device.

Signed-off-by: Matthew Wilcox <matthew@wil.cx>
Acked-by: Adam Belay <abelay@novell.com>
Acked-by: Alan Cox <alan@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
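For context, the intended calling pattern around the blocking primitives looks roughly like the sketch below. Only pci_block_user_cfg_access() and pci_unblock_user_cfg_access() come from this patch; the surrounding driver function and the do_device_bist() helper are hypothetical.

#include <linux/pci.h>

/* Hypothetical caller: quiesce user config-space access around an
 * operation (BIST, D-state transition) that the device cannot service.
 * Any concurrent pci_user_{read,write}_config_*() call sleeps on
 * pci_ucfg_wait until the matching unblock below. */
static int example_run_bist(struct pci_dev *dev)
{
	int ret;

	pci_block_user_cfg_access(dev);		/* must not already be blocked (BUG) */
	ret = do_device_bist(dev);		/* hypothetical device-specific work */
	pci_unblock_user_cfg_access(dev);	/* wakes all sleepers */

	return ret;
}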
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/access.c  75
1 file changed, 47 insertions, 28 deletions
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index ea16805a153c..73a58c73d526 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -1,6 +1,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
+#include <linux/wait.h>
 
 #include "pci.h"
 
@@ -63,30 +64,42 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
 EXPORT_SYMBOL(pci_bus_write_config_word);
 EXPORT_SYMBOL(pci_bus_write_config_dword);
 
-static u32 pci_user_cached_config(struct pci_dev *dev, int pos)
-{
-	u32 data;
+/*
+ * The following routines are to prevent the user from accessing PCI config
+ * space when it's unsafe to do so. Some devices require this during BIST and
+ * we're required to prevent it during D-state transitions.
+ *
+ * We have a bit per device to indicate it's blocked and a global wait queue
+ * for callers to sleep on until devices are unblocked.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
 
-	data = dev->saved_config_space[pos/sizeof(dev->saved_config_space[0])];
-	data >>= (pos % sizeof(dev->saved_config_space[0])) * 8;
-	return data;
+static noinline void pci_wait_ucfg(struct pci_dev *dev)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	__add_wait_queue(&pci_ucfg_wait, &wait);
+	do {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_unlock_irq(&pci_lock);
+		schedule();
+		spin_lock_irq(&pci_lock);
+	} while (dev->block_ucfg_access);
+	__remove_wait_queue(&pci_ucfg_wait, &wait);
 }
 
 #define PCI_USER_READ_CONFIG(size,type) \
 int pci_user_read_config_##size \
 	(struct pci_dev *dev, int pos, type *val) \
 { \
-	unsigned long flags; \
 	int ret = 0; \
 	u32 data = -1; \
 	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
-	spin_lock_irqsave(&pci_lock, flags); \
-	if (likely(!dev->block_ucfg_access)) \
+	spin_lock_irq(&pci_lock); \
+	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
 	ret = dev->bus->ops->read(dev->bus, dev->devfn, \
 					pos, sizeof(type), &data); \
-	else if (pos < sizeof(dev->saved_config_space)) \
-		data = pci_user_cached_config(dev, pos); \
-	spin_unlock_irqrestore(&pci_lock, flags); \
+	spin_unlock_irq(&pci_lock); \
 	*val = (type)data; \
 	return ret; \
 }
@@ -95,14 +108,13 @@ int pci_user_read_config_##size \
 int pci_user_write_config_##size \
 	(struct pci_dev *dev, int pos, type val) \
 { \
-	unsigned long flags; \
 	int ret = -EIO; \
 	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
-	spin_lock_irqsave(&pci_lock, flags); \
-	if (likely(!dev->block_ucfg_access)) \
+	spin_lock_irq(&pci_lock); \
+	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
 	ret = dev->bus->ops->write(dev->bus, dev->devfn, \
 					pos, sizeof(type), val); \
-	spin_unlock_irqrestore(&pci_lock, flags); \
+	spin_unlock_irq(&pci_lock); \
 	return ret; \
 }
 
@@ -117,21 +129,23 @@ PCI_USER_WRITE_CONFIG(dword, u32)
  * pci_block_user_cfg_access - Block userspace PCI config reads/writes
  * @dev: pci device struct
  *
- * This function blocks any userspace PCI config accesses from occurring.
- * When blocked, any writes will be bit bucketed and reads will return the
- * data saved using pci_save_state for the first 64 bytes of config
- * space and return 0xff for all other config reads.
- **/
+ * When user access is blocked, any reads or writes to config space will
+ * sleep until access is unblocked again. We don't allow nesting of
+ * block/unblock calls.
+ */
 void pci_block_user_cfg_access(struct pci_dev *dev)
 {
 	unsigned long flags;
+	int was_blocked;
 
-	pci_save_state(dev);
-
-	/* spinlock to synchronize with anyone reading config space now */
 	spin_lock_irqsave(&pci_lock, flags);
+	was_blocked = dev->block_ucfg_access;
 	dev->block_ucfg_access = 1;
 	spin_unlock_irqrestore(&pci_lock, flags);
+
+	/* If we BUG() inside the pci_lock, we're guaranteed to hose
+	 * the machine */
+	BUG_ON(was_blocked);
 }
 EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
 
@@ -140,14 +154,19 @@ EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
  * @dev: pci device struct
  *
  * This function allows userspace PCI config accesses to resume.
- **/
+ */
 void pci_unblock_user_cfg_access(struct pci_dev *dev)
 {
 	unsigned long flags;
 
-	/* spinlock to synchronize with anyone reading saved config space */
 	spin_lock_irqsave(&pci_lock, flags);
+
+	/* This indicates a problem in the caller, but we don't need
+	 * to kill them, unlike a double-block above. */
+	WARN_ON(!dev->block_ucfg_access);
+
 	dev->block_ucfg_access = 0;
+	wake_up_all(&pci_ucfg_wait);
 	spin_unlock_irqrestore(&pci_lock, flags);
 }
 EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
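As a footnote to the open-coded wait loop in pci_wait_ucfg() above: the changelog says prepare_to_wait() / finish_wait() were deliberately not used. Purely for comparison, a version built on the generic helpers would look roughly like the sketch below; pci_wait_ucfg_generic() is a hypothetical name, not part of the patch.

#include <linux/wait.h>

/* Comparison sketch only: the same wait written with the generic
 * wait-queue helpers. prepare_to_wait() re-queues the entry and takes
 * the wait-queue's own lock on every pass; the open-coded pci_wait_ucfg()
 * avoids that because every add, remove and wakeup of pci_ucfg_wait
 * already happens under pci_lock, so the entry is added and removed once. */
static void pci_wait_ucfg_generic(struct pci_dev *dev)
{
	DEFINE_WAIT(wait);

	while (dev->block_ucfg_access) {
		prepare_to_wait(&pci_ucfg_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&pci_lock);
		schedule();
		spin_lock_irq(&pci_lock);
	}
	finish_wait(&pci_ucfg_wait, &wait);
}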