aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/access.c
diff options
context:
space:
mode:
authorJan Kiszka <jan.kiszka@siemens.com>2011-11-04 04:45:59 -0400
committerJesse Barnes <jbarnes@virtuousgeek.org>2012-01-06 15:10:33 -0500
commitfb51ccbf217c1c994607b6519c7d85250928553d (patch)
treed08ba9a0278da0e75b6c6714e9453e46068e27b4 /drivers/pci/access.c
parentae5cd86455381282ece162966183d3f208c6fad7 (diff)
PCI: Rework config space blocking services
pci_block_user_cfg_access was designed for the use case that a single context, the IPR driver, temporarily delays user space accesses to the config space via sysfs. This assumption became invalid by the time pci_dev_reset was added as a locking instance. Today, if you run two loops in parallel that reset the same device via sysfs, you end up with a kernel BUG as pci_block_user_cfg_access detects the broken assumption. This reworks the pci_block_user_cfg_access to a sleeping service pci_cfg_access_lock and an atomic-compatible variant called pci_cfg_access_trylock. The former not only blocks user space access as before but also waits if access was already locked. The latter service just returns false in this case, allowing the caller to resolve the conflict instead of raising a BUG. Adaptations of the ipr driver were originally written by Brian King. Acked-by: Brian King <brking@linux.vnet.ibm.com> Acked-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Diffstat (limited to 'drivers/pci/access.c')
-rw-r--r--drivers/pci/access.c74
1 file changed, 48 insertions, 26 deletions
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index fdaa42aac7c6..0c4c71712dfc 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -127,20 +127,20 @@ EXPORT_SYMBOL(pci_write_vpd);
127 * We have a bit per device to indicate it's blocked and a global wait queue 127 * We have a bit per device to indicate it's blocked and a global wait queue
128 * for callers to sleep on until devices are unblocked. 128 * for callers to sleep on until devices are unblocked.
129 */ 129 */
130static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait); 130static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
131 131
132static noinline void pci_wait_ucfg(struct pci_dev *dev) 132static noinline void pci_wait_cfg(struct pci_dev *dev)
133{ 133{
134 DECLARE_WAITQUEUE(wait, current); 134 DECLARE_WAITQUEUE(wait, current);
135 135
136 __add_wait_queue(&pci_ucfg_wait, &wait); 136 __add_wait_queue(&pci_cfg_wait, &wait);
137 do { 137 do {
138 set_current_state(TASK_UNINTERRUPTIBLE); 138 set_current_state(TASK_UNINTERRUPTIBLE);
139 raw_spin_unlock_irq(&pci_lock); 139 raw_spin_unlock_irq(&pci_lock);
140 schedule(); 140 schedule();
141 raw_spin_lock_irq(&pci_lock); 141 raw_spin_lock_irq(&pci_lock);
142 } while (dev->block_ucfg_access); 142 } while (dev->block_cfg_access);
143 __remove_wait_queue(&pci_ucfg_wait, &wait); 143 __remove_wait_queue(&pci_cfg_wait, &wait);
144} 144}
145 145
146/* Returns 0 on success, negative values indicate error. */ 146/* Returns 0 on success, negative values indicate error. */
@@ -153,7 +153,8 @@ int pci_user_read_config_##size \
153 if (PCI_##size##_BAD) \ 153 if (PCI_##size##_BAD) \
154 return -EINVAL; \ 154 return -EINVAL; \
155 raw_spin_lock_irq(&pci_lock); \ 155 raw_spin_lock_irq(&pci_lock); \
156 if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ 156 if (unlikely(dev->block_cfg_access)) \
157 pci_wait_cfg(dev); \
157 ret = dev->bus->ops->read(dev->bus, dev->devfn, \ 158 ret = dev->bus->ops->read(dev->bus, dev->devfn, \
158 pos, sizeof(type), &data); \ 159 pos, sizeof(type), &data); \
159 raw_spin_unlock_irq(&pci_lock); \ 160 raw_spin_unlock_irq(&pci_lock); \
@@ -172,7 +173,8 @@ int pci_user_write_config_##size \
172 if (PCI_##size##_BAD) \ 173 if (PCI_##size##_BAD) \
173 return -EINVAL; \ 174 return -EINVAL; \
174 raw_spin_lock_irq(&pci_lock); \ 175 raw_spin_lock_irq(&pci_lock); \
175 if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ 176 if (unlikely(dev->block_cfg_access)) \
177 pci_wait_cfg(dev); \
176 ret = dev->bus->ops->write(dev->bus, dev->devfn, \ 178 ret = dev->bus->ops->write(dev->bus, dev->devfn, \
177 pos, sizeof(type), val); \ 179 pos, sizeof(type), val); \
178 raw_spin_unlock_irq(&pci_lock); \ 180 raw_spin_unlock_irq(&pci_lock); \
@@ -401,36 +403,56 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
401EXPORT_SYMBOL(pci_vpd_truncate); 403EXPORT_SYMBOL(pci_vpd_truncate);
402 404
403/** 405/**
404 * pci_block_user_cfg_access - Block userspace PCI config reads/writes 406 * pci_cfg_access_lock - Lock PCI config reads/writes
405 * @dev: pci device struct 407 * @dev: pci device struct
406 * 408 *
407 * When user access is blocked, any reads or writes to config space will 409 * When access is locked, any userspace reads or writes to config
408 * sleep until access is unblocked again. We don't allow nesting of 410 * space and concurrent lock requests will sleep until access is
409 * block/unblock calls. 411 * allowed via pci_cfg_access_unlocked again.
410 */ 412 */
411void pci_block_user_cfg_access(struct pci_dev *dev) 413void pci_cfg_access_lock(struct pci_dev *dev)
414{
415 might_sleep();
416
417 raw_spin_lock_irq(&pci_lock);
418 if (dev->block_cfg_access)
419 pci_wait_cfg(dev);
420 dev->block_cfg_access = 1;
421 raw_spin_unlock_irq(&pci_lock);
422}
423EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
424
425/**
426 * pci_cfg_access_trylock - try to lock PCI config reads/writes
427 * @dev: pci device struct
428 *
429 * Same as pci_cfg_access_lock, but will return 0 if access is
430 * already locked, 1 otherwise. This function can be used from
431 * atomic contexts.
432 */
433bool pci_cfg_access_trylock(struct pci_dev *dev)
412{ 434{
413 unsigned long flags; 435 unsigned long flags;
414 int was_blocked; 436 bool locked = true;
415 437
416 raw_spin_lock_irqsave(&pci_lock, flags); 438 raw_spin_lock_irqsave(&pci_lock, flags);
417 was_blocked = dev->block_ucfg_access; 439 if (dev->block_cfg_access)
418 dev->block_ucfg_access = 1; 440 locked = false;
441 else
442 dev->block_cfg_access = 1;
419 raw_spin_unlock_irqrestore(&pci_lock, flags); 443 raw_spin_unlock_irqrestore(&pci_lock, flags);
420 444
421 /* If we BUG() inside the pci_lock, we're guaranteed to hose 445 return locked;
422 * the machine */
423 BUG_ON(was_blocked);
424} 446}
425EXPORT_SYMBOL_GPL(pci_block_user_cfg_access); 447EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
426 448
427/** 449/**
428 * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes 450 * pci_cfg_access_unlock - Unlock PCI config reads/writes
429 * @dev: pci device struct 451 * @dev: pci device struct
430 * 452 *
431 * This function allows userspace PCI config accesses to resume. 453 * This function allows PCI config accesses to resume.
432 */ 454 */
433void pci_unblock_user_cfg_access(struct pci_dev *dev) 455void pci_cfg_access_unlock(struct pci_dev *dev)
434{ 456{
435 unsigned long flags; 457 unsigned long flags;
436 458
@@ -438,10 +460,10 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
438 460
439 /* This indicates a problem in the caller, but we don't need 461 /* This indicates a problem in the caller, but we don't need
440 * to kill them, unlike a double-block above. */ 462 * to kill them, unlike a double-block above. */
441 WARN_ON(!dev->block_ucfg_access); 463 WARN_ON(!dev->block_cfg_access);
442 464
443 dev->block_ucfg_access = 0; 465 dev->block_cfg_access = 0;
444 wake_up_all(&pci_ucfg_wait); 466 wake_up_all(&pci_cfg_wait);
445 raw_spin_unlock_irqrestore(&pci_lock, flags); 467 raw_spin_unlock_irqrestore(&pci_lock, flags);
446} 468}
447EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access); 469EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);