about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2006-06-06 09:44:34 -0400
committerJaroslav Kysela <perex@suse.cz>2006-06-22 15:34:13 -0400
commit688956f23bdbfb1c3551bfafc819f989b36bb8ae (patch)
treeae59fdf7662b5a27c66554cbfd2b67c49fc156da
parentc5533bf36b4a6629dab0e08c4951247050928853 (diff)
[ALSA] Fix races in irq handler and ioremap
Call ioremap before request_irq for avoiding possible races in the irq handler. Signed-off-by: Takashi Iwai <tiwai@suse.de> Signed-off-by: Jaroslav Kysela <perex@suse.cz>
-rw-r--r--sound/pci/cs4281.c14
-rw-r--r--sound/pci/rme32.c12
-rw-r--r--sound/pci/rme96.c10
3 files changed, 18 insertions, 18 deletions
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 8c150eab45b6..e77a4ce314b7 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1379,6 +1379,13 @@ static int __devinit snd_cs4281_create(struct snd_card *card,
1379 chip->ba0_addr = pci_resource_start(pci, 0); 1379 chip->ba0_addr = pci_resource_start(pci, 0);
1380 chip->ba1_addr = pci_resource_start(pci, 1); 1380 chip->ba1_addr = pci_resource_start(pci, 1);
1381 1381
1382 chip->ba0 = ioremap_nocache(chip->ba0_addr, pci_resource_len(pci, 0));
1383 chip->ba1 = ioremap_nocache(chip->ba1_addr, pci_resource_len(pci, 1));
1384 if (!chip->ba0 || !chip->ba1) {
1385 snd_cs4281_free(chip);
1386 return -ENOMEM;
1387 }
1388
1382 if (request_irq(pci->irq, snd_cs4281_interrupt, SA_INTERRUPT|SA_SHIRQ, 1389 if (request_irq(pci->irq, snd_cs4281_interrupt, SA_INTERRUPT|SA_SHIRQ,
1383 "CS4281", chip)) { 1390 "CS4281", chip)) {
1384 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); 1391 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
@@ -1387,13 +1394,6 @@ static int __devinit snd_cs4281_create(struct snd_card *card,
1387 } 1394 }
1388 chip->irq = pci->irq; 1395 chip->irq = pci->irq;
1389 1396
1390 chip->ba0 = ioremap_nocache(chip->ba0_addr, pci_resource_len(pci, 0));
1391 chip->ba1 = ioremap_nocache(chip->ba1_addr, pci_resource_len(pci, 1));
1392 if (!chip->ba0 || !chip->ba1) {
1393 snd_cs4281_free(chip);
1394 return -ENOMEM;
1395 }
1396
1397 tmp = snd_cs4281_chip_init(chip); 1397 tmp = snd_cs4281_chip_init(chip);
1398 if (tmp) { 1398 if (tmp) {
1399 snd_cs4281_free(chip); 1399 snd_cs4281_free(chip);
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 4dd53bfe0308..2cb9fe98db2f 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1368,18 +1368,18 @@ static int __devinit snd_rme32_create(struct rme32 * rme32)
1368 return err; 1368 return err;
1369 rme32->port = pci_resource_start(rme32->pci, 0); 1369 rme32->port = pci_resource_start(rme32->pci, 0);
1370 1370
1371 if (request_irq(pci->irq, snd_rme32_interrupt, SA_INTERRUPT | SA_SHIRQ, "RME32", (void *) rme32)) {
1372 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
1373 return -EBUSY;
1374 }
1375 rme32->irq = pci->irq;
1376
1377 if ((rme32->iobase = ioremap_nocache(rme32->port, RME32_IO_SIZE)) == 0) { 1371 if ((rme32->iobase = ioremap_nocache(rme32->port, RME32_IO_SIZE)) == 0) {
1378 snd_printk(KERN_ERR "unable to remap memory region 0x%lx-0x%lx\n", 1372 snd_printk(KERN_ERR "unable to remap memory region 0x%lx-0x%lx\n",
1379 rme32->port, rme32->port + RME32_IO_SIZE - 1); 1373 rme32->port, rme32->port + RME32_IO_SIZE - 1);
1380 return -ENOMEM; 1374 return -ENOMEM;
1381 } 1375 }
1382 1376
1377 if (request_irq(pci->irq, snd_rme32_interrupt, SA_INTERRUPT | SA_SHIRQ, "RME32", (void *) rme32)) {
1378 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
1379 return -EBUSY;
1380 }
1381 rme32->irq = pci->irq;
1382
1383 /* read the card's revision number */ 1383 /* read the card's revision number */
1384 pci_read_config_byte(pci, 8, &rme32->rev); 1384 pci_read_config_byte(pci, 8, &rme32->rev);
1385 1385
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 65611a7d366d..991cb18c14f3 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -1583,17 +1583,17 @@ snd_rme96_create(struct rme96 *rme96)
1583 return err; 1583 return err;
1584 rme96->port = pci_resource_start(rme96->pci, 0); 1584 rme96->port = pci_resource_start(rme96->pci, 0);
1585 1585
1586 if ((rme96->iobase = ioremap_nocache(rme96->port, RME96_IO_SIZE)) == 0) {
1587 snd_printk(KERN_ERR "unable to remap memory region 0x%lx-0x%lx\n", rme96->port, rme96->port + RME96_IO_SIZE - 1);
1588 return -ENOMEM;
1589 }
1590
1586 if (request_irq(pci->irq, snd_rme96_interrupt, SA_INTERRUPT|SA_SHIRQ, "RME96", (void *)rme96)) { 1591 if (request_irq(pci->irq, snd_rme96_interrupt, SA_INTERRUPT|SA_SHIRQ, "RME96", (void *)rme96)) {
1587 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); 1592 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
1588 return -EBUSY; 1593 return -EBUSY;
1589 } 1594 }
1590 rme96->irq = pci->irq; 1595 rme96->irq = pci->irq;
1591 1596
1592 if ((rme96->iobase = ioremap_nocache(rme96->port, RME96_IO_SIZE)) == 0) {
1593 snd_printk(KERN_ERR "unable to remap memory region 0x%lx-0x%lx\n", rme96->port, rme96->port + RME96_IO_SIZE - 1);
1594 return -ENOMEM;
1595 }
1596
1597 /* read the card's revision number */ 1597 /* read the card's revision number */
1598 pci_read_config_byte(pci, 8, &rme96->rev); 1598 pci_read_config_byte(pci, 8, &rme96->rev);
1599 1599
pps">"jfs_superblock.h" #include "jfs_filsys.h" #include "jfs_metapage.h" #include "jfs_txnmgr.h" #include "jfs_debug.h" #ifdef CONFIG_JFS_STATISTICS static struct { uint pagealloc; /* # of page allocations */ uint pagefree; /* # of page frees */ uint lockwait; /* # of sleeping lock_metapage() calls */ } mpStat; #endif #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag) #define trylock_metapage(mp) test_and_set_bit(META_locked, &(mp)->flag) static inline void unlock_metapage(struct metapage *mp) { clear_bit(META_locked, &mp->flag); wake_up(&mp->wait); } static inline void __lock_metapage(struct metapage *mp) { DECLARE_WAITQUEUE(wait, current); INCREMENT(mpStat.lockwait); add_wait_queue_exclusive(&mp->wait, &wait); do { set_current_state(TASK_UNINTERRUPTIBLE); if (metapage_locked(mp)) { unlock_page(mp->page); schedule(); lock_page(mp->page); } } while (trylock_metapage(mp)); __set_current_state(TASK_RUNNING); remove_wait_queue(&mp->wait, &wait); } /* * Must have mp->page locked */ static inline void lock_metapage(struct metapage *mp) { if (trylock_metapage(mp)) __lock_metapage(mp); } #define METAPOOL_MIN_PAGES 32 static kmem_cache_t *metapage_cache; static mempool_t *metapage_mempool; #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) #if MPS_PER_PAGE > 1 struct meta_anchor { int mp_count; atomic_t io_count; struct metapage *mp[MPS_PER_PAGE]; }; #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) static inline struct metapage *page_to_mp(struct page *page, uint offset) { if (!PagePrivate(page)) return NULL; return mp_anchor(page)->mp[offset >> L2PSIZE]; } static inline int insert_metapage(struct page *page, struct metapage *mp) { struct meta_anchor *a; int index; int l2mp_blocks; /* log2 blocks per metapage */ if (PagePrivate(page)) a = mp_anchor(page); else { a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS); if (!a) return -ENOMEM; set_page_private(page, (unsigned long)a); SetPagePrivate(page); kmap(page); } if (mp) { l2mp_blocks = L2PSIZE 
- page->mapping->host->i_blkbits; index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1); a->mp_count++; a->mp[index] = mp; } return 0; } static inline void remove_metapage(struct page *page, struct metapage *mp) { struct meta_anchor *a = mp_anchor(page); int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; int index; index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1); BUG_ON(a->mp[index] != mp); a->mp[index] = NULL; if (--a->mp_count == 0) { kfree(a); set_page_private(page, 0); ClearPagePrivate(page); kunmap(page); } } static inline void inc_io(struct page *page) { atomic_inc(&mp_anchor(page)->io_count); } static inline void dec_io(struct page *page, void (*handler) (struct page *)) { if (atomic_dec_and_test(&mp_anchor(page)->io_count)) handler(page); } #else static inline struct metapage *page_to_mp(struct page *page, uint offset) { return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL; } static inline int insert_metapage(struct page *page, struct metapage *mp) { if (mp) { set_page_private(page, (unsigned long)mp); SetPagePrivate(page); kmap(page); } return 0; } static inline void remove_metapage(struct page *page, struct metapage *mp) { set_page_private(page, 0); ClearPagePrivate(page); kunmap(page); } #define inc_io(page) do {} while(0) #define dec_io(page, handler) handler(page) #endif static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct metapage *mp = (struct metapage *)foo; if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { mp->lid = 0; mp->lsn = 0; mp->flag = 0; mp->data = NULL; mp->clsn = 0; mp->log = NULL; set_bit(META_free, &mp->flag); init_waitqueue_head(&mp->wait); } } static inline struct metapage *alloc_metapage(gfp_t gfp_mask) { return mempool_alloc(metapage_mempool, gfp_mask); } static inline void free_metapage(struct metapage *mp) { mp->flag = 0; set_bit(META_free, &mp->flag); mempool_free(mp, metapage_mempool); } int __init metapage_init(void) { /* * 
Allocate the metapage structures */ metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage), 0, 0, init_once, NULL); if (metapage_cache == NULL) return -ENOMEM; metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES, metapage_cache); if (metapage_mempool == NULL) { kmem_cache_destroy(metapage_cache); return -ENOMEM; } return 0; } void metapage_exit(void) { mempool_destroy(metapage_mempool); kmem_cache_destroy(metapage_cache); } static inline void drop_metapage(struct page *page, struct metapage *mp) { if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) || test_bit(META_io, &mp->flag)) return; remove_metapage(page, mp); INCREMENT(mpStat.pagefree); free_metapage(mp); } /* * Metapage address space operations */ static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock, unsigned int *len) { int rc = 0; int xflag; s64 xaddr; sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_blkbits; if (lblock >= file_blocks) return 0; if (lblock + *len > file_blocks) *len = file_blocks - lblock; if (inode->i_ino) { rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0); if ((rc == 0) && *len) lblock = (sector_t)xaddr; else lblock = 0; } /* else no mapping */ return lblock; } static void last_read_complete(struct page *page) { if (!PageError(page)) SetPageUptodate(page); unlock_page(page); } static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done, int err) { struct page *page = bio->bi_private; if (bio->bi_size) return 1; if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { printk(KERN_ERR "metapage_read_end_io: I/O error\n"); SetPageError(page); } dec_io(page, last_read_complete); bio_put(bio); return 0; } static void remove_from_logsync(struct metapage *mp) { struct jfs_log *log = mp->log; unsigned long flags; /* * This can race. 
Recheck that log hasn't been set to null, and after * acquiring logsync lock, recheck lsn */ if (!log) return; LOGSYNC_LOCK(log, flags); if (mp->lsn) { mp->log = NULL; mp->lsn = 0; mp->clsn = 0; log->count--; list_del(&mp->synclist); } LOGSYNC_UNLOCK(log, flags); } static void last_write_complete(struct page *page) { struct metapage *mp; unsigned int offset; for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (mp && test_bit(META_io, &mp->flag)) { if (mp->lsn) remove_from_logsync(mp); clear_bit(META_io, &mp->flag); } /* * I'd like to call drop_metapage here, but I don't think it's * safe unless I have the page locked */ } end_page_writeback(page); } static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done, int err) { struct page *page = bio->bi_private; BUG_ON(!PagePrivate(page)); if (bio->bi_size) return 1; if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) { printk(KERN_ERR "metapage_write_end_io: I/O error\n"); SetPageError(page); } dec_io(page, last_write_complete); bio_put(bio); return 0; } static int metapage_writepage(struct page *page, struct writeback_control *wbc) { struct bio *bio = NULL; unsigned int block_offset; /* block offset of mp within page */ struct inode *inode = page->mapping->host; unsigned int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage; unsigned int len; unsigned int xlen; struct metapage *mp;