author		Joerg Roedel <joerg.roedel@amd.com>	2009-04-24 08:35:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-26 12:23:56 -0400
commit		314eeac9e35d8b934dd7a09ed3a8e00d41977b84 (patch)
tree		e59218338ee9d62c02c50f4cdec1f1ca1b81aeb9 /fs/jbd2/commit.c
parent		992d7ced75322307035a0e94074eb7188612a680 (diff)
dma-debug: remove broken dma memory leak detection for 2.6.30
The feature needs some more work because the notifier which is used to check for pending allocations is called before the device driver's ->remove() function. Therefore this feature reports false positives.

A real fix for this issue is to introduce a new notifier event which is sent _after_ the driver has deinitialized itself. That will be done for the next kernel version.

[ Impact: reduce the scope of CONFIG_DMA_API_DEBUG=y checks ]

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
LKML-Reference: <1240576557-22442-1-git-send-email-joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/jbd2/commit.c')
0 files changed, 0 insertions, 0 deletions
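The "new notifier event" the message alludes to later landed in the driver core as BUS_NOTIFY_UNBOUND_DRIVER, which is emitted only after ->remove() has completed, so a leak check hanging off it no longer races with driver teardown. Below is a minimal sketch of that shape, not the actual dma-debug code: pending_dma_allocations() is a hypothetical stand-in for dma-debug's per-device mapping bookkeeping, and the bus chosen in the registration comment is just an example.

#include <linux/device.h>
#include <linux/notifier.h>

/* Hypothetical stand-in for dma-debug's per-device mapping count. */
static int pending_dma_allocations(struct device *dev)
{
	return 0;	/* the real check would walk dma-debug's hash buckets */
}

static int dma_debug_device_change(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct device *dev = data;
	int count;

	/* Fire only once the driver has fully deinitialized itself. */
	if (action != BUS_NOTIFY_UNBOUND_DRIVER)
		return 0;

	count = pending_dma_allocations(dev);
	if (count)
		dev_warn(dev, "%d DMA mappings leaked across driver unbind\n",
			 count);

	return 0;
}

static struct notifier_block dma_debug_nb = {
	.notifier_call = dma_debug_device_change,
};

/* Registered per bus, e.g.:
 *	bus_register_notifier(&pci_bus_type, &dma_debug_nb);
 */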
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * … , using per_cpu containers,
 * in BH disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
	preempt_disable();
#endif
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
	preempt_enable();
#endif
	return false;
#endif
}

/*
 * In case softirq handlers can update u64 counters, readers can use following helpers
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable BH.
 * - 64bit have no problem atomically reading u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
	local_bh_disable();
#endif
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
					    unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
	local_bh_enable();
#endif
	return false;
#endif
}

#endif /* _LINUX_U64_STATS_SYNC_H */
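The intended usage follows the classic seqcount template: a writer that already has exclusive access (a lock held, or per-cpu data in a non-preemptible context) brackets its non-atomic 64-bit updates with u64_stats_update_begin()/u64_stats_update_end(), while a reader retries its snapshot until the sequence is unchanged. A minimal sketch, assuming a hypothetical mystats counter block:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-device counter block. */
struct mystats {
	u64			bytes64;
	u64			packets64;
	struct u64_stats_sync	syncp;
};

/* Writer side: caller must already serialize against other writers. */
static void mystats_account(struct mystats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->bytes64 += len;		/* non-atomic 64bit op, guarded by syncp */
	stats->packets64++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: loop until a consistent snapshot is observed. */
static void mystats_read(const struct mystats *stats, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*bytes = stats->bytes64;
		*packets = stats->packets64;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}

Note the design: on 64-bit kernels the struct is empty and all of these helpers compile down to plain loads and stores, so the synchronization costs nothing where 64-bit reads are already atomic; only 32-bit SMP pays for the seqcount.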