about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMatthew Wilcox <willy@infradead.org>2018-11-26 16:08:43 -0500
committerMatthew Wilcox <willy@infradead.org>2018-12-06 08:26:17 -0500
commit55f3f7eab75c10d9b33d122670b5935ab64db50f (patch)
tree6a601ac3c08993730dfade808fe6806e1bfbf98c
parenteff3860bbfedbac6edac57fb0d7f3a60e860c1c3 (diff)
XArray: Add xa_cmpxchg_irq and xa_cmpxchg_bh
These convenience wrappers match the other _irq and _bh wrappers we already have. It turns out I'd already open-coded xa_cmpxchg_irq() in the shmem code, so convert that to use the new wrapper.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
-rw-r--r--Documentation/core-api/xarray.rst5
-rw-r--r--include/linux/xarray.h54
-rw-r--r--mm/shmem.c4
3 files changed, 59 insertions, 4 deletions
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index dbe96cb5558e..6a6d67acaf69 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -187,6 +187,8 @@ Takes xa_lock internally:
187 * :c:func:`xa_erase_bh` 187 * :c:func:`xa_erase_bh`
188 * :c:func:`xa_erase_irq` 188 * :c:func:`xa_erase_irq`
189 * :c:func:`xa_cmpxchg` 189 * :c:func:`xa_cmpxchg`
190 * :c:func:`xa_cmpxchg_bh`
191 * :c:func:`xa_cmpxchg_irq`
190 * :c:func:`xa_store_range` 192 * :c:func:`xa_store_range`
191 * :c:func:`xa_alloc` 193 * :c:func:`xa_alloc`
192 * :c:func:`xa_alloc_bh` 194 * :c:func:`xa_alloc_bh`
@@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
263context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` 265context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
264in the interrupt handler. Some of the more common patterns have helper 266in the interrupt handler. Some of the more common patterns have helper
265functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`, 267functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
266:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`. 268:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh`
269and :c:func:`xa_cmpxchg_irq`.
267 270
268Sometimes you need to protect access to the XArray with a mutex because 271Sometimes you need to protect access to the XArray with a mutex because
269that lock sits above another mutex in the locking hierarchy. That does 272that lock sits above another mutex in the locking hierarchy. That does
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 564892e19f8c..f492e21c4aa2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -554,6 +554,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
554} 554}
555 555
556/** 556/**
557 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
558 * @xa: XArray.
559 * @index: Index into array.
560 * @old: Old value to test against.
561 * @entry: New value to place in array.
562 * @gfp: Memory allocation flags.
563 *
564 * This function is like calling xa_cmpxchg() except it disables softirqs
565 * while holding the array lock.
566 *
567 * Context: Any context. Takes and releases the xa_lock while
568 * disabling softirqs. May sleep if the @gfp flags permit.
569 * Return: The old value at this index or xa_err() if an error happened.
570 */
571static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
572 void *old, void *entry, gfp_t gfp)
573{
574 void *curr;
575
576 xa_lock_bh(xa);
577 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
578 xa_unlock_bh(xa);
579
580 return curr;
581}
582
583/**
584 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
585 * @xa: XArray.
586 * @index: Index into array.
587 * @old: Old value to test against.
588 * @entry: New value to place in array.
589 * @gfp: Memory allocation flags.
590 *
591 * This function is like calling xa_cmpxchg() except it disables interrupts
592 * while holding the array lock.
593 *
594 * Context: Process context. Takes and releases the xa_lock while
595 * disabling interrupts. May sleep if the @gfp flags permit.
596 * Return: The old value at this index or xa_err() if an error happened.
597 */
598static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
599 void *old, void *entry, gfp_t gfp)
600{
601 void *curr;
602
603 xa_lock_irq(xa);
604 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
605 xa_unlock_irq(xa);
606
607 return curr;
608}
609
610/**
557 * xa_insert() - Store this entry in the XArray unless another entry is 611 * xa_insert() - Store this entry in the XArray unless another entry is
558 * already present. 612 * already present.
559 * @xa: XArray. 613 * @xa: XArray.
diff --git a/mm/shmem.c b/mm/shmem.c
index cddc72ac44d8..6adbdd349875 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping,
661{ 661{
662 void *old; 662 void *old;
663 663
664 xa_lock_irq(&mapping->i_pages); 664 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
665 old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
666 xa_unlock_irq(&mapping->i_pages);
667 if (old != radswap) 665 if (old != radswap)
668 return -ENOENT; 666 return -ENOENT;
669 free_swap_and_cache(radix_to_swp_entry(radswap)); 667 free_swap_and_cache(radix_to_swp_entry(radswap));