 net/core/page_pool.c | 39 ++++++++++++++-----------------------
 1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 31187ff55cbc..5bc65587f1c4 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -82,12 +82,9 @@ EXPORT_SYMBOL(page_pool_create);
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
 	struct ptr_ring *r = &pool->ring;
+	bool refill = false;
 	struct page *page;
 
-	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
-		return NULL;
-
 	/* Test for safe-context, caller should provide this guarantee */
 	if (likely(in_serving_softirq())) {
 		if (likely(pool->alloc.count)) {
@@ -95,27 +92,23 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 			page = pool->alloc.cache[--pool->alloc.count];
 			return page;
 		}
-		/* Slower-path: Alloc array empty, time to refill
-		 *
-		 * Open-coded bulk ptr_ring consumer.
-		 *
-		 * Discussion: the ring consumer lock is not really
-		 * needed due to the softirq/NAPI protection, but
-		 * later need the ability to reclaim pages on the
-		 * ring. Thus, keeping the locks.
-		 */
-		spin_lock(&r->consumer_lock);
-		while ((page = __ptr_ring_consume(r))) {
-			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
-				break;
-			pool->alloc.cache[pool->alloc.count++] = page;
-		}
-		spin_unlock(&r->consumer_lock);
-		return page;
+		refill = true;
 	}
 
-	/* Slow-path: Get page from locked ring queue */
-	page = ptr_ring_consume(&pool->ring);
+	/* Quicker fallback, avoid locks when ring is empty */
+	if (__ptr_ring_empty(r))
+		return NULL;
+
+	/* Slow-path: Get page from locked ring queue,
+	 * refill alloc array if requested.
+	 */
+	spin_lock(&r->consumer_lock);
+	page = __ptr_ring_consume(r);
+	if (refill)
+		pool->alloc.count = __ptr_ring_consume_batched(r,
+							pool->alloc.cache,
+							PP_ALLOC_CACHE_REFILL);
+	spin_unlock(&r->consumer_lock);
 	return page;
 }
 
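
For reference, assembled from the two hunks above, __page_pool_get_cached() reads roughly as follows with this change applied (a readability sketch pieced together from the diff, not a verbatim copy of the tree):

static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path: serve directly from the alloc cache */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		/* Cache empty: ask the slow path below to refill it */
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);
	return page;
}

Net effect: the softirq fast path still serves from the alloc cache without touching the ring, the empty-ring check now only guards the locked slow path, and the open-coded consumer loop is replaced by a single __ptr_ring_consume_batched() call that refills up to PP_ALLOC_CACHE_REFILL pages while the consumer lock is already held.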
