author    Dan Magenheimer <dan.magenheimer@oracle.com>    2012-01-25 17:32:51 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2012-02-08 17:14:12 -0500
commit    9256a4789be3dae37d00924c03546ba7958ea5a3 (patch)
tree      b93a1f8cc641f11fc5da043f8ffb48a40666d84d
parent    c5b1247bd1c3ab6722acfa95213be9a16bfb664c (diff)
zcache: fix deadlock condition
I discovered this deadlock condition awhile ago working on RAMster but
it affects zcache as well.  The list spinlock must be locked prior to
the page spinlock and released after.  As a result, the page copy must
also be done while the locks are held.

Applies to 3.2.  Konrad, please push (via GregKH?)... this is
definitely a bug fix so need not be pushed during a -rc0 window.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
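The rule the patch enforces is a simple lock-ordering discipline: every path
takes the list lock before the per-page lock and releases them in reverse, so
the copy into the page has to happen while both locks are still held.  A
minimal userspace sketch of that discipline, with hypothetical names
(list_lock, page_buf, insert_and_copy) and pthread mutexes standing in for
the kernel spinlocks, not the zcache code itself:

    /* Sketch of the lock-ordering rule: list lock first, then the
     * per-page lock, released in reverse; the data copy is done while
     * both are held. */
    #include <pthread.h>
    #include <string.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    struct page_buf {
            pthread_mutex_t lock;   /* per-page lock, assumed initialized
                                       with pthread_mutex_init()          */
            char data[4096];
    };

    static void insert_and_copy(struct page_buf *page,
                                const void *src, size_t len)
    {
            pthread_mutex_lock(&list_lock);     /* list lock FIRST        */
            pthread_mutex_lock(&page->lock);    /* then the page lock     */

            /* the real code adds the page to a shared list here */
            memcpy(page->data, src, len);       /* copy with both held    */

            pthread_mutex_unlock(&page->lock);  /* release in reverse     */
            pthread_mutex_unlock(&list_lock);
    }

If one path took the page lock first and then waited on the list lock while
another held the list lock and waited on that same page lock, the two would
block each other forever; keeping a single global order for the two locks is
what removes the deadlock.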
 drivers/staging/zcache/zcache-main.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 642840c612ac..ae0ed82dd3cb 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -358,8 +358,8 @@ static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
 	if (unlikely(zbpg == NULL))
 		goto out;
 	/* ok, have a page, now compress the data before taking locks */
-	spin_lock(&zbpg->lock);
 	spin_lock(&zbud_budlists_spinlock);
+	spin_lock(&zbpg->lock);
 	list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
 	zbud_unbuddied[nchunks].count++;
 	zh = &zbpg->buddy[0];
@@ -389,12 +389,11 @@ init_zh:
 	zh->oid = *oid;
 	zh->pool_id = pool_id;
 	zh->client_id = client_id;
-	/* can wait to copy the data until the list locks are dropped */
-	spin_unlock(&zbud_budlists_spinlock);
-
 	to = zbud_data(zh, size);
 	memcpy(to, cdata, size);
 	spin_unlock(&zbpg->lock);
+	spin_unlock(&zbud_budlists_spinlock);
+
 	zbud_cumul_chunk_counts[nchunks]++;
 	atomic_inc(&zcache_zbud_curr_zpages);
 	zcache_zbud_cumul_zpages++;