dm cache: optimize dirty bit checking with find_next_bit when resizing
commit f484697e619a83ecc370443a34746379ad99d204 upstream.
When shrinking the fast device, dm-cache iteratively searches for a
dirty bit among the cache blocks to be dropped, which is less efficient.
Use find_next_bit instead, as it is twice as fast as the iterative
approach with test_bit.
Signed-off-by: Ming-Hung Tsai <mtsai@redhat.com>
Fixes: f494a9c6b1 ("dm cache: cache shrinking support")
Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Joe Thornber <thornber@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
8501e38dc9
commit
8c3400a7fb
1 changed file with 8 additions and 8 deletions
|
|
@@ -3009,14 +3009,14 @@ static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 	/*
 	 * We can't drop a dirty block when shrinking the cache.
 	 */
-	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
-		if (is_dirty(cache, new_size)) {
-			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
-			      cache_device_name(cache),
-			      (unsigned long long) from_cblock(new_size));
-			return false;
-		}
-		new_size = to_cblock(from_cblock(new_size) + 1);
+	new_size = to_cblock(find_next_bit(cache->dirty_bitset,
+					   from_cblock(cache->cache_size),
+					   from_cblock(new_size)));
+	if (new_size != cache->cache_size) {
+		DMERR("%s: unable to shrink cache; cache block %llu is dirty",
+		      cache_device_name(cache),
+		      (unsigned long long) from_cblock(new_size));
+		return false;
 	}

 	return true;
Loading…
Add table
Reference in a new issue