Merge "snapuserd: Lock the buffer during snapshot-merge" into main am: e10fc8369a am: 5374129251
Original change: https://android-review.googlesource.com/c/platform/system/core/+/3368179 Change-Id: Iab03d147477774062cdf88af1f819bd8a2bf57c5 Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
This commit is contained in:
commit
84fbb8ada0
3 changed files with 41 additions and 14 deletions
|
|
@@ -233,6 +233,11 @@ bool MergeWorker::MergeOrderedOpsAsync() {
|
|||
return false;
|
||||
}
|
||||
|
||||
std::optional<std::lock_guard<std::mutex>> buffer_lock;
|
||||
// Acquire the buffer lock at this point so that RA thread
|
||||
// doesn't step into this buffer. See b/377819507
|
||||
buffer_lock.emplace(snapuserd_->GetBufferLock());
|
||||
|
||||
snapuserd_->SetMergeInProgress(ra_block_index_);
|
||||
|
||||
loff_t offset = 0;
|
||||
|
|
@@ -383,6 +388,9 @@ bool MergeWorker::MergeOrderedOpsAsync() {
|
|||
// Mark the block as merge complete
|
||||
snapuserd_->SetMergeCompleted(ra_block_index_);
|
||||
|
||||
// Release the buffer lock
|
||||
buffer_lock.reset();
|
||||
|
||||
// Notify RA thread that the merge thread is ready to merge the next
|
||||
// window
|
||||
snapuserd_->NotifyRAForMergeReady();
|
||||
|
|
@@ -415,6 +423,11 @@ bool MergeWorker::MergeOrderedOps() {
|
|||
return false;
|
||||
}
|
||||
|
||||
std::optional<std::lock_guard<std::mutex>> buffer_lock;
|
||||
// Acquire the buffer lock at this point so that RA thread
|
||||
// doesn't step into this buffer. See b/377819507
|
||||
buffer_lock.emplace(snapuserd_->GetBufferLock());
|
||||
|
||||
snapuserd_->SetMergeInProgress(ra_block_index_);
|
||||
|
||||
loff_t offset = 0;
|
||||
|
|
@@ -468,6 +481,9 @@ bool MergeWorker::MergeOrderedOps() {
|
|||
// Mark the block as merge complete
|
||||
snapuserd_->SetMergeCompleted(ra_block_index_);
|
||||
|
||||
// Release the buffer lock
|
||||
buffer_lock.reset();
|
||||
|
||||
// Notify RA thread that the merge thread is ready to merge the next
|
||||
// window
|
||||
snapuserd_->NotifyRAForMergeReady();
|
||||
|
|
|
|||
|
|
@@ -186,6 +186,7 @@ class SnapshotHandler : public std::enable_shared_from_this<SnapshotHandler> {
|
|||
|
||||
bool IsIouringSupported();
|
||||
bool CheckPartitionVerification();
|
||||
std::mutex& GetBufferLock() { return buffer_lock_; }
|
||||
|
||||
private:
|
||||
bool ReadMetadata();
|
||||
|
|
@@ -216,6 +217,9 @@ class SnapshotHandler : public std::enable_shared_from_this<SnapshotHandler> {
|
|||
std::mutex lock_;
|
||||
std::condition_variable cv;
|
||||
|
||||
// Lock the buffer used for snapshot-merge
|
||||
std::mutex buffer_lock_;
|
||||
|
||||
void* mapped_addr_;
|
||||
size_t total_mapped_addr_length_;
|
||||
|
||||
|
|
|
|||
|
|
@@ -706,25 +706,32 @@ bool ReadAhead::ReadAheadIOStart() {
|
|||
return false;
|
||||
}
|
||||
|
||||
// Copy the data to scratch space
|
||||
memcpy(metadata_buffer_, ra_temp_meta_buffer_.get(), snapuserd_->GetBufferMetadataSize());
|
||||
memcpy(read_ahead_buffer_, ra_temp_buffer_.get(), total_blocks_merged_ * BLOCK_SZ);
|
||||
// Acquire buffer lock before doing memcpy to the scratch buffer. Although,
|
||||
// by now snapshot-merge thread shouldn't be working on this scratch space
|
||||
// but we take additional measure to ensure that the buffer is not being
|
||||
// used by the merge thread at this point. see b/377819507
|
||||
{
|
||||
std::lock_guard<std::mutex> buffer_lock(snapuserd_->GetBufferLock());
|
||||
// Copy the data to scratch space
|
||||
memcpy(metadata_buffer_, ra_temp_meta_buffer_.get(), snapuserd_->GetBufferMetadataSize());
|
||||
memcpy(read_ahead_buffer_, ra_temp_buffer_.get(), total_blocks_merged_ * BLOCK_SZ);
|
||||
|
||||
loff_t offset = 0;
|
||||
std::unordered_map<uint64_t, void*>& read_ahead_buffer_map = snapuserd_->GetReadAheadMap();
|
||||
read_ahead_buffer_map.clear();
|
||||
loff_t offset = 0;
|
||||
std::unordered_map<uint64_t, void*>& read_ahead_buffer_map = snapuserd_->GetReadAheadMap();
|
||||
read_ahead_buffer_map.clear();
|
||||
|
||||
for (size_t block_index = 0; block_index < blocks_.size(); block_index++) {
|
||||
void* bufptr = static_cast<void*>((char*)read_ahead_buffer_ + offset);
|
||||
uint64_t new_block = blocks_[block_index];
|
||||
for (size_t block_index = 0; block_index < blocks_.size(); block_index++) {
|
||||
void* bufptr = static_cast<void*>((char*)read_ahead_buffer_ + offset);
|
||||
uint64_t new_block = blocks_[block_index];
|
||||
|
||||
read_ahead_buffer_map[new_block] = bufptr;
|
||||
offset += BLOCK_SZ;
|
||||
read_ahead_buffer_map[new_block] = bufptr;
|
||||
offset += BLOCK_SZ;
|
||||
}
|
||||
|
||||
total_ra_blocks_completed_ += total_blocks_merged_;
|
||||
snapuserd_->SetMergedBlockCountForNextCommit(total_blocks_merged_);
|
||||
}
|
||||
|
||||
total_ra_blocks_completed_ += total_blocks_merged_;
|
||||
snapuserd_->SetMergedBlockCountForNextCommit(total_blocks_merged_);
|
||||
|
||||
// Flush the scratch data - Technically, we should flush only for overlapping
|
||||
// blocks; However, since this region is mmap'ed, the dirty pages can still
|
||||
// get flushed to disk at any random point in time. Instead, make sure
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue