logd: always compress SerializedLogChunk in FinishWriting()

When calculating the space used for pruning, the compressed size is used
for chunks that have been compressed; otherwise the uncompressed size is
used.  This is intended to reach a steady state where 1/4 of the log
buffer is the uncompressed log chunk that is actively being written to
and the other 3/4 of the log buffer is compressed logs.
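
A rough sketch of that accounting (illustrative only; this helper is not
part of this change and the names are hypothetical):

    #include <cstddef>
    #include <vector>

    // Illustrative sketch: once a chunk has been compressed, prune accounting
    // charges the compressed size; until then it charges the uncompressed buffer.
    struct ChunkSizeForPrune {
        std::vector<char> contents;        // uncompressed log data
        std::vector<char> compressed_log;  // compressed copy; empty until compressed

        size_t PruneSize() const {
            return !compressed_log.empty() ? compressed_log.size() : contents.size();
        }
    };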

If we wait until there are no readers referencing a log chunk before
compressing it, we end up with two uncompressed chunks (the one that was
just filled, which readers are still referencing, and the new one that
was allocated to hold the most recent log).  Together these take up 1/2
of the log buffer's allotted size and thus cause prune to delete more
compressed logs than it should.
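
For illustration, suppose each log buffer is allotted B bytes and chunks
are sized at B/4: the pinned, just-filled chunk plus the newly allocated
one account for B/4 + B/4 = B/2 of uncompressed data, leaving only B/2
(rather than the intended 3/4) of the allotment for compressed logs.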

Instead, we should always compress log chunks in FinishWriting(), so
that the compressed size is used for every log chunk except the one that
is actively being written to.

Chunks decompressed for readers are ephemeral by nature and thus don't
add to the log buffer size used for pruning.
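
Roughly, the end state looks like the following sketch (consolidated and
simplified from the hunks below; the IncReaderRefCount() body is
abbreviated, and locking and size bookkeeping are omitted):

    // Writer is done with this chunk: compress immediately so prune accounting
    // sees the compressed size, and drop the uncompressed copy if no reader
    // currently needs it.
    void FinishWriting() {
        writer_active_ = false;
        Compress();
        if (reader_ref_count_ == 0) {
            contents_.Resize(0);
        }
    }

    // First reader on a finished chunk: materialize a temporary decompressed
    // copy for reading.
    void SerializedLogChunk::IncReaderRefCount() {
        if (++reader_ref_count_ != 1 || writer_active_) {
            return;
        }
        CompressionEngine::GetInstance().Decompress(compressed_log_, contents_);
    }

    // Last reader gone and the writer is done: drop the decompressed copy.
    // Only the compressed data produced in FinishWriting() remains, so reader
    // decompressions stay ephemeral and never count against the prune budget.
    void SerializedLogChunk::DecReaderRefCount() {
        CHECK_NE(reader_ref_count_, 0U);
        if (--reader_ref_count_ != 0) {
            return;
        }
        if (!writer_active_) {
            contents_.Resize(0);
        }
    }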

Test: observe that log buffers can be filled in the presence of a reader.
Change-Id: Ie21ccff032e41c4a0e51710cc435c5ab316563cb
Author: Tom Cherry
Date: 2020-07-16 20:46:14 -07:00
Commit: 59caa7a045 (parent b371af9e0f)
5 changed files with 18 additions and 20 deletions


@@ -31,7 +31,7 @@ SerializedFlushToState::SerializedFlushToState(uint64_t start, LogMask log_mask)
 SerializedFlushToState::~SerializedFlushToState() {
     log_id_for_each(i) {
         if (log_positions_[i]) {
-            log_positions_[i]->buffer_it->DecReaderRefCount(true);
+            log_positions_[i]->buffer_it->DecReaderRefCount();
         }
     }
 }
@@ -78,7 +78,7 @@ void SerializedFlushToState::AddMinHeapEntry(log_id_t log_id) {
             logs_needed_from_next_position_[log_id] = true;
         } else {
             // Otherwise, if there is another buffer piece, move to that and do the same check.
-            buffer_it->DecReaderRefCount(true);
+            buffer_it->DecReaderRefCount();
             ++buffer_it;
             buffer_it->IncReaderRefCount();
             log_positions_[log_id]->read_offset = 0;
@@ -134,7 +134,7 @@ void SerializedFlushToState::Prune(log_id_t log_id,
     }
 
     // // Decrease the ref count since we're deleting our reference.
-    buffer_it->DecReaderRefCount(false);
+    buffer_it->DecReaderRefCount();
 
     // Delete in the reference.
     log_positions_[log_id].reset();


@@ -123,7 +123,7 @@ void SerializedLogBuffer::RemoveChunkFromStats(log_id_t log_id, SerializedLogChu
         stats_->Subtract(entry->ToLogStatisticsElement(log_id));
         read_offset += entry->total_len();
     }
-    chunk.DecReaderRefCount(false);
+    chunk.DecReaderRefCount();
 }
 
 void SerializedLogBuffer::NotifyReadersOfPrune(


@@ -31,7 +31,6 @@ void SerializedLogChunk::Compress() {
                    << " size used: " << write_offset_
                    << " compressed size: " << compressed_log_.size();
     }
-    contents_.Resize(0);
 }
 
 // TODO: Develop a better reference counting strategy to guard against the case where the writer is
@@ -44,13 +43,13 @@ void SerializedLogChunk::IncReaderRefCount() {
     CompressionEngine::GetInstance().Decompress(compressed_log_, contents_);
 }
 
-void SerializedLogChunk::DecReaderRefCount(bool compress) {
+void SerializedLogChunk::DecReaderRefCount() {
     CHECK_NE(reader_ref_count_, 0U);
     if (--reader_ref_count_ != 0) {
         return;
     }
-    if (compress && !writer_active_) {
-        Compress();
+    if (!writer_active_) {
+        contents_.Resize(0);
     }
 }
@@ -83,18 +82,19 @@ bool SerializedLogChunk::ClearUidLogs(uid_t uid, log_id_t log_id, LogStatistics*
     }
 
     if (new_write_offset == 0) {
-        DecReaderRefCount(false);
+        DecReaderRefCount();
         return true;
     }
 
-    // Clear the old compressed logs and set write_offset_ appropriately for DecReaderRefCount()
-    // to compress the new partially cleared log.
+    // Clear the old compressed logs and set write_offset_ appropriately to compress the new
+    // partially cleared log.
     if (new_write_offset != write_offset_) {
         compressed_log_.Resize(0);
         write_offset_ = new_write_offset;
+        Compress();
     }
 
-    DecReaderRefCount(true);
+    DecReaderRefCount();
     return false;
 }


@@ -30,9 +30,7 @@ class SerializedLogChunk {
     void Compress();
     void IncReaderRefCount();
-    // Decrease the reader ref count and compress the log if appropriate. `compress` should only be
-    // set to false in the case that the log buffer will be deleted afterwards.
-    void DecReaderRefCount(bool compress);
+    void DecReaderRefCount();
 
     // Must have no readers referencing this. Return true if there are no logs left in this chunk.
     bool ClearUidLogs(uid_t uid, log_id_t log_id, LogStatistics* stats);
@@ -50,8 +48,9 @@
     void FinishWriting() {
        writer_active_ = false;
+        Compress();
         if (reader_ref_count_ == 0) {
-            Compress();
             contents_.Resize(0);
         }
     }


@@ -113,8 +113,7 @@ TEST(SerializedLogChunk, three_logs) {
 TEST(SerializedLogChunk, catch_DecCompressedRef_CHECK) {
     size_t chunk_size = 10 * 4096;
     auto chunk = SerializedLogChunk{chunk_size};
-    EXPECT_DEATH({ chunk.DecReaderRefCount(true); }, "");
-    EXPECT_DEATH({ chunk.DecReaderRefCount(false); }, "");
+    EXPECT_DEATH({ chunk.DecReaderRefCount(); }, "");
 }
 
 // Check that the CHECK() in ClearUidLogs() if the ref count is greater than 0 is caught.
@@ -123,7 +122,7 @@ TEST(SerializedLogChunk, catch_ClearUidLogs_CHECK) {
     auto chunk = SerializedLogChunk{chunk_size};
     chunk.IncReaderRefCount();
     EXPECT_DEATH({ chunk.ClearUidLogs(1000, LOG_ID_MAIN, nullptr); }, "");
-    chunk.DecReaderRefCount(false);
+    chunk.DecReaderRefCount();
 }
 
 class UidClearTest : public testing::TestWithParam<bool> {
@@ -144,7 +143,7 @@ class UidClearTest : public testing::TestWithParam<bool> {
         check(chunk_);
 
         if (finish_writing) {
-            chunk_.DecReaderRefCount(false);
+            chunk_.DecReaderRefCount();
         }
     }