From c60d2fc5665d7438bafc89e7f8008a338baff61e Mon Sep 17 00:00:00 2001
From: Daniel Zheng
Date: Thu, 14 Mar 2024 16:21:46 -0700
Subject: [PATCH] libsnapshot: chunk iov writes

Currently if our iov that we are trying to write is greater than 1024
our write will fail with error "INVALID ARGUMENT". This is because
pwritev() system call takes a max input size of IOV_MAX (which is
device dependent). With our increased cache size of 1mb or maybe even
more (or if user configures batch size to be large), our write size
could be greater than IOV_MAX, and will fail with an unhelpful error.
We should chunk these writes to ensure they succeed.

Bug: 322279333
Test: cow_api_test + manual testing with large iov write sizes
Change-Id: Ia1fb53cbfc743cfcdfc7256ff9df075ad0c2dd38
---
 .../libsnapshot/libsnapshot_cow/writer_v3.cpp | 20 ++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
index ea1da4b53..8cc9964bf 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
@@ -717,13 +717,27 @@ bool CowWriterV3::WriteOperation(std::span<const CowOperationV3> ops,
         return false;
     }
     if (!data.empty()) {
-        const auto ret = pwritev(fd_, data.data(), data.size(), next_data_pos_);
-        if (ret != total_data_size) {
+        int total_written = 0;
+        int i = 0;
+        while (i < data.size()) {
+            int chunk = std::min(static_cast<int>(data.size() - i), IOV_MAX);
+
+            const auto ret = pwritev(fd_, data.data() + i, chunk, next_data_pos_ + total_written);
+            if (ret < 0) {
+                PLOG(ERROR) << "write failed chunk size of: " << chunk
+                            << " at offset: " << next_data_pos_ + total_written << " " << errno;
+                return false;
+            }
+            total_written += ret;
+            i += chunk;
+        }
+        if (total_written != total_data_size) {
             PLOG(ERROR) << "write failed for data of size: " << data.size()
-                        << " at offset: " << next_data_pos_ << " " << ret;
+                        << " at offset: " << next_data_pos_ << " " << errno;
             return false;
         }
     }
+    header_.op_count += ops.size();
     next_data_pos_ += total_data_size;