From d8435928ea68718da7cf0a9970b4f1950f2362e3 Mon Sep 17 00:00:00 2001 From: Alex Buynytskyy Date: Fri, 27 Mar 2020 14:34:21 -0700 Subject: [PATCH 1/6] [incfs] Stream the hash tree for incremental installation Instead of sending the whole tree upfront use the same streaming interface for it as for the data blocks This improves installation speed by almost 200ms, 650-800ms -> 500-600ms Bug: 153696423 Test: manual, adb install in various configurations Change-Id: Ia83de2af54ca0b1969397514ea5d761719af9055 (cherry picked from commit e2e850f32578527befe0b4281cec454f534c7cf6) Merged-In: Ia83de2af54ca0b1969397514ea5d761719af9055 --- adb/client/incremental.cpp | 80 +-------- adb/client/incremental_server.cpp | 264 ++++++++++++++++++++++-------- adb/client/incremental_utils.cpp | 117 +++++++++++-- adb/client/incremental_utils.h | 26 ++- 4 files changed, 326 insertions(+), 161 deletions(-) diff --git a/adb/client/incremental.cpp b/adb/client/incremental.cpp index 9765292c5..2814932c9 100644 --- a/adb/client/incremental.cpp +++ b/adb/client/incremental.cpp @@ -16,13 +16,13 @@ #include "incremental.h" -#include +#include "incremental_utils.h" + #include #include #include #include "adb_client.h" -#include "adb_io.h" #include "adb_utils.h" #include "commandline.h" #include "sysdeps.h" @@ -31,65 +31,8 @@ using namespace std::literals; namespace incremental { -namespace { - -static constexpr auto IDSIG = ".idsig"sv; - using android::base::StringPrintf; -using Size = int64_t; - -static inline int32_t read_int32(borrowed_fd fd) { - int32_t result; - return ReadFdExactly(fd, &result, sizeof(result)) ? result : -1; -} - -static inline void append_int(borrowed_fd fd, std::vector* bytes) { - int32_t le_val = read_int32(fd); - auto old_size = bytes->size(); - bytes->resize(old_size + sizeof(le_val)); - memcpy(bytes->data() + old_size, &le_val, sizeof(le_val)); -} - -static inline void append_bytes_with_size(borrowed_fd fd, std::vector* bytes) { - int32_t le_size = read_int32(fd); - if (le_size < 0) { - return; - } - int32_t size = int32_t(le32toh(le_size)); - auto old_size = bytes->size(); - bytes->resize(old_size + sizeof(le_size) + size); - memcpy(bytes->data() + old_size, &le_size, sizeof(le_size)); - ReadFdExactly(fd, bytes->data() + old_size + sizeof(le_size), size); -} - -static inline std::pair, int32_t> read_id_sig_headers(borrowed_fd fd) { - std::vector result; - append_int(fd, &result); // version - append_bytes_with_size(fd, &result); // hashingInfo - append_bytes_with_size(fd, &result); // signingInfo - auto le_tree_size = read_int32(fd); - auto tree_size = int32_t(le32toh(le_tree_size)); // size of the verity tree - return {std::move(result), tree_size}; -} - -static inline Size verity_tree_size_for_file(Size fileSize) { - constexpr int INCFS_DATA_FILE_BLOCK_SIZE = 4096; - constexpr int SHA256_DIGEST_SIZE = 32; - constexpr int digest_size = SHA256_DIGEST_SIZE; - constexpr int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size; - - Size total_tree_block_count = 0; - - auto block_count = 1 + (fileSize - 1) / INCFS_DATA_FILE_BLOCK_SIZE; - auto hash_block_count = block_count; - for (auto i = 0; hash_block_count > 1; i++) { - hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block; - total_tree_block_count += hash_block_count; - } - return total_tree_block_count * INCFS_DATA_FILE_BLOCK_SIZE; -} - // Read, verify and return the signature bytes. Keeping fd at the position of start of verity tree. 
static std::pair> read_signature(Size file_size, std::string signature_file, @@ -104,7 +47,7 @@ static std::pair> read_signature(Size file_size, return {}; } - unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY | O_CLOEXEC)); + unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY)); if (fd < 0) { if (!silent) { fprintf(stderr, "Failed to open signature file: %s. Abort.\n", signature_file.c_str()); @@ -172,9 +115,8 @@ static unique_fd start_install(const Files& files, bool silent) { return {}; } - auto file_desc = - StringPrintf("%s:%lld:%s:%s", android::base::Basename(file).c_str(), - (long long)st.st_size, std::to_string(i).c_str(), signature.c_str()); + auto file_desc = StringPrintf("%s:%lld:%d:%s:1", android::base::Basename(file).c_str(), + (long long)st.st_size, i, signature.c_str()); command_args.push_back(std::move(file_desc)); signature_fds.push_back(std::move(signature_fd)); @@ -190,21 +132,9 @@ static unique_fd start_install(const Files& files, bool silent) { return {}; } - // Pushing verity trees for all installation files. - for (auto&& local_fd : signature_fds) { - if (!copy_to_file(local_fd.get(), connection_fd.get())) { - if (!silent) { - fprintf(stderr, "Failed to stream tree bytes: %s. Abort.\n", strerror(errno)); - } - return {}; - } - } - return connection_fd; } -} // namespace - bool can_install(const Files& files) { for (const auto& file : files) { struct stat st; diff --git a/adb/client/incremental_server.cpp b/adb/client/incremental_server.cpp index 4a131ce20..bfe18c0b2 100644 --- a/adb/client/incremental_server.cpp +++ b/adb/client/incremental_server.cpp @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2020 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,9 +44,10 @@ namespace incremental { -static constexpr int kBlockSize = 4096; +static constexpr int kHashesPerBlock = kBlockSize / kDigestSize; static constexpr int kCompressedSizeMax = kBlockSize * 0.95; static constexpr int8_t kTypeData = 0; +static constexpr int8_t kTypeHash = 1; static constexpr int8_t kCompressionNone = 0; static constexpr int8_t kCompressionLZ4 = 1; static constexpr int kCompressBound = std::max(kBlockSize, LZ4_COMPRESSBOUND(kBlockSize)); @@ -132,41 +133,64 @@ struct ResponseHeader { CompressionType compression_type; // 1 byte BlockIdx block_idx; // 4 bytes BlockSize block_size; // 2 bytes + + static constexpr size_t responseSizeFor(size_t dataSize) { + return dataSize + sizeof(ResponseHeader); + } +} __attribute__((packed)); + +template +struct BlockBuffer { + ResponseHeader header; + char data[Size]; } __attribute__((packed)); // Holds streaming state for a file class File { public: // Plain file - File(const char* filepath, FileId id, int64_t size, unique_fd fd) : File(filepath, id, size) { + File(const char* filepath, FileId id, int64_t size, unique_fd fd, int64_t tree_offset, + unique_fd tree_fd) + : File(filepath, id, size, tree_offset) { this->fd_ = std::move(fd); + this->tree_fd_ = std::move(tree_fd); priority_blocks_ = PriorityBlocksForFile(filepath, fd_.get(), size); } - int64_t ReadBlock(BlockIdx block_idx, void* buf, bool* is_zip_compressed, - std::string* error) const { - char* buf_ptr = static_cast(buf); + int64_t ReadDataBlock(BlockIdx block_idx, void* buf, bool* is_zip_compressed) const { int64_t bytes_read = -1; const off64_t offsetStart = blockIndexToOffset(block_idx); - bytes_read = adb_pread(fd_, &buf_ptr[sizeof(ResponseHeader)], kBlockSize, offsetStart); + bytes_read = adb_pread(fd_, buf, kBlockSize, offsetStart); + 
return bytes_read; + } + int64_t ReadTreeBlock(BlockIdx block_idx, void* buf) const { + int64_t bytes_read = -1; + const off64_t offsetStart = tree_offset_ + blockIndexToOffset(block_idx); + bytes_read = adb_pread(tree_fd_, buf, kBlockSize, offsetStart); return bytes_read; } - const unique_fd& RawFd() const { return fd_; } const std::vector& PriorityBlocks() const { return priority_blocks_; } std::vector sentBlocks; NumBlocks sentBlocksCount = 0; + std::vector sentTreeBlocks; + const char* const filepath; const FileId id; const int64_t size; private: - File(const char* filepath, FileId id, int64_t size) : filepath(filepath), id(id), size(size) { + File(const char* filepath, FileId id, int64_t size, int64_t tree_offset) + : filepath(filepath), id(id), size(size), tree_offset_(tree_offset) { sentBlocks.resize(numBytesToNumBlocks(size)); + sentTreeBlocks.resize(verity_tree_blocks_for_file(size)); } unique_fd fd_; std::vector priority_blocks_; + + unique_fd tree_fd_; + const int64_t tree_offset_; }; class IncrementalServer { @@ -174,6 +198,8 @@ class IncrementalServer { IncrementalServer(unique_fd adb_fd, unique_fd output_fd, std::vector files) : adb_fd_(std::move(adb_fd)), output_fd_(std::move(output_fd)), files_(std::move(files)) { buffer_.reserve(kReadBufferSize); + pendingBlocksBuffer_.resize(kChunkFlushSize + 2 * kBlockSize); + pendingBlocks_ = pendingBlocksBuffer_.data() + sizeof(ChunkHeader); } bool Serve(); @@ -208,7 +234,11 @@ class IncrementalServer { void erase_buffer_head(int count) { buffer_.erase(buffer_.begin(), buffer_.begin() + count); } enum class SendResult { Sent, Skipped, Error }; - SendResult SendBlock(FileId fileId, BlockIdx blockIdx, bool flush = false); + SendResult SendDataBlock(FileId fileId, BlockIdx blockIdx, bool flush = false); + + bool SendTreeBlock(FileId fileId, int32_t fileBlockIdx, BlockIdx blockIdx); + bool SendTreeBlocksForDataBlock(FileId fileId, BlockIdx blockIdx); + bool SendDone(); void RunPrefetching(); @@ -228,7 +258,10 @@ class IncrementalServer { int compressed_ = 0, uncompressed_ = 0; long long sentSize_ = 0; - std::vector pendingBlocks_; + static constexpr auto kChunkFlushSize = 31 * kBlockSize; + + std::vector pendingBlocksBuffer_; + char* pendingBlocks_ = nullptr; // True when client notifies that all the data has been received bool servingComplete_ = false; @@ -250,7 +283,7 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking) if (bcur > 0) { // output the rest. - WriteFdExactly(output_fd_, buffer_.data(), bcur); + (void)WriteFdExactly(output_fd_, buffer_.data(), bcur); erase_buffer_head(bcur); } @@ -265,9 +298,10 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking) auto res = adb_poll(&pfd, 1, blocking ? kPollTimeoutMillis : 0); if (res != 1) { - WriteFdExactly(output_fd_, buffer_.data(), buffer_.size()); + auto err = errno; + (void)WriteFdExactly(output_fd_, buffer_.data(), buffer_.size()); if (res < 0) { - D("Failed to poll: %s\n", strerror(errno)); + D("Failed to poll: %s", strerror(err)); return false; } if (blocking) { @@ -289,7 +323,7 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking) continue; } - D("Failed to read from fd %d: %d. Exit\n", adb_fd_.get(), errno); + D("Failed to read from fd %d: %d. Exit", adb_fd_.get(), errno); break; } // socket is closed. 
print remaining messages @@ -313,56 +347,113 @@ std::optional IncrementalServer::ReadRequest(bool blocking) { return request; } -auto IncrementalServer::SendBlock(FileId fileId, BlockIdx blockIdx, bool flush) -> SendResult { +bool IncrementalServer::SendTreeBlocksForDataBlock(const FileId fileId, const BlockIdx blockIdx) { + auto& file = files_[fileId]; + const int32_t data_block_count = numBytesToNumBlocks(file.size); + + const int32_t total_nodes_count(file.sentTreeBlocks.size()); + const int32_t leaf_nodes_count = (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock; + + const int32_t leaf_nodes_offset = total_nodes_count - leaf_nodes_count; + + // Leaf level, sending only 1 block. + const int32_t leaf_idx = leaf_nodes_offset + blockIdx / kHashesPerBlock; + if (file.sentTreeBlocks[leaf_idx]) { + return true; + } + if (!SendTreeBlock(fileId, blockIdx, leaf_idx)) { + return false; + } + file.sentTreeBlocks[leaf_idx] = true; + + // Non-leaf, sending EVERYTHING. This should be done only once. + if (leaf_nodes_offset == 0 || file.sentTreeBlocks[0]) { + return true; + } + + for (int32_t i = 0; i < leaf_nodes_offset; ++i) { + if (!SendTreeBlock(fileId, blockIdx, i)) { + return false; + } + file.sentTreeBlocks[i] = true; + } + return true; +} + +bool IncrementalServer::SendTreeBlock(FileId fileId, int32_t fileBlockIdx, BlockIdx blockIdx) { + const auto& file = files_[fileId]; + + BlockBuffer buffer; + const int64_t bytesRead = file.ReadTreeBlock(blockIdx, buffer.data); + if (bytesRead <= 0) { + fprintf(stderr, "Failed to get data for %s.idsig at blockIdx=%d.\n", file.filepath, + blockIdx); + return false; + } + + buffer.header.compression_type = kCompressionNone; + buffer.header.block_type = kTypeHash; + buffer.header.file_id = toBigEndian(fileId); + buffer.header.block_size = toBigEndian(int16_t(bytesRead)); + buffer.header.block_idx = toBigEndian(blockIdx); + + Send(&buffer, ResponseHeader::responseSizeFor(bytesRead), /*flush=*/false); + + return true; +} + +auto IncrementalServer::SendDataBlock(FileId fileId, BlockIdx blockIdx, bool flush) -> SendResult { auto& file = files_[fileId]; if (blockIdx >= static_cast(file.sentBlocks.size())) { - fprintf(stderr, "Failed to read file %s at block %" PRId32 " (past end).\n", file.filepath, - blockIdx); - return SendResult::Error; + // may happen as we schedule some extra blocks for reported page misses + D("Skipped reading file %s at block %" PRId32 " (past end).", file.filepath, blockIdx); + return SendResult::Skipped; } if (file.sentBlocks[blockIdx]) { return SendResult::Skipped; } - std::string error; - char raw[sizeof(ResponseHeader) + kBlockSize]; - bool isZipCompressed = false; - const int64_t bytesRead = file.ReadBlock(blockIdx, &raw, &isZipCompressed, &error); - if (bytesRead < 0) { - fprintf(stderr, "Failed to get data for %s at blockIdx=%d (%s).\n", file.filepath, blockIdx, - error.c_str()); + + if (!SendTreeBlocksForDataBlock(fileId, blockIdx)) { return SendResult::Error; } - ResponseHeader* header = nullptr; - char data[sizeof(ResponseHeader) + kCompressBound]; - char* compressed = data + sizeof(*header); + BlockBuffer raw; + bool isZipCompressed = false; + const int64_t bytesRead = file.ReadDataBlock(blockIdx, raw.data, &isZipCompressed); + if (bytesRead < 0) { + fprintf(stderr, "Failed to get data for %s at blockIdx=%d (%d).\n", file.filepath, blockIdx, + errno); + return SendResult::Error; + } + + BlockBuffer compressed; int16_t compressedSize = 0; if (!isZipCompressed) { - compressedSize = - LZ4_compress_default(raw + 
sizeof(*header), compressed, bytesRead, kCompressBound); + compressedSize = LZ4_compress_default(raw.data, compressed.data, bytesRead, kCompressBound); } int16_t blockSize; + ResponseHeader* header; if (compressedSize > 0 && compressedSize < kCompressedSizeMax) { ++compressed_; blockSize = compressedSize; - header = reinterpret_cast(data); + header = &compressed.header; header->compression_type = kCompressionLZ4; } else { ++uncompressed_; blockSize = bytesRead; - header = reinterpret_cast(raw); + header = &raw.header; header->compression_type = kCompressionNone; } header->block_type = kTypeData; - header->file_id = toBigEndian(fileId); header->block_size = toBigEndian(blockSize); header->block_idx = toBigEndian(blockIdx); file.sentBlocks[blockIdx] = true; file.sentBlocksCount += 1; - Send(header, sizeof(*header) + blockSize, flush); + Send(header, ResponseHeader::responseSizeFor(blockSize), flush); + return SendResult::Sent; } @@ -388,7 +479,8 @@ void IncrementalServer::RunPrefetching() { if (!priority_blocks.empty()) { for (auto& i = prefetch.priorityIndex; blocksToSend > 0 && i < (BlockIdx)priority_blocks.size(); ++i) { - if (auto res = SendBlock(file.id, priority_blocks[i]); res == SendResult::Sent) { + if (auto res = SendDataBlock(file.id, priority_blocks[i]); + res == SendResult::Sent) { --blocksToSend; } else if (res == SendResult::Error) { fprintf(stderr, "Failed to send priority block %" PRId32 "\n", i); @@ -396,7 +488,7 @@ void IncrementalServer::RunPrefetching() { } } for (auto& i = prefetch.overallIndex; blocksToSend > 0 && i < prefetch.overallEnd; ++i) { - if (auto res = SendBlock(file.id, i); res == SendResult::Sent) { + if (auto res = SendDataBlock(file.id, i); res == SendResult::Sent) { --blocksToSend; } else if (res == SendResult::Error) { fprintf(stderr, "Failed to send block %" PRId32 "\n", i); @@ -409,30 +501,25 @@ void IncrementalServer::RunPrefetching() { } void IncrementalServer::Send(const void* data, size_t size, bool flush) { - constexpr auto kChunkFlushSize = 31 * kBlockSize; - - if (pendingBlocks_.empty()) { - pendingBlocks_.resize(sizeof(ChunkHeader)); - } - pendingBlocks_.insert(pendingBlocks_.end(), static_cast(data), - static_cast(data) + size); - if (flush || pendingBlocks_.size() > kChunkFlushSize) { + pendingBlocks_ = std::copy_n(static_cast(data), size, pendingBlocks_); + if (flush || pendingBlocks_ - pendingBlocksBuffer_.data() > kChunkFlushSize) { Flush(); } } void IncrementalServer::Flush() { - if (pendingBlocks_.empty()) { + auto dataBytes = pendingBlocks_ - (pendingBlocksBuffer_.data() + sizeof(ChunkHeader)); + if (dataBytes == 0) { return; } - *(ChunkHeader*)pendingBlocks_.data() = - toBigEndian(pendingBlocks_.size() - sizeof(ChunkHeader)); - if (!WriteFdExactly(adb_fd_, pendingBlocks_.data(), pendingBlocks_.size())) { - fprintf(stderr, "Failed to write %d bytes\n", int(pendingBlocks_.size())); + *(ChunkHeader*)pendingBlocksBuffer_.data() = toBigEndian(dataBytes); + auto totalBytes = sizeof(ChunkHeader) + dataBytes; + if (!WriteFdExactly(adb_fd_, pendingBlocksBuffer_.data(), totalBytes)) { + fprintf(stderr, "Failed to write %d bytes\n", int(totalBytes)); } - sentSize_ += pendingBlocks_.size(); - pendingBlocks_.clear(); + sentSize_ += totalBytes; + pendingBlocks_ = pendingBlocksBuffer_.data() + sizeof(ChunkHeader); } bool IncrementalServer::ServingComplete(std::optional startTime, int missesCount, @@ -443,7 +530,7 @@ bool IncrementalServer::ServingComplete(std::optional startTime, int D("Streaming completed.\n" "Misses: %d, of those unique: %d; sent 
compressed: %d, uncompressed: " "%d, mb: %.3f\n" - "Total time taken: %.3fms\n", + "Total time taken: %.3fms", missesCount, missesSent, compressed_, uncompressed_, sentSize_ / 1024.0 / 1024.0, duration_cast(endTime - (startTime ? *startTime : endTime)).count() / 1000.0); return true; @@ -510,9 +597,21 @@ bool IncrementalServer::Serve() { fileId, blockIdx); break; } - // fprintf(stderr, "\treading file %d block %04d\n", (int)fileId, - // (int)blockIdx); - if (auto res = SendBlock(fileId, blockIdx, true); res == SendResult::Error) { + + if (VLOG_IS_ON(INCREMENTAL)) { + auto& file = files_[fileId]; + auto posP = std::find(file.PriorityBlocks().begin(), + file.PriorityBlocks().end(), blockIdx); + D("\tMISSING BLOCK: reading file %d block %04d (in priority: %d of %d)", + (int)fileId, (int)blockIdx, + posP == file.PriorityBlocks().end() + ? -1 + : int(posP - file.PriorityBlocks().begin()), + int(file.PriorityBlocks().size())); + } + + if (auto res = SendDataBlock(fileId, blockIdx, true); + res == SendResult::Error) { fprintf(stderr, "Failed to send block %" PRId32 ".\n", blockIdx); } else if (res == SendResult::Sent) { ++missesSent; @@ -536,7 +635,7 @@ bool IncrementalServer::Serve() { fileId); break; } - D("Received prefetch request for file_id %" PRId16 ".\n", fileId); + D("Received prefetch request for file_id %" PRId16 ".", fileId); prefetches_.emplace_back(files_[fileId]); break; } @@ -551,6 +650,43 @@ bool IncrementalServer::Serve() { } } +static std::pair open_fd(const char* filepath) { + struct stat st; + if (stat(filepath, &st)) { + error_exit("inc-server: failed to stat input file '%s'.", filepath); + } + + unique_fd fd(adb_open(filepath, O_RDONLY)); + if (fd < 0) { + error_exit("inc-server: failed to open file '%s'.", filepath); + } + + return {std::move(fd), st.st_size}; +} + +static std::pair open_signature(int64_t file_size, const char* filepath) { + std::string signature_file(filepath); + signature_file += IDSIG; + + unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY)); + if (fd < 0) { + error_exit("inc-server: failed to open file '%s'.", signature_file.c_str()); + } + + auto [tree_offset, tree_size] = skip_id_sig_headers(fd); + if (auto expected = verity_tree_size_for_file(file_size); tree_size != expected) { + error_exit("Verity tree size mismatch in signature file: %s [was %lld, expected %lld].\n", + signature_file.c_str(), (long long)tree_size, (long long)expected); + } + + int32_t data_block_count = numBytesToNumBlocks(file_size); + int32_t leaf_nodes_count = (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock; + D("Verity tree loaded: %s, tree size: %d (%d blocks, %d leafs)", signature_file.c_str(), + int(tree_size), int(numBytesToNumBlocks(tree_size)), int(leaf_nodes_count)); + + return {std::move(fd), tree_offset}; +} + bool serve(int connection_fd, int output_fd, int argc, const char** argv) { auto connection_ufd = unique_fd(connection_fd); auto output_ufd = unique_fd(output_fd); @@ -563,17 +699,11 @@ bool serve(int connection_fd, int output_fd, int argc, const char** argv) { for (int i = 0; i < argc; ++i) { auto filepath = argv[i]; - struct stat st; - if (stat(filepath, &st)) { - fprintf(stderr, "Failed to stat input file %s. 
Abort.\n", filepath); - return {}; - } + auto [file_fd, file_size] = open_fd(filepath); + auto [sign_fd, sign_offset] = open_signature(file_size, filepath); - unique_fd fd(adb_open(filepath, O_RDONLY)); - if (fd < 0) { - error_exit("inc-server: failed to open file '%s'.", filepath); - } - files.emplace_back(filepath, i, st.st_size, std::move(fd)); + files.emplace_back(filepath, i, file_size, std::move(file_fd), sign_offset, + std::move(sign_fd)); } IncrementalServer server(std::move(connection_ufd), std::move(output_ufd), std::move(files)); diff --git a/adb/client/incremental_utils.cpp b/adb/client/incremental_utils.cpp index fa501e49d..78497b8dc 100644 --- a/adb/client/incremental_utils.cpp +++ b/adb/client/incremental_utils.cpp @@ -18,6 +18,7 @@ #include "incremental_utils.h" +#include #include #include #include @@ -28,19 +29,98 @@ #include #include +#include "adb_io.h" #include "adb_trace.h" #include "sysdeps.h" using namespace std::literals; -static constexpr int kBlockSize = 4096; +namespace incremental { static constexpr inline int32_t offsetToBlockIndex(int64_t offset) { return (offset & ~(kBlockSize - 1)) >> 12; } +Size verity_tree_blocks_for_file(Size fileSize) { + if (fileSize == 0) { + return 0; + } + + constexpr int hash_per_block = kBlockSize / kDigestSize; + + Size total_tree_block_count = 0; + + auto block_count = 1 + (fileSize - 1) / kBlockSize; + auto hash_block_count = block_count; + for (auto i = 0; hash_block_count > 1; i++) { + hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block; + total_tree_block_count += hash_block_count; + } + return total_tree_block_count; +} + +Size verity_tree_size_for_file(Size fileSize) { + return verity_tree_blocks_for_file(fileSize) * kBlockSize; +} + +static inline int32_t read_int32(borrowed_fd fd) { + int32_t result; + return ReadFdExactly(fd, &result, sizeof(result)) ? 
result : -1; +} + +static inline int32_t skip_int(borrowed_fd fd) { + return adb_lseek(fd, 4, SEEK_CUR); +} + +static inline void append_int(borrowed_fd fd, std::vector* bytes) { + int32_t le_val = read_int32(fd); + auto old_size = bytes->size(); + bytes->resize(old_size + sizeof(le_val)); + memcpy(bytes->data() + old_size, &le_val, sizeof(le_val)); +} + +static inline void append_bytes_with_size(borrowed_fd fd, std::vector* bytes) { + int32_t le_size = read_int32(fd); + if (le_size < 0) { + return; + } + int32_t size = int32_t(le32toh(le_size)); + auto old_size = bytes->size(); + bytes->resize(old_size + sizeof(le_size) + size); + memcpy(bytes->data() + old_size, &le_size, sizeof(le_size)); + ReadFdExactly(fd, bytes->data() + old_size + sizeof(le_size), size); +} + +static inline int32_t skip_bytes_with_size(borrowed_fd fd) { + int32_t le_size = read_int32(fd); + if (le_size < 0) { + return -1; + } + int32_t size = int32_t(le32toh(le_size)); + return (int32_t)adb_lseek(fd, size, SEEK_CUR); +} + +std::pair, int32_t> read_id_sig_headers(borrowed_fd fd) { + std::vector result; + append_int(fd, &result); // version + append_bytes_with_size(fd, &result); // hashingInfo + append_bytes_with_size(fd, &result); // signingInfo + auto le_tree_size = read_int32(fd); + auto tree_size = int32_t(le32toh(le_tree_size)); // size of the verity tree + return {std::move(result), tree_size}; +} + +std::pair skip_id_sig_headers(borrowed_fd fd) { + skip_int(fd); // version + skip_bytes_with_size(fd); // hashingInfo + auto offset = skip_bytes_with_size(fd); // signingInfo + auto le_tree_size = read_int32(fd); + auto tree_size = int32_t(le32toh(le_tree_size)); // size of the verity tree + return {offset + sizeof(le_tree_size), tree_size}; +} + template -T valueAt(int fd, off64_t offset) { +static T valueAt(borrowed_fd fd, off64_t offset) { T t; memset(&t, 0, sizeof(T)); if (adb_pread(fd, &t, sizeof(T), offset) != sizeof(T)) { @@ -68,7 +148,7 @@ static void unduplicate(std::vector& v) { v.end()); } -static off64_t CentralDirOffset(int fd, int64_t fileSize) { +static off64_t CentralDirOffset(borrowed_fd fd, Size fileSize) { static constexpr int kZipEocdRecMinSize = 22; static constexpr int32_t kZipEocdRecSig = 0x06054b50; static constexpr int kZipEocdCentralDirSizeFieldOffset = 12; @@ -103,7 +183,7 @@ static off64_t CentralDirOffset(int fd, int64_t fileSize) { } // Does not support APKs larger than 4GB -static off64_t SignerBlockOffset(int fd, int64_t fileSize) { +static off64_t SignerBlockOffset(borrowed_fd fd, Size fileSize) { static constexpr int kApkSigBlockMinSize = 32; static constexpr int kApkSigBlockFooterSize = 24; static constexpr int64_t APK_SIG_BLOCK_MAGIC_HI = 0x3234206b636f6c42l; @@ -132,7 +212,7 @@ static off64_t SignerBlockOffset(int fd, int64_t fileSize) { return signerBlockOffset; } -static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, int64_t fileSize) { +static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, Size fileSize) { int32_t signerBlockIndex = offsetToBlockIndex(signerBlockOffset); int32_t lastBlockIndex = offsetToBlockIndex(fileSize); const auto numPriorityBlocks = lastBlockIndex - signerBlockIndex + 1; @@ -160,7 +240,7 @@ static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, int64_t return zipPriorityBlocks; } -[[maybe_unused]] static ZipArchiveHandle openZipArchiveFd(int fd) { +[[maybe_unused]] static ZipArchiveHandle openZipArchiveFd(borrowed_fd fd) { bool transferFdOwnership = false; #ifdef _WIN32 // @@ -179,20 +259,22 @@ static std::vector 
ZipPriorityBlocks(off64_t signerBlockOffset, int64_t D("%s failed at DuplicateHandle: %d", __func__, (int)::GetLastError()); return {}; } - fd = _open_osfhandle((intptr_t)dupedHandle, _O_RDONLY | _O_BINARY); - if (fd < 0) { + int osfd = _open_osfhandle((intptr_t)dupedHandle, _O_RDONLY | _O_BINARY); + if (osfd < 0) { D("%s failed at _open_osfhandle: %d", __func__, errno); ::CloseHandle(handle); return {}; } transferFdOwnership = true; +#else + int osfd = fd.get(); #endif ZipArchiveHandle zip; - if (OpenArchiveFd(fd, "apk_fd", &zip, transferFdOwnership) != 0) { + if (OpenArchiveFd(osfd, "apk_fd", &zip, transferFdOwnership) != 0) { D("%s failed at OpenArchiveFd: %d", __func__, errno); #ifdef _WIN32 // "_close()" is a secret WinCRT name for the regular close() function. - _close(fd); + _close(osfd); #endif return {}; } @@ -200,7 +282,7 @@ static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, int64_t } static std::pair> openZipArchive( - int fd, int64_t fileSize) { + borrowed_fd fd, Size fileSize) { #ifndef __LP64__ if (fileSize >= INT_MAX) { return {openZipArchiveFd(fd), nullptr}; @@ -220,9 +302,10 @@ static std::pair> o return {zip, std::move(mapping)}; } -static std::vector InstallationPriorityBlocks(int fd, int64_t fileSize) { +static std::vector InstallationPriorityBlocks(int fd, Size fileSize) { static constexpr std::array additional_matches = { "resources.arsc"sv, "AndroidManifest.xml"sv, "classes.dex"sv}; + auto [zip, _] = openZipArchive(fd, fileSize); if (!zip) { return {}; @@ -249,8 +332,9 @@ static std::vector InstallationPriorityBlocks(int fd, int64_t fileSize) if (entryName == "classes.dex"sv) { // Only the head is needed for installation int32_t startBlockIndex = offsetToBlockIndex(entry.offset); - appendBlocks(startBlockIndex, 1, &installationPriorityBlocks); - D("\tadding to priority blocks: '%.*s' 1", (int)entryName.size(), entryName.data()); + appendBlocks(startBlockIndex, 2, &installationPriorityBlocks); + D("\tadding to priority blocks: '%.*s' (%d)", (int)entryName.size(), entryName.data(), + 2); } else { // Full entries are needed for installation off64_t entryStartOffset = entry.offset; @@ -273,8 +357,8 @@ static std::vector InstallationPriorityBlocks(int fd, int64_t fileSize) return installationPriorityBlocks; } -namespace incremental { -std::vector PriorityBlocksForFile(const std::string& filepath, int fd, int64_t fileSize) { +std::vector PriorityBlocksForFile(const std::string& filepath, borrowed_fd fd, + Size fileSize) { if (!android::base::EndsWithIgnoreCase(filepath, ".apk")) { return {}; } @@ -291,4 +375,5 @@ std::vector PriorityBlocksForFile(const std::string& filepath, int fd, unduplicate(priorityBlocks); return priorityBlocks; } + } // namespace incremental diff --git a/adb/client/incremental_utils.h b/adb/client/incremental_utils.h index 8bcf6c081..d969d9412 100644 --- a/adb/client/incremental_utils.h +++ b/adb/client/incremental_utils.h @@ -16,11 +16,31 @@ #pragma once -#include +#include "adb_unique_fd.h" #include +#include +#include #include +#include + namespace incremental { -std::vector PriorityBlocksForFile(const std::string& filepath, int fd, int64_t fileSize); -} // namespace incremental \ No newline at end of file + +using Size = int64_t; +constexpr int kBlockSize = 4096; +constexpr int kSha256DigestSize = 32; +constexpr int kDigestSize = kSha256DigestSize; + +constexpr std::string_view IDSIG = ".idsig"; + +std::vector PriorityBlocksForFile(const std::string& filepath, borrowed_fd fd, + Size fileSize); + +Size verity_tree_blocks_for_file(Size 
fileSize); +Size verity_tree_size_for_file(Size fileSize); + +std::pair, int32_t> read_id_sig_headers(borrowed_fd fd); +std::pair skip_id_sig_headers(borrowed_fd fd); + +} // namespace incremental From 1093473be06409bd44d4cb60e313216350b3cdd5 Mon Sep 17 00:00:00 2001 From: Yurii Zubrytskyi Date: Wed, 15 Apr 2020 10:32:53 -0700 Subject: [PATCH 2/6] Fix a bad conflict resolution (missed int -> borrowed_fd parameter type change) Bug: 153696423 Test: builds Merged-In: Ia83de2af54ca0b1969397514ea5d761719af9055 Change-Id: I67500c4f9b44b322883dbd7851b8391a2fbbef20 --- adb/client/incremental_utils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adb/client/incremental_utils.cpp b/adb/client/incremental_utils.cpp index 78497b8dc..3297a6d3c 100644 --- a/adb/client/incremental_utils.cpp +++ b/adb/client/incremental_utils.cpp @@ -302,7 +302,7 @@ static std::pair> o return {zip, std::move(mapping)}; } -static std::vector InstallationPriorityBlocks(int fd, Size fileSize) { +static std::vector InstallationPriorityBlocks(borrowed_fd fd, Size fileSize) { static constexpr std::array additional_matches = { "resources.arsc"sv, "AndroidManifest.xml"sv, "classes.dex"sv}; From f846413e621d7245d8e78f04349a6a93d2bbbea4 Mon Sep 17 00:00:00 2001 From: Christopher Ferris Date: Tue, 26 May 2020 10:33:18 -0700 Subject: [PATCH 3/6] Fail explicitly on length overflow. Instead of aborting when FileMap::create detects an overflow, detect the overflow directly and fail the call. Bug: 156997193 Test: Ran unit tests, including new unit test that aborted before. Change-Id: Ie49975b8949fd12bbde14346ec9bbb774ef88a51 Merged-In: Ie49975b8949fd12bbde14346ec9bbb774ef88a51 (cherry picked from commit 68604b9c29b5bd11e2e2dbb848d6b364bf627d21) --- libutils/FileMap.cpp | 6 +++++- libutils/FileMap_test.cpp | 13 +++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/libutils/FileMap.cpp b/libutils/FileMap.cpp index 1202c156d..c8286311f 100644 --- a/libutils/FileMap.cpp +++ b/libutils/FileMap.cpp @@ -189,7 +189,11 @@ bool FileMap::create(const char* origFileName, int fd, off64_t offset, size_t le int adjust = offset % mPageSize; off64_t adjOffset = offset - adjust; - size_t adjLength = length + adjust; + size_t adjLength; + if (__builtin_add_overflow(length, adjust, &adjLength)) { + ALOGE("adjusted length overflow: length %zu adjust %d", length, adjust); + return false; + } int flags = MAP_SHARED; int prot = PROT_READ; diff --git a/libutils/FileMap_test.cpp b/libutils/FileMap_test.cpp index 576d89bbe..096e27a56 100644 --- a/libutils/FileMap_test.cpp +++ b/libutils/FileMap_test.cpp @@ -32,3 +32,16 @@ TEST(FileMap, zero_length_mapping) { ASSERT_EQ(0u, m.getDataLength()); ASSERT_EQ(4096, m.getDataOffset()); } + +TEST(FileMap, offset_overflow) { + // Make sure that an end that overflows SIZE_MAX will not abort. + // See http://b/156997193. + TemporaryFile tf; + ASSERT_TRUE(tf.fd != -1); + + off64_t offset = 200; + size_t length = SIZE_MAX; + + android::FileMap m; + ASSERT_FALSE(m.create("test", tf.fd, offset, length, true)); +} From 40f657b9f4b1053b0a04034f36a3dc1a89c3451a Mon Sep 17 00:00:00 2001 From: Christopher Ferris Date: Tue, 26 May 2020 10:33:18 -0700 Subject: [PATCH 4/6] Fail explicitly on length overflow. Instead of aborting when FileMap::create detects an overflow, detect the overflow directly and fail the call. Bug: 156997193 Test: Ran unit tests, including new unit test that aborted before. 
Change-Id: Ie49975b8949fd12bbde14346ec9bbb774ef88a51
Merged-In: Ie49975b8949fd12bbde14346ec9bbb774ef88a51
(cherry picked from commit 68604b9c29b5bd11e2e2dbb848d6b364bf627d21)
---
 libutils/FileMap.cpp      |  6 +++++-
 libutils/FileMap_test.cpp | 13 +++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/libutils/FileMap.cpp b/libutils/FileMap.cpp
index 1202c156d..c8286311f 100644
--- a/libutils/FileMap.cpp
+++ b/libutils/FileMap.cpp
@@ -189,7 +189,11 @@ bool FileMap::create(const char* origFileName, int fd, off64_t offset, size_t le
 
     int adjust = offset % mPageSize;
     off64_t adjOffset = offset - adjust;
-    size_t adjLength = length + adjust;
+    size_t adjLength;
+    if (__builtin_add_overflow(length, adjust, &adjLength)) {
+        ALOGE("adjusted length overflow: length %zu adjust %d", length, adjust);
+        return false;
+    }
 
     int flags = MAP_SHARED;
     int prot = PROT_READ;
diff --git a/libutils/FileMap_test.cpp b/libutils/FileMap_test.cpp
index 576d89bbe..096e27a56 100644
--- a/libutils/FileMap_test.cpp
+++ b/libutils/FileMap_test.cpp
@@ -32,3 +32,16 @@ TEST(FileMap, zero_length_mapping) {
     ASSERT_EQ(0u, m.getDataLength());
     ASSERT_EQ(4096, m.getDataOffset());
 }
+
+TEST(FileMap, offset_overflow) {
+    // Make sure that an end that overflows SIZE_MAX will not abort.
+    // See http://b/156997193.
+    TemporaryFile tf;
+    ASSERT_TRUE(tf.fd != -1);
+
+    off64_t offset = 200;
+    size_t length = SIZE_MAX;
+
+    android::FileMap m;
+    ASSERT_FALSE(m.create("test", tf.fd, offset, length, true));
+}

From 4d14303653247da3922242796ab6d63123fbd004 Mon Sep 17 00:00:00 2001
From: Christopher Ferris
Date: Tue, 26 May 2020 10:33:18 -0700
Subject: [PATCH 5/6] Fail explicitly on length overflow.

Instead of aborting when FileMap::create detects an overflow,
detect the overflow directly and fail the call.

Bug: 156997193
Test: Ran unit tests, including new unit test that aborted before.
Change-Id: Ie49975b8949fd12bbde14346ec9bbb774ef88a51
Merged-In: Ie49975b8949fd12bbde14346ec9bbb774ef88a51
(cherry picked from commit 68604b9c29b5bd11e2e2dbb848d6b364bf627d21)
---
 libutils/FileMap.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/libutils/FileMap.cpp b/libutils/FileMap.cpp
index 1afa1ecae..b9f411ef2 100644
--- a/libutils/FileMap.cpp
+++ b/libutils/FileMap.cpp
@@ -182,6 +182,10 @@ bool FileMap::create(const char* origFileName, int fd, off64_t offset, size_t le
     adjust = offset % mPageSize;
     adjOffset = offset - adjust;
     adjLength = length + adjust;
+    if (__builtin_add_overflow(length, adjust, &adjLength)) {
+        ALOGE("adjusted length overflow: length %zu adjust %d", length, adjust);
+        return false;
+    }
 
     flags = MAP_SHARED;
     prot = PROT_READ;

From ee22384c54d42149491c8b9dbcda0d8c5e88eddc Mon Sep 17 00:00:00 2001
From: Steven Moreland
Date: Tue, 28 Jul 2020 21:41:54 +0000
Subject: [PATCH 6/6] libutils: check vsnprintf error

For encoding errors, this function will return a negative value
which causes problems down the line. Check for an error and
return.

Also, integer overflows are guarded.
Bug: 161894517
Test: fuzzer test case
Change-Id: Ia85067d4258bde4b875c832d6223db5dd26b8838
Merged-In: Ia85067d4258bde4b875c832d6223db5dd26b8838
---
 libutils/String8.cpp | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/libutils/String8.cpp b/libutils/String8.cpp
index ad0e72ec1..8f9c9f723 100644
--- a/libutils/String8.cpp
+++ b/libutils/String8.cpp
@@ -346,8 +346,14 @@ status_t String8::appendFormatV(const char* fmt, va_list args)
     n = vsnprintf(NULL, 0, fmt, tmp_args);
     va_end(tmp_args);
 
-    if (n != 0) {
+    if (n < 0) return UNKNOWN_ERROR;
+
+    if (n > 0) {
         size_t oldLength = length();
+        if ((size_t)n > SIZE_MAX - 1 ||
+            oldLength > SIZE_MAX - (size_t)n - 1) {
+            return NO_MEMORY;
+        }
         char* buf = lockBuffer(oldLength + n);
         if (buf) {
             vsnprintf(buf + oldLength, n + 1, fmt, args);