From b504fbd39fbd7dacfbb8b90994560adbc3986c53 Mon Sep 17 00:00:00 2001 From: Alex Buynytskyy Date: Fri, 27 Mar 2020 14:34:21 -0700 Subject: [PATCH 01/14] [incfs] Stream the hash tree for incremental installation Instead of sending the whole tree upfront use the same streaming interface for it as for the data blocks This improves installation speed by almost 200ms, 650-800ms -> 500-600ms Bug: 153696423 Test: manual, adb install in various configurations Change-Id: I32ad33c72b70067dc8703de02d2fbe26fda832d4 Merged-In: Ia83de2af54ca0b1969397514ea5d761719af9055 --- adb/client/incremental.cpp | 80 +-------- adb/client/incremental_server.cpp | 264 ++++++++++++++++++++++-------- adb/client/incremental_utils.cpp | 118 +++++++++++-- adb/client/incremental_utils.h | 26 ++- 4 files changed, 326 insertions(+), 162 deletions(-) diff --git a/adb/client/incremental.cpp b/adb/client/incremental.cpp index e30f384b8..a8b0ab3d0 100644 --- a/adb/client/incremental.cpp +++ b/adb/client/incremental.cpp @@ -16,13 +16,13 @@ #include "incremental.h" -#include +#include "incremental_utils.h" + #include #include #include #include "adb_client.h" -#include "adb_io.h" #include "adb_utils.h" #include "commandline.h" #include "sysdeps.h" @@ -31,65 +31,8 @@ using namespace std::literals; namespace incremental { -namespace { - -static constexpr auto IDSIG = ".idsig"sv; - using android::base::StringPrintf; -using Size = int64_t; - -static inline int32_t read_int32(borrowed_fd fd) { - int32_t result; - return ReadFdExactly(fd, &result, sizeof(result)) ? result : -1; -} - -static inline void append_int(borrowed_fd fd, std::vector* bytes) { - int32_t le_val = read_int32(fd); - auto old_size = bytes->size(); - bytes->resize(old_size + sizeof(le_val)); - memcpy(bytes->data() + old_size, &le_val, sizeof(le_val)); -} - -static inline void append_bytes_with_size(borrowed_fd fd, std::vector* bytes) { - int32_t le_size = read_int32(fd); - if (le_size < 0) { - return; - } - int32_t size = int32_t(le32toh(le_size)); - auto old_size = bytes->size(); - bytes->resize(old_size + sizeof(le_size) + size); - memcpy(bytes->data() + old_size, &le_size, sizeof(le_size)); - ReadFdExactly(fd, bytes->data() + old_size + sizeof(le_size), size); -} - -static inline std::pair, int32_t> read_id_sig_headers(borrowed_fd fd) { - std::vector result; - append_int(fd, &result); // version - append_bytes_with_size(fd, &result); // hashingInfo - append_bytes_with_size(fd, &result); // signingInfo - auto le_tree_size = read_int32(fd); - auto tree_size = int32_t(le32toh(le_tree_size)); // size of the verity tree - return {std::move(result), tree_size}; -} - -static inline Size verity_tree_size_for_file(Size fileSize) { - constexpr int INCFS_DATA_FILE_BLOCK_SIZE = 4096; - constexpr int SHA256_DIGEST_SIZE = 32; - constexpr int digest_size = SHA256_DIGEST_SIZE; - constexpr int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size; - - Size total_tree_block_count = 0; - - auto block_count = 1 + (fileSize - 1) / INCFS_DATA_FILE_BLOCK_SIZE; - auto hash_block_count = block_count; - for (auto i = 0; hash_block_count > 1; i++) { - hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block; - total_tree_block_count += hash_block_count; - } - return total_tree_block_count * INCFS_DATA_FILE_BLOCK_SIZE; -} - // Read, verify and return the signature bytes. Keeping fd at the position of start of verity tree. 
static std::pair> read_signature(Size file_size, std::string signature_file, @@ -104,7 +47,7 @@ static std::pair> read_signature(Size file_size, return {}; } - unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY | O_CLOEXEC)); + unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY)); if (fd < 0) { if (!silent) { fprintf(stderr, "Failed to open signature file: %s. Abort.\n", signature_file.c_str()); @@ -170,9 +113,8 @@ static unique_fd start_install(const Files& files, const Args& passthrough_args, return {}; } - auto file_desc = - StringPrintf("%s:%lld:%s:%s", android::base::Basename(file).c_str(), - (long long)st.st_size, std::to_string(i).c_str(), signature.c_str()); + auto file_desc = StringPrintf("%s:%lld:%d:%s:1", android::base::Basename(file).c_str(), + (long long)st.st_size, i, signature.c_str()); command_args.push_back(std::move(file_desc)); } @@ -186,21 +128,9 @@ static unique_fd start_install(const Files& files, const Args& passthrough_args, return {}; } - // Pushing verity trees for all installation files. - for (auto&& local_fd : signature_fds) { - if (!copy_to_file(local_fd.get(), connection_fd.get())) { - if (!silent) { - fprintf(stderr, "Failed to stream tree bytes: %s. Abort.\n", strerror(errno)); - } - return {}; - } - } - return connection_fd; } -} // namespace - bool can_install(const Files& files) { for (const auto& file : files) { struct stat st; diff --git a/adb/client/incremental_server.cpp b/adb/client/incremental_server.cpp index 4a131ce20..bfe18c0b2 100644 --- a/adb/client/incremental_server.cpp +++ b/adb/client/incremental_server.cpp @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2020 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,9 +44,10 @@ namespace incremental { -static constexpr int kBlockSize = 4096; +static constexpr int kHashesPerBlock = kBlockSize / kDigestSize; static constexpr int kCompressedSizeMax = kBlockSize * 0.95; static constexpr int8_t kTypeData = 0; +static constexpr int8_t kTypeHash = 1; static constexpr int8_t kCompressionNone = 0; static constexpr int8_t kCompressionLZ4 = 1; static constexpr int kCompressBound = std::max(kBlockSize, LZ4_COMPRESSBOUND(kBlockSize)); @@ -132,41 +133,64 @@ struct ResponseHeader { CompressionType compression_type; // 1 byte BlockIdx block_idx; // 4 bytes BlockSize block_size; // 2 bytes + + static constexpr size_t responseSizeFor(size_t dataSize) { + return dataSize + sizeof(ResponseHeader); + } +} __attribute__((packed)); + +template +struct BlockBuffer { + ResponseHeader header; + char data[Size]; } __attribute__((packed)); // Holds streaming state for a file class File { public: // Plain file - File(const char* filepath, FileId id, int64_t size, unique_fd fd) : File(filepath, id, size) { + File(const char* filepath, FileId id, int64_t size, unique_fd fd, int64_t tree_offset, + unique_fd tree_fd) + : File(filepath, id, size, tree_offset) { this->fd_ = std::move(fd); + this->tree_fd_ = std::move(tree_fd); priority_blocks_ = PriorityBlocksForFile(filepath, fd_.get(), size); } - int64_t ReadBlock(BlockIdx block_idx, void* buf, bool* is_zip_compressed, - std::string* error) const { - char* buf_ptr = static_cast(buf); + int64_t ReadDataBlock(BlockIdx block_idx, void* buf, bool* is_zip_compressed) const { int64_t bytes_read = -1; const off64_t offsetStart = blockIndexToOffset(block_idx); - bytes_read = adb_pread(fd_, &buf_ptr[sizeof(ResponseHeader)], kBlockSize, offsetStart); + bytes_read = adb_pread(fd_, buf, kBlockSize, offsetStart); + return bytes_read; + 
} + int64_t ReadTreeBlock(BlockIdx block_idx, void* buf) const { + int64_t bytes_read = -1; + const off64_t offsetStart = tree_offset_ + blockIndexToOffset(block_idx); + bytes_read = adb_pread(tree_fd_, buf, kBlockSize, offsetStart); return bytes_read; } - const unique_fd& RawFd() const { return fd_; } const std::vector& PriorityBlocks() const { return priority_blocks_; } std::vector sentBlocks; NumBlocks sentBlocksCount = 0; + std::vector sentTreeBlocks; + const char* const filepath; const FileId id; const int64_t size; private: - File(const char* filepath, FileId id, int64_t size) : filepath(filepath), id(id), size(size) { + File(const char* filepath, FileId id, int64_t size, int64_t tree_offset) + : filepath(filepath), id(id), size(size), tree_offset_(tree_offset) { sentBlocks.resize(numBytesToNumBlocks(size)); + sentTreeBlocks.resize(verity_tree_blocks_for_file(size)); } unique_fd fd_; std::vector priority_blocks_; + + unique_fd tree_fd_; + const int64_t tree_offset_; }; class IncrementalServer { @@ -174,6 +198,8 @@ class IncrementalServer { IncrementalServer(unique_fd adb_fd, unique_fd output_fd, std::vector files) : adb_fd_(std::move(adb_fd)), output_fd_(std::move(output_fd)), files_(std::move(files)) { buffer_.reserve(kReadBufferSize); + pendingBlocksBuffer_.resize(kChunkFlushSize + 2 * kBlockSize); + pendingBlocks_ = pendingBlocksBuffer_.data() + sizeof(ChunkHeader); } bool Serve(); @@ -208,7 +234,11 @@ class IncrementalServer { void erase_buffer_head(int count) { buffer_.erase(buffer_.begin(), buffer_.begin() + count); } enum class SendResult { Sent, Skipped, Error }; - SendResult SendBlock(FileId fileId, BlockIdx blockIdx, bool flush = false); + SendResult SendDataBlock(FileId fileId, BlockIdx blockIdx, bool flush = false); + + bool SendTreeBlock(FileId fileId, int32_t fileBlockIdx, BlockIdx blockIdx); + bool SendTreeBlocksForDataBlock(FileId fileId, BlockIdx blockIdx); + bool SendDone(); void RunPrefetching(); @@ -228,7 +258,10 @@ class IncrementalServer { int compressed_ = 0, uncompressed_ = 0; long long sentSize_ = 0; - std::vector pendingBlocks_; + static constexpr auto kChunkFlushSize = 31 * kBlockSize; + + std::vector pendingBlocksBuffer_; + char* pendingBlocks_ = nullptr; // True when client notifies that all the data has been received bool servingComplete_ = false; @@ -250,7 +283,7 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking) if (bcur > 0) { // output the rest. - WriteFdExactly(output_fd_, buffer_.data(), bcur); + (void)WriteFdExactly(output_fd_, buffer_.data(), bcur); erase_buffer_head(bcur); } @@ -265,9 +298,10 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking) auto res = adb_poll(&pfd, 1, blocking ? kPollTimeoutMillis : 0); if (res != 1) { - WriteFdExactly(output_fd_, buffer_.data(), buffer_.size()); + auto err = errno; + (void)WriteFdExactly(output_fd_, buffer_.data(), buffer_.size()); if (res < 0) { - D("Failed to poll: %s\n", strerror(errno)); + D("Failed to poll: %s", strerror(err)); return false; } if (blocking) { @@ -289,7 +323,7 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking) continue; } - D("Failed to read from fd %d: %d. Exit\n", adb_fd_.get(), errno); + D("Failed to read from fd %d: %d. Exit", adb_fd_.get(), errno); break; } // socket is closed. 
print remaining messages @@ -313,56 +347,113 @@ std::optional IncrementalServer::ReadRequest(bool blocking) { return request; } -auto IncrementalServer::SendBlock(FileId fileId, BlockIdx blockIdx, bool flush) -> SendResult { +bool IncrementalServer::SendTreeBlocksForDataBlock(const FileId fileId, const BlockIdx blockIdx) { + auto& file = files_[fileId]; + const int32_t data_block_count = numBytesToNumBlocks(file.size); + + const int32_t total_nodes_count(file.sentTreeBlocks.size()); + const int32_t leaf_nodes_count = (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock; + + const int32_t leaf_nodes_offset = total_nodes_count - leaf_nodes_count; + + // Leaf level, sending only 1 block. + const int32_t leaf_idx = leaf_nodes_offset + blockIdx / kHashesPerBlock; + if (file.sentTreeBlocks[leaf_idx]) { + return true; + } + if (!SendTreeBlock(fileId, blockIdx, leaf_idx)) { + return false; + } + file.sentTreeBlocks[leaf_idx] = true; + + // Non-leaf, sending EVERYTHING. This should be done only once. + if (leaf_nodes_offset == 0 || file.sentTreeBlocks[0]) { + return true; + } + + for (int32_t i = 0; i < leaf_nodes_offset; ++i) { + if (!SendTreeBlock(fileId, blockIdx, i)) { + return false; + } + file.sentTreeBlocks[i] = true; + } + return true; +} + +bool IncrementalServer::SendTreeBlock(FileId fileId, int32_t fileBlockIdx, BlockIdx blockIdx) { + const auto& file = files_[fileId]; + + BlockBuffer buffer; + const int64_t bytesRead = file.ReadTreeBlock(blockIdx, buffer.data); + if (bytesRead <= 0) { + fprintf(stderr, "Failed to get data for %s.idsig at blockIdx=%d.\n", file.filepath, + blockIdx); + return false; + } + + buffer.header.compression_type = kCompressionNone; + buffer.header.block_type = kTypeHash; + buffer.header.file_id = toBigEndian(fileId); + buffer.header.block_size = toBigEndian(int16_t(bytesRead)); + buffer.header.block_idx = toBigEndian(blockIdx); + + Send(&buffer, ResponseHeader::responseSizeFor(bytesRead), /*flush=*/false); + + return true; +} + +auto IncrementalServer::SendDataBlock(FileId fileId, BlockIdx blockIdx, bool flush) -> SendResult { auto& file = files_[fileId]; if (blockIdx >= static_cast(file.sentBlocks.size())) { - fprintf(stderr, "Failed to read file %s at block %" PRId32 " (past end).\n", file.filepath, - blockIdx); - return SendResult::Error; + // may happen as we schedule some extra blocks for reported page misses + D("Skipped reading file %s at block %" PRId32 " (past end).", file.filepath, blockIdx); + return SendResult::Skipped; } if (file.sentBlocks[blockIdx]) { return SendResult::Skipped; } - std::string error; - char raw[sizeof(ResponseHeader) + kBlockSize]; - bool isZipCompressed = false; - const int64_t bytesRead = file.ReadBlock(blockIdx, &raw, &isZipCompressed, &error); - if (bytesRead < 0) { - fprintf(stderr, "Failed to get data for %s at blockIdx=%d (%s).\n", file.filepath, blockIdx, - error.c_str()); + + if (!SendTreeBlocksForDataBlock(fileId, blockIdx)) { return SendResult::Error; } - ResponseHeader* header = nullptr; - char data[sizeof(ResponseHeader) + kCompressBound]; - char* compressed = data + sizeof(*header); + BlockBuffer raw; + bool isZipCompressed = false; + const int64_t bytesRead = file.ReadDataBlock(blockIdx, raw.data, &isZipCompressed); + if (bytesRead < 0) { + fprintf(stderr, "Failed to get data for %s at blockIdx=%d (%d).\n", file.filepath, blockIdx, + errno); + return SendResult::Error; + } + + BlockBuffer compressed; int16_t compressedSize = 0; if (!isZipCompressed) { - compressedSize = - LZ4_compress_default(raw + 
sizeof(*header), compressed, bytesRead, kCompressBound); + compressedSize = LZ4_compress_default(raw.data, compressed.data, bytesRead, kCompressBound); } int16_t blockSize; + ResponseHeader* header; if (compressedSize > 0 && compressedSize < kCompressedSizeMax) { ++compressed_; blockSize = compressedSize; - header = reinterpret_cast(data); + header = &compressed.header; header->compression_type = kCompressionLZ4; } else { ++uncompressed_; blockSize = bytesRead; - header = reinterpret_cast(raw); + header = &raw.header; header->compression_type = kCompressionNone; } header->block_type = kTypeData; - header->file_id = toBigEndian(fileId); header->block_size = toBigEndian(blockSize); header->block_idx = toBigEndian(blockIdx); file.sentBlocks[blockIdx] = true; file.sentBlocksCount += 1; - Send(header, sizeof(*header) + blockSize, flush); + Send(header, ResponseHeader::responseSizeFor(blockSize), flush); + return SendResult::Sent; } @@ -388,7 +479,8 @@ void IncrementalServer::RunPrefetching() { if (!priority_blocks.empty()) { for (auto& i = prefetch.priorityIndex; blocksToSend > 0 && i < (BlockIdx)priority_blocks.size(); ++i) { - if (auto res = SendBlock(file.id, priority_blocks[i]); res == SendResult::Sent) { + if (auto res = SendDataBlock(file.id, priority_blocks[i]); + res == SendResult::Sent) { --blocksToSend; } else if (res == SendResult::Error) { fprintf(stderr, "Failed to send priority block %" PRId32 "\n", i); @@ -396,7 +488,7 @@ void IncrementalServer::RunPrefetching() { } } for (auto& i = prefetch.overallIndex; blocksToSend > 0 && i < prefetch.overallEnd; ++i) { - if (auto res = SendBlock(file.id, i); res == SendResult::Sent) { + if (auto res = SendDataBlock(file.id, i); res == SendResult::Sent) { --blocksToSend; } else if (res == SendResult::Error) { fprintf(stderr, "Failed to send block %" PRId32 "\n", i); @@ -409,30 +501,25 @@ void IncrementalServer::RunPrefetching() { } void IncrementalServer::Send(const void* data, size_t size, bool flush) { - constexpr auto kChunkFlushSize = 31 * kBlockSize; - - if (pendingBlocks_.empty()) { - pendingBlocks_.resize(sizeof(ChunkHeader)); - } - pendingBlocks_.insert(pendingBlocks_.end(), static_cast(data), - static_cast(data) + size); - if (flush || pendingBlocks_.size() > kChunkFlushSize) { + pendingBlocks_ = std::copy_n(static_cast(data), size, pendingBlocks_); + if (flush || pendingBlocks_ - pendingBlocksBuffer_.data() > kChunkFlushSize) { Flush(); } } void IncrementalServer::Flush() { - if (pendingBlocks_.empty()) { + auto dataBytes = pendingBlocks_ - (pendingBlocksBuffer_.data() + sizeof(ChunkHeader)); + if (dataBytes == 0) { return; } - *(ChunkHeader*)pendingBlocks_.data() = - toBigEndian(pendingBlocks_.size() - sizeof(ChunkHeader)); - if (!WriteFdExactly(adb_fd_, pendingBlocks_.data(), pendingBlocks_.size())) { - fprintf(stderr, "Failed to write %d bytes\n", int(pendingBlocks_.size())); + *(ChunkHeader*)pendingBlocksBuffer_.data() = toBigEndian(dataBytes); + auto totalBytes = sizeof(ChunkHeader) + dataBytes; + if (!WriteFdExactly(adb_fd_, pendingBlocksBuffer_.data(), totalBytes)) { + fprintf(stderr, "Failed to write %d bytes\n", int(totalBytes)); } - sentSize_ += pendingBlocks_.size(); - pendingBlocks_.clear(); + sentSize_ += totalBytes; + pendingBlocks_ = pendingBlocksBuffer_.data() + sizeof(ChunkHeader); } bool IncrementalServer::ServingComplete(std::optional startTime, int missesCount, @@ -443,7 +530,7 @@ bool IncrementalServer::ServingComplete(std::optional startTime, int D("Streaming completed.\n" "Misses: %d, of those unique: %d; sent 
compressed: %d, uncompressed: " "%d, mb: %.3f\n" - "Total time taken: %.3fms\n", + "Total time taken: %.3fms", missesCount, missesSent, compressed_, uncompressed_, sentSize_ / 1024.0 / 1024.0, duration_cast(endTime - (startTime ? *startTime : endTime)).count() / 1000.0); return true; @@ -510,9 +597,21 @@ bool IncrementalServer::Serve() { fileId, blockIdx); break; } - // fprintf(stderr, "\treading file %d block %04d\n", (int)fileId, - // (int)blockIdx); - if (auto res = SendBlock(fileId, blockIdx, true); res == SendResult::Error) { + + if (VLOG_IS_ON(INCREMENTAL)) { + auto& file = files_[fileId]; + auto posP = std::find(file.PriorityBlocks().begin(), + file.PriorityBlocks().end(), blockIdx); + D("\tMISSING BLOCK: reading file %d block %04d (in priority: %d of %d)", + (int)fileId, (int)blockIdx, + posP == file.PriorityBlocks().end() + ? -1 + : int(posP - file.PriorityBlocks().begin()), + int(file.PriorityBlocks().size())); + } + + if (auto res = SendDataBlock(fileId, blockIdx, true); + res == SendResult::Error) { fprintf(stderr, "Failed to send block %" PRId32 ".\n", blockIdx); } else if (res == SendResult::Sent) { ++missesSent; @@ -536,7 +635,7 @@ bool IncrementalServer::Serve() { fileId); break; } - D("Received prefetch request for file_id %" PRId16 ".\n", fileId); + D("Received prefetch request for file_id %" PRId16 ".", fileId); prefetches_.emplace_back(files_[fileId]); break; } @@ -551,6 +650,43 @@ bool IncrementalServer::Serve() { } } +static std::pair open_fd(const char* filepath) { + struct stat st; + if (stat(filepath, &st)) { + error_exit("inc-server: failed to stat input file '%s'.", filepath); + } + + unique_fd fd(adb_open(filepath, O_RDONLY)); + if (fd < 0) { + error_exit("inc-server: failed to open file '%s'.", filepath); + } + + return {std::move(fd), st.st_size}; +} + +static std::pair open_signature(int64_t file_size, const char* filepath) { + std::string signature_file(filepath); + signature_file += IDSIG; + + unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY)); + if (fd < 0) { + error_exit("inc-server: failed to open file '%s'.", signature_file.c_str()); + } + + auto [tree_offset, tree_size] = skip_id_sig_headers(fd); + if (auto expected = verity_tree_size_for_file(file_size); tree_size != expected) { + error_exit("Verity tree size mismatch in signature file: %s [was %lld, expected %lld].\n", + signature_file.c_str(), (long long)tree_size, (long long)expected); + } + + int32_t data_block_count = numBytesToNumBlocks(file_size); + int32_t leaf_nodes_count = (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock; + D("Verity tree loaded: %s, tree size: %d (%d blocks, %d leafs)", signature_file.c_str(), + int(tree_size), int(numBytesToNumBlocks(tree_size)), int(leaf_nodes_count)); + + return {std::move(fd), tree_offset}; +} + bool serve(int connection_fd, int output_fd, int argc, const char** argv) { auto connection_ufd = unique_fd(connection_fd); auto output_ufd = unique_fd(output_fd); @@ -563,17 +699,11 @@ bool serve(int connection_fd, int output_fd, int argc, const char** argv) { for (int i = 0; i < argc; ++i) { auto filepath = argv[i]; - struct stat st; - if (stat(filepath, &st)) { - fprintf(stderr, "Failed to stat input file %s. 
Abort.\n", filepath); - return {}; - } + auto [file_fd, file_size] = open_fd(filepath); + auto [sign_fd, sign_offset] = open_signature(file_size, filepath); - unique_fd fd(adb_open(filepath, O_RDONLY)); - if (fd < 0) { - error_exit("inc-server: failed to open file '%s'.", filepath); - } - files.emplace_back(filepath, i, st.st_size, std::move(fd)); + files.emplace_back(filepath, i, file_size, std::move(file_fd), sign_offset, + std::move(sign_fd)); } IncrementalServer server(std::move(connection_ufd), std::move(output_ufd), std::move(files)); diff --git a/adb/client/incremental_utils.cpp b/adb/client/incremental_utils.cpp index fa501e49d..dd117d228 100644 --- a/adb/client/incremental_utils.cpp +++ b/adb/client/incremental_utils.cpp @@ -18,6 +18,7 @@ #include "incremental_utils.h" +#include #include #include #include @@ -28,19 +29,98 @@ #include #include +#include "adb_io.h" #include "adb_trace.h" #include "sysdeps.h" using namespace std::literals; -static constexpr int kBlockSize = 4096; +namespace incremental { static constexpr inline int32_t offsetToBlockIndex(int64_t offset) { return (offset & ~(kBlockSize - 1)) >> 12; } +Size verity_tree_blocks_for_file(Size fileSize) { + if (fileSize == 0) { + return 0; + } + + constexpr int hash_per_block = kBlockSize / kDigestSize; + + Size total_tree_block_count = 0; + + auto block_count = 1 + (fileSize - 1) / kBlockSize; + auto hash_block_count = block_count; + for (auto i = 0; hash_block_count > 1; i++) { + hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block; + total_tree_block_count += hash_block_count; + } + return total_tree_block_count; +} + +Size verity_tree_size_for_file(Size fileSize) { + return verity_tree_blocks_for_file(fileSize) * kBlockSize; +} + +static inline int32_t read_int32(borrowed_fd fd) { + int32_t result; + return ReadFdExactly(fd, &result, sizeof(result)) ? 
result : -1; +} + +static inline int32_t skip_int(borrowed_fd fd) { + return adb_lseek(fd, 4, SEEK_CUR); +} + +static inline void append_int(borrowed_fd fd, std::vector* bytes) { + int32_t le_val = read_int32(fd); + auto old_size = bytes->size(); + bytes->resize(old_size + sizeof(le_val)); + memcpy(bytes->data() + old_size, &le_val, sizeof(le_val)); +} + +static inline void append_bytes_with_size(borrowed_fd fd, std::vector* bytes) { + int32_t le_size = read_int32(fd); + if (le_size < 0) { + return; + } + int32_t size = int32_t(le32toh(le_size)); + auto old_size = bytes->size(); + bytes->resize(old_size + sizeof(le_size) + size); + memcpy(bytes->data() + old_size, &le_size, sizeof(le_size)); + ReadFdExactly(fd, bytes->data() + old_size + sizeof(le_size), size); +} + +static inline int32_t skip_bytes_with_size(borrowed_fd fd) { + int32_t le_size = read_int32(fd); + if (le_size < 0) { + return -1; + } + int32_t size = int32_t(le32toh(le_size)); + return (int32_t)adb_lseek(fd, size, SEEK_CUR); +} + +std::pair, int32_t> read_id_sig_headers(borrowed_fd fd) { + std::vector result; + append_int(fd, &result); // version + append_bytes_with_size(fd, &result); // hashingInfo + append_bytes_with_size(fd, &result); // signingInfo + auto le_tree_size = read_int32(fd); + auto tree_size = int32_t(le32toh(le_tree_size)); // size of the verity tree + return {std::move(result), tree_size}; +} + +std::pair skip_id_sig_headers(borrowed_fd fd) { + skip_int(fd); // version + skip_bytes_with_size(fd); // hashingInfo + auto offset = skip_bytes_with_size(fd); // signingInfo + auto le_tree_size = read_int32(fd); + auto tree_size = int32_t(le32toh(le_tree_size)); // size of the verity tree + return {offset + sizeof(le_tree_size), tree_size}; +} + template -T valueAt(int fd, off64_t offset) { +static T valueAt(borrowed_fd fd, off64_t offset) { T t; memset(&t, 0, sizeof(T)); if (adb_pread(fd, &t, sizeof(T), offset) != sizeof(T)) { @@ -68,7 +148,7 @@ static void unduplicate(std::vector& v) { v.end()); } -static off64_t CentralDirOffset(int fd, int64_t fileSize) { +static off64_t CentralDirOffset(borrowed_fd fd, Size fileSize) { static constexpr int kZipEocdRecMinSize = 22; static constexpr int32_t kZipEocdRecSig = 0x06054b50; static constexpr int kZipEocdCentralDirSizeFieldOffset = 12; @@ -103,7 +183,7 @@ static off64_t CentralDirOffset(int fd, int64_t fileSize) { } // Does not support APKs larger than 4GB -static off64_t SignerBlockOffset(int fd, int64_t fileSize) { +static off64_t SignerBlockOffset(borrowed_fd fd, Size fileSize) { static constexpr int kApkSigBlockMinSize = 32; static constexpr int kApkSigBlockFooterSize = 24; static constexpr int64_t APK_SIG_BLOCK_MAGIC_HI = 0x3234206b636f6c42l; @@ -132,7 +212,7 @@ static off64_t SignerBlockOffset(int fd, int64_t fileSize) { return signerBlockOffset; } -static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, int64_t fileSize) { +static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, Size fileSize) { int32_t signerBlockIndex = offsetToBlockIndex(signerBlockOffset); int32_t lastBlockIndex = offsetToBlockIndex(fileSize); const auto numPriorityBlocks = lastBlockIndex - signerBlockIndex + 1; @@ -160,7 +240,7 @@ static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, int64_t return zipPriorityBlocks; } -[[maybe_unused]] static ZipArchiveHandle openZipArchiveFd(int fd) { +[[maybe_unused]] static ZipArchiveHandle openZipArchiveFd(borrowed_fd fd) { bool transferFdOwnership = false; #ifdef _WIN32 // @@ -179,20 +259,22 @@ static std::vector 
ZipPriorityBlocks(off64_t signerBlockOffset, int64_t D("%s failed at DuplicateHandle: %d", __func__, (int)::GetLastError()); return {}; } - fd = _open_osfhandle((intptr_t)dupedHandle, _O_RDONLY | _O_BINARY); - if (fd < 0) { + int osfd = _open_osfhandle((intptr_t)dupedHandle, _O_RDONLY | _O_BINARY); + if (osfd < 0) { D("%s failed at _open_osfhandle: %d", __func__, errno); ::CloseHandle(handle); return {}; } transferFdOwnership = true; +#else + int osfd = fd.get(); #endif ZipArchiveHandle zip; - if (OpenArchiveFd(fd, "apk_fd", &zip, transferFdOwnership) != 0) { + if (OpenArchiveFd(osfd, "apk_fd", &zip, transferFdOwnership) != 0) { D("%s failed at OpenArchiveFd: %d", __func__, errno); #ifdef _WIN32 // "_close()" is a secret WinCRT name for the regular close() function. - _close(fd); + _close(osfd); #endif return {}; } @@ -200,7 +282,7 @@ static std::vector ZipPriorityBlocks(off64_t signerBlockOffset, int64_t } static std::pair> openZipArchive( - int fd, int64_t fileSize) { + borrowed_fd fd, Size fileSize) { #ifndef __LP64__ if (fileSize >= INT_MAX) { return {openZipArchiveFd(fd), nullptr}; @@ -220,7 +302,7 @@ static std::pair> o return {zip, std::move(mapping)}; } -static std::vector InstallationPriorityBlocks(int fd, int64_t fileSize) { +static std::vector InstallationPriorityBlocks(borrowed_fd fd, Size fileSize) { static constexpr std::array additional_matches = { "resources.arsc"sv, "AndroidManifest.xml"sv, "classes.dex"sv}; auto [zip, _] = openZipArchive(fd, fileSize); @@ -249,8 +331,9 @@ static std::vector InstallationPriorityBlocks(int fd, int64_t fileSize) if (entryName == "classes.dex"sv) { // Only the head is needed for installation int32_t startBlockIndex = offsetToBlockIndex(entry.offset); - appendBlocks(startBlockIndex, 1, &installationPriorityBlocks); - D("\tadding to priority blocks: '%.*s' 1", (int)entryName.size(), entryName.data()); + appendBlocks(startBlockIndex, 2, &installationPriorityBlocks); + D("\tadding to priority blocks: '%.*s' (%d)", (int)entryName.size(), entryName.data(), + 2); } else { // Full entries are needed for installation off64_t entryStartOffset = entry.offset; @@ -273,9 +356,9 @@ static std::vector InstallationPriorityBlocks(int fd, int64_t fileSize) return installationPriorityBlocks; } -namespace incremental { -std::vector PriorityBlocksForFile(const std::string& filepath, int fd, int64_t fileSize) { - if (!android::base::EndsWithIgnoreCase(filepath, ".apk")) { +std::vector PriorityBlocksForFile(const std::string& filepath, borrowed_fd fd, + Size fileSize) { + if (!android::base::EndsWithIgnoreCase(filepath, ".apk"sv)) { return {}; } off64_t signerOffset = SignerBlockOffset(fd, fileSize); @@ -291,4 +374,5 @@ std::vector PriorityBlocksForFile(const std::string& filepath, int fd, unduplicate(priorityBlocks); return priorityBlocks; } + } // namespace incremental diff --git a/adb/client/incremental_utils.h b/adb/client/incremental_utils.h index 8bcf6c081..d969d9412 100644 --- a/adb/client/incremental_utils.h +++ b/adb/client/incremental_utils.h @@ -16,11 +16,31 @@ #pragma once -#include +#include "adb_unique_fd.h" #include +#include +#include #include +#include + namespace incremental { -std::vector PriorityBlocksForFile(const std::string& filepath, int fd, int64_t fileSize); -} // namespace incremental \ No newline at end of file + +using Size = int64_t; +constexpr int kBlockSize = 4096; +constexpr int kSha256DigestSize = 32; +constexpr int kDigestSize = kSha256DigestSize; + +constexpr std::string_view IDSIG = ".idsig"; + +std::vector 
PriorityBlocksForFile(const std::string& filepath, borrowed_fd fd, + Size fileSize); + +Size verity_tree_blocks_for_file(Size fileSize); +Size verity_tree_size_for_file(Size fileSize); + +std::pair, int32_t> read_id_sig_headers(borrowed_fd fd); +std::pair skip_id_sig_headers(borrowed_fd fd); + +} // namespace incremental From eb568b55826b17ceaeaa242f74f99a98c701d1b8 Mon Sep 17 00:00:00 2001 From: "P.Adarsh Reddy" Date: Tue, 21 Jul 2020 03:09:28 +0530 Subject: [PATCH 02/14] Ueventd: Fix a corner case in ReadUevent() that triggers duplicate firmware loading. Presently, within ReadUevent(), true is returned for a successful case as well as for the case where we read an invalid uevent (overflowed buffer) where the Uevent object is not cleared, and the caller calls the callback (with the earlier stored uevent object), leading to duplicate firmware loading. Uevent uevent; while (ReadUevent(&uevent)) { if (callback(uevent) == ListenerAction::kStop) return; } Scenario: 1. Proper Uevent received and callback is called (firmware loading is triggered). 2. Overflowed uevent is received as part of the same ReadUevent session, ReadUevent() returns true, but the uevent object is not cleared and still has earlier event values. 3. Callback is called again, leading to duplicate firmware load. Handle it by adding explicit return codes to let the caller know if the uevent read is invalid, and the caller can ignore it and read further pending uevents. Bug: 161580785 Test: in AOSP Merged-In: I09e80052337fd1495b968dc02ecff5ceb683da18 Change-Id: I09e80052337fd1495b968dc02ecff5ceb683da18 (cherry picked from commit a86bf05a012e871c41b4676b0cc947b1baea194b) --- init/uevent_listener.cpp | 20 ++++++++++++-------- init/uevent_listener.h | 8 +++++++- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/init/uevent_listener.cpp b/init/uevent_listener.cpp index d8d9b36ff..7cd396a8b 100644 --- a/init/uevent_listener.cpp +++ b/init/uevent_listener.cpp @@ -95,20 +95,18 @@ UeventListener::UeventListener(size_t uevent_socket_rcvbuf_size) { fcntl(device_fd_, F_SETFL, O_NONBLOCK); } -bool UeventListener::ReadUevent(Uevent* uevent) const { +ReadUeventResult UeventListener::ReadUevent(Uevent* uevent) const { char msg[UEVENT_MSG_LEN + 2]; int n = uevent_kernel_multicast_recv(device_fd_, msg, UEVENT_MSG_LEN); if (n <= 0) { if (errno != EAGAIN && errno != EWOULDBLOCK) { PLOG(ERROR) << "Error reading from Uevent Fd"; } - return false; + return ReadUeventResult::kFailed; } if (n >= UEVENT_MSG_LEN) { LOG(ERROR) << "Uevent overflowed buffer, discarding"; - // Return true here even if we discard as we may have more uevents pending and we - // want to keep processing them. - return true; + return ReadUeventResult::kInvalid; } msg[n] = '\0'; @@ -116,7 +114,7 @@ bool UeventListener::ReadUevent(Uevent* uevent) const { ParseEvent(msg, uevent); - return true; + return ReadUeventResult::kSuccess; } // RegenerateUevents*() walks parts of the /sys tree and pokes the uevent files to cause the kernel @@ -137,7 +135,10 @@ ListenerAction UeventListener::RegenerateUeventsForDir(DIR* d, close(fd); Uevent uevent; - while (ReadUevent(&uevent)) { + ReadUeventResult result; + while ((result = ReadUevent(&uevent)) != ReadUeventResult::kFailed) { + // Skip processing the uevent if it is invalid.
+ if (result == ReadUeventResult::kInvalid) continue; if (callback(uevent) == ListenerAction::kStop) return ListenerAction::kStop; } } @@ -212,7 +213,10 @@ void UeventListener::Poll(const ListenerCallback& callback, // We're non-blocking, so if we receive a poll event keep processing until // we have exhausted all uevent messages. Uevent uevent; - while (ReadUevent(&uevent)) { + ReadUeventResult result; + while ((result = ReadUevent(&uevent)) != ReadUeventResult::kFailed) { + // Skip processing the uevent if it is invalid. + if (result == ReadUeventResult::kInvalid) continue; if (callback(uevent) == ListenerAction::kStop) return; } } diff --git a/init/uevent_listener.h b/init/uevent_listener.h index aea094e77..d308fa75f 100644 --- a/init/uevent_listener.h +++ b/init/uevent_listener.h @@ -37,6 +37,12 @@ enum class ListenerAction { kContinue, // Continue regenerating uevents as we haven't seen the one(s) we're interested in. }; +enum class ReadUeventResult { + kSuccess = 0, // Uevent was successfully read. + kFailed, // Uevent reading has failed. + kInvalid, // An Invalid Uevent was read (like say, the msg received is >= UEVENT_MSG_LEN). +}; + using ListenerCallback = std::function; class UeventListener { @@ -50,7 +56,7 @@ class UeventListener { const std::optional relative_timeout = {}) const; private: - bool ReadUevent(Uevent* uevent) const; + ReadUeventResult ReadUevent(Uevent* uevent) const; ListenerAction RegenerateUeventsForDir(DIR* d, const ListenerCallback& callback) const; android::base::unique_fd device_fd_; From fd9e6e1472c4c2aec4622e163e6f6d1d82c0d7b6 Mon Sep 17 00:00:00 2001 From: Tom Cherry Date: Mon, 20 Jul 2020 13:18:01 -0700 Subject: [PATCH 03/14] ueventd: bump UEVENT_MSG_LEN to 8192 The previous size, 2048, is only the size of the 'environment' for the uevent message, but doesn't include the <subsystem>@<devpath> portion. The <subsystem> portion has a max length < 10, but the <devpath> portion is unbounded. 8192 should be plenty to capture all of these parameters. Bug: 161580785 Test: ueventd still works Merged-In: I6de6fd3a444ac91b3b4df154097abde3696e21b3 Change-Id: I6de6fd3a444ac91b3b4df154097abde3696e21b3 (cherry picked from commit 939b41c79b4a02b814e9aa677e85b3b5cacc83c8) --- init/uevent_listener.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/uevent_listener.h b/init/uevent_listener.h index aea094e77..f9f954aa0 100644 --- a/init/uevent_listener.h +++ b/init/uevent_listener.h @@ -27,7 +27,7 @@ #include "uevent.h" -#define UEVENT_MSG_LEN 2048 +#define UEVENT_MSG_LEN 8192 namespace android { namespace init { From 7a6e6204470e81c15a534cc0cac47939a1b3b1bc Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 4 Aug 2020 11:11:13 -0700 Subject: [PATCH 04/14] fs_mgr: Revert "Try to recover corrupted ext4 /data with backup superblock" This reverts commit 72abd7b246f733e5f417e2bc529c9b482c4a778b (change Ia39af3340c0e241f62557b7c2cc8b800443342f9). When vold enables either FDE or metadata encryption, it encrypts the filesystem in-place. Unfortunately, due to a bug, for ext4 filesystems it hasn't been encrypting the backup superblocks. Also, in read_ext4_superblock(), the check for StartsWith(blk_device, "/dev/block/dm-") can return true even if the encryption mapping hasn't been added yet, since when a GSI image is booted the userdata block device is a logical volume using dm-linear. The result is that read_ext4_superblock() can recognize a backup superblock when the encryption mapping hasn't been added yet, causing e2fsck to run without the encryption mapping and corrupt the filesystem.
https://android-review.googlesource.com/c/platform/system/vold/+/1385029 will fix this for new or factory-reset devices. However, there probably are many existing devices that already have their backup superblocks unencrypted. Therefore, the EncryptInPlace fix isn't enough and we have to revert the change that started using the backup superblocks too. Bug: 161871210 Bug: 162479411 Change-Id: I279f84c072bc6c8d3e251a5e95c78f8d6c0d50ba Merged-In: I279f84c072bc6c8d3e251a5e95c78f8d6c0d50ba (cherry picked from dc3e897a881291c7803dd1fc517915952396dd98) --- fs_mgr/fs_mgr.cpp | 35 +++++------------------------------ 1 file changed, 5 insertions(+), 30 deletions(-) diff --git a/fs_mgr/fs_mgr.cpp b/fs_mgr/fs_mgr.cpp index 956147198..30db652ed 100644 --- a/fs_mgr/fs_mgr.cpp +++ b/fs_mgr/fs_mgr.cpp @@ -301,13 +301,10 @@ static bool is_ext4_superblock_valid(const struct ext4_super_block* es) { return true; } -static bool needs_block_encryption(const FstabEntry& entry); -static bool should_use_metadata_encryption(const FstabEntry& entry); - // Read the primary superblock from an ext4 filesystem. On failure return // false. If it's not an ext4 filesystem, also set FS_STAT_INVALID_MAGIC. -static bool read_ext4_superblock(const std::string& blk_device, const FstabEntry& entry, - struct ext4_super_block* sb, int* fs_stat) { +static bool read_ext4_superblock(const std::string& blk_device, struct ext4_super_block* sb, + int* fs_stat) { android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(blk_device.c_str(), O_RDONLY | O_CLOEXEC))); if (fd < 0) { @@ -324,29 +321,7 @@ static bool read_ext4_superblock(const std::string& blk_device, const FstabEntry LINFO << "Invalid ext4 superblock on '" << blk_device << "'"; // not a valid fs, tune2fs, fsck, and mount will all fail. *fs_stat |= FS_STAT_INVALID_MAGIC; - - bool encrypted = should_use_metadata_encryption(entry) || needs_block_encryption(entry); - if (entry.mount_point == "/data" && - (!encrypted || android::base::StartsWith(blk_device, "/dev/block/dm-"))) { - // try backup superblock, if main superblock is corrupted - for (unsigned int blocksize = EXT4_MIN_BLOCK_SIZE; blocksize <= EXT4_MAX_BLOCK_SIZE; - blocksize *= 2) { - uint64_t superblock = blocksize * 8; - if (blocksize == EXT4_MIN_BLOCK_SIZE) superblock++; - - if (TEMP_FAILURE_RETRY(pread(fd, sb, sizeof(*sb), superblock * blocksize)) != - sizeof(*sb)) { - PERROR << "Can't read '" << blk_device << "' superblock"; - return false; - } - if (is_ext4_superblock_valid(sb) && - (1 << (10 + sb->s_log_block_size) == blocksize)) { - *fs_stat &= ~FS_STAT_INVALID_MAGIC; - break; - } - } - } - if (*fs_stat & FS_STAT_INVALID_MAGIC) return false; + return false; } *fs_stat |= FS_STAT_IS_EXT4; LINFO << "superblock s_max_mnt_count:" << sb->s_max_mnt_count << "," << blk_device; @@ -688,7 +663,7 @@ static int prepare_fs_for_mount(const std::string& blk_device, const FstabEntry& if (is_extfs(entry.fs_type)) { struct ext4_super_block sb; - if (read_ext4_superblock(blk_device, entry, &sb, &fs_stat)) { + if (read_ext4_superblock(blk_device, &sb, &fs_stat)) { if ((sb.s_feature_incompat & EXT4_FEATURE_INCOMPAT_RECOVER) != 0 || (sb.s_state & EXT4_VALID_FS) == 0) { LINFO << "Filesystem on " << blk_device << " was not cleanly shutdown; " @@ -718,7 +693,7 @@ static int prepare_fs_for_mount(const std::string& blk_device, const FstabEntry& entry.fs_mgr_flags.fs_verity || entry.fs_mgr_flags.ext_meta_csum)) { struct ext4_super_block sb; - if (read_ext4_superblock(blk_device, entry, &sb, &fs_stat)) { + if (read_ext4_superblock(blk_device, &sb, 
&fs_stat)) { tune_reserved_size(blk_device, entry, &sb, &fs_stat); tune_encrypt(blk_device, entry, &sb, &fs_stat); tune_verity(blk_device, entry, &sb, &fs_stat); From e689b935d106b69f3dec1ccc115377a982b56a01 Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Tue, 2 Jun 2020 15:21:13 -0700 Subject: [PATCH 05/14] libprocessgroup: support for cgroup v2 hierarchy for a first implementation the cgroup v2 freezer controller will be used in a way similar to cgroup v1, that is a single child group will hold all frozen processes. Some adjustments are needed for the new structure. - Add support for cgroup v2 syntax under procfs. - Separate creation of a directory with ownership/mode changes to allow changes after mounting the cgroup kernfs root. - Allow the creation of sub-groups under a cgroup v2 hierarchy. Bug: 154548692 Test: manually verified that a proper cgroup v2 hierarchy is created and accessible Change-Id: I9af59e8214acaead3f520a94c95e75394c0df948 Merged-In: I9af59e8214acaead3f520a94c95e75394c0df948 --- libprocessgroup/cgroup_map.cpp | 8 +- .../cgrouprc/include/android/cgrouprc.h | 1 + libprocessgroup/setup/cgroup_descriptor.h | 2 +- libprocessgroup/setup/cgroup_map_write.cpp | 232 ++++++++++++------ 4 files changed, 162 insertions(+), 81 deletions(-) diff --git a/libprocessgroup/cgroup_map.cpp b/libprocessgroup/cgroup_map.cpp index 2fc920b54..b82b0ab63 100644 --- a/libprocessgroup/cgroup_map.cpp +++ b/libprocessgroup/cgroup_map.cpp @@ -115,7 +115,13 @@ bool CgroupController::GetTaskGroup(int tid, std::string* group) const { return true; } - std::string cg_tag = StringPrintf(":%s:", name()); + std::string cg_tag; + + if (version() == 2) { + cg_tag = "0::"; + } else { + cg_tag = StringPrintf(":%s:", name()); + } size_t start_pos = content.find(cg_tag); if (start_pos == std::string::npos) { return false; diff --git a/libprocessgroup/cgrouprc/include/android/cgrouprc.h b/libprocessgroup/cgrouprc/include/android/cgrouprc.h index 0ce51237a..7e74432f7 100644 --- a/libprocessgroup/cgrouprc/include/android/cgrouprc.h +++ b/libprocessgroup/cgrouprc/include/android/cgrouprc.h @@ -69,6 +69,7 @@ __attribute__((warn_unused_result)) uint32_t ACgroupController_getVersion(const * Flag bitmask used in ACgroupController_getFlags */ #define CGROUPRC_CONTROLLER_FLAG_MOUNTED 0x1 +#define CGROUPRC_CONTROLLER_FLAG_NEEDS_ACTIVATION 0x2 #if __ANDROID_API__ >= __ANDROID_API_R__ diff --git a/libprocessgroup/setup/cgroup_descriptor.h b/libprocessgroup/setup/cgroup_descriptor.h index f029c4f12..699c03cec 100644 --- a/libprocessgroup/setup/cgroup_descriptor.h +++ b/libprocessgroup/setup/cgroup_descriptor.h @@ -25,7 +25,7 @@ namespace cgrouprc { class CgroupDescriptor { public: CgroupDescriptor(uint32_t version, const std::string& name, const std::string& path, - mode_t mode, const std::string& uid, const std::string& gid); + mode_t mode, const std::string& uid, const std::string& gid, uint32_t flags); const format::CgroupController* controller() const { return &controller_; } mode_t mode() const { return mode_; } diff --git a/libprocessgroup/setup/cgroup_map_write.cpp b/libprocessgroup/setup/cgroup_map_write.cpp index 17ea06e09..25f16a6e9 100644 --- a/libprocessgroup/setup/cgroup_map_write.cpp +++ b/libprocessgroup/setup/cgroup_map_write.cpp @@ -17,6 +17,7 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "libprocessgroup" +#include #include #include #include @@ -54,58 +55,53 @@ namespace cgrouprc { static constexpr const char* CGROUPS_DESC_FILE = "/etc/cgroups.json"; static constexpr const char* CGROUPS_DESC_VENDOR_FILE = 
"/vendor/etc/cgroups.json"; -static bool Mkdir(const std::string& path, mode_t mode, const std::string& uid, - const std::string& gid) { - if (mode == 0) { - mode = 0755; - } - - if (mkdir(path.c_str(), mode) != 0) { - /* chmod in case the directory already exists */ - if (errno == EEXIST) { - if (fchmodat(AT_FDCWD, path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0) { - // /acct is a special case when the directory already exists - // TODO: check if file mode is already what we want instead of using EROFS - if (errno != EROFS) { - PLOG(ERROR) << "fchmodat() failed for " << path; - return false; - } - } - } else { - PLOG(ERROR) << "mkdir() failed for " << path; - return false; - } - } - - if (uid.empty()) { - return true; - } - - passwd* uid_pwd = getpwnam(uid.c_str()); - if (!uid_pwd) { - PLOG(ERROR) << "Unable to decode UID for '" << uid << "'"; - return false; - } - - uid_t pw_uid = uid_pwd->pw_uid; +static bool ChangeDirModeAndOwner(const std::string& path, mode_t mode, const std::string& uid, + const std::string& gid, bool permissive_mode = false) { + uid_t pw_uid = -1; gid_t gr_gid = -1; - if (!gid.empty()) { - group* gid_pwd = getgrnam(gid.c_str()); - if (!gid_pwd) { - PLOG(ERROR) << "Unable to decode GID for '" << gid << "'"; + + if (!uid.empty()) { + passwd* uid_pwd = getpwnam(uid.c_str()); + if (!uid_pwd) { + PLOG(ERROR) << "Unable to decode UID for '" << uid << "'"; return false; } - gr_gid = gid_pwd->gr_gid; + + pw_uid = uid_pwd->pw_uid; + gr_gid = -1; + + if (!gid.empty()) { + group* gid_pwd = getgrnam(gid.c_str()); + if (!gid_pwd) { + PLOG(ERROR) << "Unable to decode GID for '" << gid << "'"; + return false; + } + gr_gid = gid_pwd->gr_gid; + } } - if (lchown(path.c_str(), pw_uid, gr_gid) < 0) { - PLOG(ERROR) << "lchown() failed for " << path; + auto dir = std::unique_ptr(opendir(path.c_str()), closedir); + + if (dir == NULL) { + PLOG(ERROR) << "opendir failed for " << path; return false; } - /* chown may have cleared S_ISUID and S_ISGID, chmod again */ - if (mode & (S_ISUID | S_ISGID)) { - if (fchmodat(AT_FDCWD, path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0) { + struct dirent* dir_entry; + while ((dir_entry = readdir(dir.get()))) { + if (!strcmp("..", dir_entry->d_name)) { + continue; + } + + std::string file_path = path + "/" + dir_entry->d_name; + + if (pw_uid != -1 && lchown(file_path.c_str(), pw_uid, gr_gid) < 0) { + PLOG(ERROR) << "lchown() failed for " << file_path; + return false; + } + + if (fchmodat(AT_FDCWD, file_path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0 && + (errno != EROFS || !permissive_mode)) { PLOG(ERROR) << "fchmodat() failed for " << path; return false; } @@ -114,6 +110,67 @@ static bool Mkdir(const std::string& path, mode_t mode, const std::string& uid, return true; } +static bool Mkdir(const std::string& path, mode_t mode, const std::string& uid, + const std::string& gid) { + bool permissive_mode = false; + + if (mode == 0) { + /* Allow chmod to fail */ + permissive_mode = true; + mode = 0755; + } + + if (mkdir(path.c_str(), mode) != 0) { + // /acct is a special case when the directory already exists + if (errno != EEXIST) { + PLOG(ERROR) << "mkdir() failed for " << path; + return false; + } else { + permissive_mode = true; + } + } + + if (uid.empty() && permissive_mode) { + return true; + } + + if (!ChangeDirModeAndOwner(path, mode, uid, gid, permissive_mode)) { + PLOG(ERROR) << "change of ownership or mode failed for " << path; + return false; + } + + return true; +} + +static void MergeCgroupToDescriptors(std::map* descriptors, + const Json::Value& 
cgroup, const std::string& name, + const std::string& root_path, int cgroups_version) { + std::string path; + + if (!root_path.empty()) { + path = root_path + "/" + cgroup["Path"].asString(); + } else { + path = cgroup["Path"].asString(); + } + + uint32_t controller_flags = 0; + + if (cgroup["NeedsActivation"].isBool() && cgroup["NeedsActivation"].asBool()) { + controller_flags |= CGROUPRC_CONTROLLER_FLAG_NEEDS_ACTIVATION; + } + + CgroupDescriptor descriptor( + cgroups_version, name, path, std::strtoul(cgroup["Mode"].asString().c_str(), 0, 8), + cgroup["UID"].asString(), cgroup["GID"].asString(), controller_flags); + + auto iter = descriptors->find(name); + if (iter == descriptors->end()) { + descriptors->emplace(name, descriptor); + } else { + iter->second = descriptor; + } +} + static bool ReadDescriptorsFromFile(const std::string& file_name, std::map* descriptors) { std::vector result; @@ -135,36 +192,19 @@ static bool ReadDescriptorsFromFile(const std::string& file_name, const Json::Value& cgroups = root["Cgroups"]; for (Json::Value::ArrayIndex i = 0; i < cgroups.size(); ++i) { std::string name = cgroups[i]["Controller"].asString(); - auto iter = descriptors->find(name); - if (iter == descriptors->end()) { - descriptors->emplace( - name, CgroupDescriptor( - 1, name, cgroups[i]["Path"].asString(), - std::strtoul(cgroups[i]["Mode"].asString().c_str(), 0, 8), - cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString())); - } else { - iter->second = CgroupDescriptor( - 1, name, cgroups[i]["Path"].asString(), - std::strtoul(cgroups[i]["Mode"].asString().c_str(), 0, 8), - cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString()); - } + MergeCgroupToDescriptors(descriptors, cgroups[i], name, "", 1); } } if (root.isMember("Cgroups2")) { const Json::Value& cgroups2 = root["Cgroups2"]; - auto iter = descriptors->find(CGROUPV2_CONTROLLER_NAME); - if (iter == descriptors->end()) { - descriptors->emplace( - CGROUPV2_CONTROLLER_NAME, - CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(), - std::strtoul(cgroups2["Mode"].asString().c_str(), 0, 8), - cgroups2["UID"].asString(), cgroups2["GID"].asString())); - } else { - iter->second = - CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(), - std::strtoul(cgroups2["Mode"].asString().c_str(), 0, 8), - cgroups2["UID"].asString(), cgroups2["GID"].asString()); + std::string root_path = cgroups2["Path"].asString(); + MergeCgroupToDescriptors(descriptors, cgroups2, CGROUPV2_CONTROLLER_NAME, "", 2); + + const Json::Value& childGroups = cgroups2["Controllers"]; + for (Json::Value::ArrayIndex i = 0; i < childGroups.size(); ++i) { + std::string name = childGroups[i]["Controller"].asString(); + MergeCgroupToDescriptors(descriptors, childGroups[i], name, root_path, 2); } } @@ -192,17 +232,51 @@ static bool ReadDescriptors(std::map* descriptors static bool SetupCgroup(const CgroupDescriptor& descriptor) { const format::CgroupController* controller = descriptor.controller(); - // mkdir [mode] [owner] [group] - if (!Mkdir(controller->path(), descriptor.mode(), descriptor.uid(), descriptor.gid())) { - LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup"; - return false; - } - int result; if (controller->version() == 2) { - result = mount("none", controller->path(), "cgroup2", MS_NODEV | MS_NOEXEC | MS_NOSUID, - nullptr); + result = 0; + if (!strcmp(controller->name(), CGROUPV2_CONTROLLER_NAME)) { + // /sys/fs/cgroup is created by cgroup2 with specific selinux permissions, + // try to create 
again in case the mount point is changed + if (!Mkdir(controller->path(), 0, "", "")) { + LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup"; + return false; + } + + result = mount("none", controller->path(), "cgroup2", MS_NODEV | MS_NOEXEC | MS_NOSUID, + nullptr); + + // selinux permissions change after mounting, so it's ok to change mode and owner now + if (!ChangeDirModeAndOwner(controller->path(), descriptor.mode(), descriptor.uid(), + descriptor.gid())) { + LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup"; + result = -1; + } else { + LOG(ERROR) << "restored ownership for " << controller->name() << " cgroup"; + } + } else { + if (!Mkdir(controller->path(), descriptor.mode(), descriptor.uid(), descriptor.gid())) { + LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup"; + return false; + } + + if (controller->flags() & CGROUPRC_CONTROLLER_FLAG_NEEDS_ACTIVATION) { + std::string str = std::string("+") + controller->name(); + std::string path = std::string(controller->path()) + "/cgroup.subtree_control"; + + if (!base::WriteStringToFile(str, path)) { + LOG(ERROR) << "Failed to activate controller " << controller->name(); + return false; + } + } + } } else { + // mkdir [mode] [owner] [group] + if (!Mkdir(controller->path(), descriptor.mode(), descriptor.uid(), descriptor.gid())) { + LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup"; + return false; + } + // Unfortunately historically cpuset controller was mounted using a mount command // different from all other controllers. This results in controller attributes not // to be prepended with controller name. For example this way instead of @@ -267,8 +341,8 @@ static bool WriteRcFile(const std::map& descripto CgroupDescriptor::CgroupDescriptor(uint32_t version, const std::string& name, const std::string& path, mode_t mode, const std::string& uid, - const std::string& gid) - : controller_(version, 0, name, path), mode_(mode), uid_(uid), gid_(gid) {} + const std::string& gid, uint32_t flags = 0) + : controller_(version, flags, name, path), mode_(mode), uid_(uid), gid_(gid) {} void CgroupDescriptor::set_mounted(bool mounted) { uint32_t flags = controller_.flags(); From 12d456421f93a69472df7c0591dbb675cbbd76b3 Mon Sep 17 00:00:00 2001 From: Bowgo Tsai Date: Thu, 9 Apr 2020 14:27:00 +0800 Subject: [PATCH 06/14] Add systrace tag for system property Introduce a new systrace tag, TRACE_TAG_SYSPROP, for use with system property. Bug: 147275573 Test: build Change-Id: I6f85f3f52f6580bab4ff43fc1dc0e87c689b054e Merged-In: I6f85f3f52f6580bab4ff43fc1dc0e87c689b054e (cherry picked from commit 573fc58bad9b49f1e04d01d9e29e5c4a9c32e023) --- libcutils/include/cutils/trace.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libcutils/include/cutils/trace.h b/libcutils/include/cutils/trace.h index c74ee3e9d..793e2ce8a 100644 --- a/libcutils/include/cutils/trace.h +++ b/libcutils/include/cutils/trace.h @@ -75,7 +75,8 @@ __BEGIN_DECLS #define ATRACE_TAG_AIDL (1<<24) #define ATRACE_TAG_NNAPI (1<<25) #define ATRACE_TAG_RRO (1<<26) -#define ATRACE_TAG_LAST ATRACE_TAG_RRO +#define ATRACE_TAG_SYSPROP (1<<27) +#define ATRACE_TAG_LAST ATRACE_TAG_SYSPROP // Reserved for initialization. #define ATRACE_TAG_NOT_READY (1ULL<<63) From 54794ac613d50bf4072174476f60527e2b0b4cdf Mon Sep 17 00:00:00 2001 From: Elliott Hughes Date: Mon, 17 Aug 2020 12:37:25 -0700 Subject: [PATCH 07/14] FileMap::create: remove duplicate addition. 
The previous change was intended to detect the overflow, but accidentally retained the existing addition, so we'd abort before getting to the explicit check. Also reformat slightly to better match the current code in qt-dev and beyond, to reduce merge conflicts. Bug: 156997193 Test: treehugger Change-Id: I73a3a4e75f0aad00888e8f2b49a07b7e8b4eda05 Merged-In: I73a3a4e75f0aad00888e8f2b49a07b7e8b4eda05 --- libutils/FileMap.cpp | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/libutils/FileMap.cpp b/libutils/FileMap.cpp index b9f411ef2..45ca85392 100644 --- a/libutils/FileMap.cpp +++ b/libutils/FileMap.cpp @@ -160,12 +160,6 @@ bool FileMap::create(const char* origFileName, int fd, off64_t offset, size_t le return false; } #else // !defined(__MINGW32__) - int prot, flags, adjust; - off64_t adjOffset; - size_t adjLength; - - void* ptr; - assert(fd >= 0); assert(offset >= 0); assert(length > 0); @@ -179,20 +173,19 @@ bool FileMap::create(const char* origFileName, int fd, off64_t offset, size_t le } } - adjust = offset % mPageSize; - adjOffset = offset - adjust; - adjLength = length + adjust; + int adjust = offset % mPageSize; + off64_t adjOffset = offset - adjust; + size_t adjLength; if (__builtin_add_overflow(length, adjust, &adjLength)) { ALOGE("adjusted length overflow: length %zu adjust %d", length, adjust); return false; } - flags = MAP_SHARED; - prot = PROT_READ; - if (!readOnly) - prot |= PROT_WRITE; + int flags = MAP_SHARED; + int prot = PROT_READ; + if (!readOnly) prot |= PROT_WRITE; - ptr = mmap(NULL, adjLength, prot, flags, fd, adjOffset); + void* ptr = mmap(nullptr, adjLength, prot, flags, fd, adjOffset); if (ptr == MAP_FAILED) { ALOGE("mmap(%lld,%zu) failed: %s\n", (long long)adjOffset, adjLength, strerror(errno)); From 510f89ec0ed82b07638f0e08f7b70d4c232f49c0 Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Tue, 2 Jun 2020 15:35:30 -0700 Subject: [PATCH 08/14] libprocessgroup: switch freezer to cgroup v2 remove cgroup v1 freezer entries from init.rc, add a new cgroup v2 controller and modify plists to properly interact with it. Bug: 154548692 Test: manually verified that the cgroup v1 freezer controller isn't created and a new controller for cgroup v2 is created under the correct sysfs directory.
Change-Id: I1b811300ade486f88fdbd157255a7f37750cc54d Merged-In: I1b811300ade486f88fdbd157255a7f37750cc54d --- libprocessgroup/profiles/cgroups.json | 24 +++++++++++---------- libprocessgroup/profiles/task_profiles.json | 14 ++++++------ rootdir/init.rc | 10 --------- 3 files changed, 20 insertions(+), 28 deletions(-) diff --git a/libprocessgroup/profiles/cgroups.json b/libprocessgroup/profiles/cgroups.json index 03419020c..451848711 100644 --- a/libprocessgroup/profiles/cgroups.json +++ b/libprocessgroup/profiles/cgroups.json @@ -39,19 +39,21 @@ "Mode": "0755", "UID": "system", "GID": "system" - }, - { - "Controller": "freezer", - "Path": "/dev/freezer", - "Mode": "0755", - "UID": "system", - "GID": "system" } ], "Cgroups2": { - "Path": "/dev/cg2_bpf", - "Mode": "0600", - "UID": "root", - "GID": "root" + "Path": "/sys/fs/cgroup", + "Mode": "0755", + "UID": "system", + "GID": "system", + "Controllers": [ + { + "Controller": "freezer", + "Path": "freezer", + "Mode": "0755", + "UID": "system", + "GID": "system" + } + ] } } diff --git a/libprocessgroup/profiles/task_profiles.json b/libprocessgroup/profiles/task_profiles.json index bc6bc7c25..c4dbf8e55 100644 --- a/libprocessgroup/profiles/task_profiles.json +++ b/libprocessgroup/profiles/task_profiles.json @@ -53,7 +53,7 @@ { "Name": "FreezerState", "Controller": "freezer", - "File": "frozen/freezer.state" + "File": "cgroup.freeze" } ], @@ -79,7 +79,7 @@ "Params": { "Controller": "freezer", - "Path": "frozen" + "Path": "" } } ] @@ -92,7 +92,7 @@ "Params": { "Controller": "freezer", - "Path": "" + "Path": "../" } } ] @@ -538,27 +538,27 @@ ] }, { - "Name": "FreezerThawed", + "Name": "FreezerDisabled", "Actions": [ { "Name": "SetAttribute", "Params": { "Name": "FreezerState", - "Value": "THAWED" + "Value": "0" } } ] }, { - "Name": "FreezerFrozen", + "Name": "FreezerEnabled", "Actions": [ { "Name": "SetAttribute", "Params": { "Name": "FreezerState", - "Value": "FROZEN" + "Value": "1" } } ] diff --git a/rootdir/init.rc b/rootdir/init.rc index 510d1e911..01ad32de6 100644 --- a/rootdir/init.rc +++ b/rootdir/init.rc @@ -322,16 +322,6 @@ on init chmod 0664 /dev/cpuset/restricted/tasks chmod 0664 /dev/cpuset/tasks - # freezer cgroup entries - mkdir /dev/freezer/frozen - write /dev/freezer/frozen/freezer.state FROZEN - chown system system /dev/freezer/cgroup.procs - chown system system /dev/freezer/frozen - chown system system /dev/freezer/frozen/freezer.state - chown system system /dev/freezer/frozen/cgroup.procs - - chmod 0664 /dev/freezer/frozen/freezer.state - # make the PSI monitor accessible to others chown system system /proc/pressure/memory chmod 0664 /proc/pressure/memory From 6b67edb3fb18a476c2b9e52c100af79def0a58a7 Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Thu, 20 Aug 2020 08:59:09 -0700 Subject: [PATCH 09/14] libprocessgroup: json prototype for cgroups v2 cgroups v2 support introduces new fields in the json format. 
Adapt the proto file accordingly.

Bug: 154548692
Test: atest libprocessgroup_proto_test --
Change-Id: I40f8757a8f4e6a0b839caa7faa976dfebf3aac98
Merged-In: I40f8757a8f4e6a0b839caa7faa976dfebf3aac98
---
 libprocessgroup/profiles/cgroups.proto | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/libprocessgroup/profiles/cgroups.proto b/libprocessgroup/profiles/cgroups.proto
index f4070c550..13adcae07 100644
--- a/libprocessgroup/profiles/cgroups.proto
+++ b/libprocessgroup/profiles/cgroups.proto
@@ -24,19 +24,24 @@ message Cgroups {
     Cgroups2 cgroups2 = 2 [json_name = "Cgroups2"];
 }
 
-// Next: 6
+// Next: 7
 message Cgroup {
     string controller = 1 [json_name = "Controller"];
     string path = 2 [json_name = "Path"];
     string mode = 3 [json_name = "Mode"];
     string uid = 4 [json_name = "UID"];
     string gid = 5 [json_name = "GID"];
+// Booleans default to false when not specified. File reconstruction fails
+// when a boolean is specified as false, so leave unspecified in that case
+// https://developers.google.com/protocol-buffers/docs/proto3#default
+    bool needs_activation = 6 [json_name = "NeedsActivation"];
 }
 
-// Next: 5
+// Next: 6
 message Cgroups2 {
     string path = 1 [json_name = "Path"];
     string mode = 2 [json_name = "Mode"];
     string uid = 3 [json_name = "UID"];
     string gid = 4 [json_name = "GID"];
+    repeated Cgroup controllers = 5 [json_name = "Controllers"];
 }

From 109a140f6cf20ba0aa1f517999c690f6f4281b42 Mon Sep 17 00:00:00 2001
From: Marco Ballesio
Date: Fri, 21 Aug 2020 08:00:23 -0700
Subject: [PATCH 10/14] init.rc: remove initializations to cg2_bpf path

https://r.android.com/c/1324649/5 moves the cgroup folder to its sysfs
path. Directory access rights are defined by kernel code and sepolicy,
so remove the initialization lines from init.rc.

Test: manually booted the device and verified access rights for
/sys/fs/cgroup
Bug: 154548692
Change-Id: I67284dc651ed529cae69e413b66c6e1292a2d970
Merged-In: I67284dc651ed529cae69e413b66c6e1292a2d970
---
 rootdir/init.rc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/rootdir/init.rc b/rootdir/init.rc
index 01ad32de6..a9af0b094 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -336,8 +336,6 @@ on init
     # This is needed by any process that uses socket tagging.
     chmod 0644 /dev/xt_qtaguid
 
-    chown root root /dev/cg2_bpf
-    chmod 0600 /dev/cg2_bpf
     mount bpf bpf /sys/fs/bpf nodev noexec nosuid
 
     # Create location for fs_mgr to store abbreviated output from filesystem

From 4deef11990c996cfb001f5d6ca72df884a1df3ee Mon Sep 17 00:00:00 2001
From: Stephane Lee
Date: Wed, 12 Aug 2020 22:38:20 -0700
Subject: [PATCH 11/14] Assume UNSUPPORTED if the battery capacity level sysfs
 node does not exist.

Bug: 163382088
Test: Ensure that, if ./capacity_level does not exist, the device will
shut down at 0% SOC in a Health 2.1 implementation
Change-Id: I00822b4bfba5d823c1a30163ea6824e59d3aae32
Merged-In: I00822b4bfba5d823c1a30163ea6824e59d3aae32
---
 healthd/BatteryMonitor.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/healthd/BatteryMonitor.cpp b/healthd/BatteryMonitor.cpp
index 599f5005f..fd810cbf7 100644
--- a/healthd/BatteryMonitor.cpp
+++ b/healthd/BatteryMonitor.cpp
@@ -79,7 +79,7 @@ static void initHealthInfo(HealthInfo_2_1* health_info_2_1) {
     // HIDL enum values are zero initialized, so they need to be initialized
     // properly.
-    health_info_2_1->batteryCapacityLevel = BatteryCapacityLevel::UNKNOWN;
+    health_info_2_1->batteryCapacityLevel = BatteryCapacityLevel::UNSUPPORTED;
     health_info_2_1->batteryChargeTimeToFullNowSeconds =
             (int64_t)Constants::BATTERY_CHARGE_TIME_TO_FULL_NOW_SECONDS_UNSUPPORTED;
     auto* props = &health_info_2_1->legacy.legacy;

From c3f71bc459ac164e8b6ca2123c2531897ab59355 Mon Sep 17 00:00:00 2001
From: Martijn Coenen
Date: Tue, 18 Aug 2020 15:34:15 +0200
Subject: [PATCH 12/14] Allow shell to write to /sdcard/Android/data and
 /sdcard/Android/obb.

On devices without sdcardfs, these are only writable by the owning UID,
and/or the ext_data_rw/ext_obb_rw groups respectively.

Bug: 161134565
Bug: 162810387
Test: try to write to /sdcard/Android/data/ from shell uid
Change-Id: I19a9980bcff7b6fd0554dd2a4c8d59abe6ad540b
(cherry picked from commit 4da604ba4e99619e4318f2aee952cffbd6c25ace)
---
 adb/daemon/main.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/adb/daemon/main.cpp b/adb/daemon/main.cpp
index 658e24456..7a0f7ffcd 100644
--- a/adb/daemon/main.cpp
+++ b/adb/daemon/main.cpp
@@ -108,9 +108,12 @@ static void drop_privileges(int server_port) {
     // AID_NET_BW_STATS to read out qtaguid statistics
     // AID_READPROC for reading /proc entries across UID boundaries
     // AID_UHID for using 'hid' command to read/write to /dev/uhid
+    // AID_EXT_DATA_RW for writing to /sdcard/Android/data (devices without sdcardfs)
+    // AID_EXT_OBB_RW for writing to /sdcard/Android/obb (devices without sdcardfs)
     gid_t groups[] = {AID_ADB, AID_LOG, AID_INPUT, AID_INET,
                       AID_NET_BT, AID_NET_BT_ADMIN, AID_SDCARD_R, AID_SDCARD_RW,
-                      AID_NET_BW_STATS, AID_READPROC, AID_UHID};
+                      AID_NET_BW_STATS, AID_READPROC, AID_UHID, AID_EXT_DATA_RW,
+                      AID_EXT_OBB_RW};
     minijail_set_supplementary_gids(jail.get(), arraysize(groups), groups);
 
     // Don't listen on a port (default 5037) if running in secure mode.

From f7451e6782680cc1eb4fc1ea70806307800c3124 Mon Sep 17 00:00:00 2001
From: Mayank Rana
Date: Fri, 11 Sep 2020 11:40:00 -0700
Subject: [PATCH 13/14] adbd: Fix check against valid payload size

block->payload and its size are not valid when used to check against
bytes_left, because of the std::move() performed on it just prior to the
check. Hence the check always fails to detect the case where more data
than expected is received. To detect this condition while still allowing
error handling with std::move(), remove the extra payload variable and
use block->payload directly.
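
As a minimal standalone sketch of the pattern being fixed (illustration
only, using std::vector<char> as a stand-in for adbd's Block type; this is
not the actual adbd code):

    #include <cstddef>
    #include <utility>
    #include <vector>

    using Block = std::vector<char>;

    // Buggy shape: the payload is moved out before the size check, so the
    // moved-from container is (typically) empty and the guard can never fire.
    bool append_buggy(Block& incoming, Block& payload, std::size_t bytes_left) {
        Block local = std::move(payload);
        if (payload.size() > bytes_left) {  // checks the moved-from object
            return false;
        }
        incoming.insert(incoming.end(), local.begin(), local.end());
        return true;
    }

    // Fixed shape: check first, then move directly from the source.
    bool append_fixed(Block& incoming, Block& payload, std::size_t bytes_left) {
        if (payload.size() > bytes_left) {
            return false;
        }
        Block moved = std::move(payload);
        incoming.insert(incoming.end(), moved.begin(), moved.end());
        return true;
    }

The actual fix below does the same thing: it performs the size check on
block->payload before that buffer is moved into incoming_payload_.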
Bug: http://b/168917244
Change-Id: I992bbba9d9a9861a195834f69d62e69b90658210
(cherry picked from commit 71a33cfa678ddc7704693b45244f3190af1da65d)
---
 adb/daemon/usb.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/adb/daemon/usb.cpp b/adb/daemon/usb.cpp
index a66387193..50d73644d 100644
--- a/adb/daemon/usb.cpp
+++ b/adb/daemon/usb.cpp
@@ -584,12 +584,11 @@ struct UsbFfsConnection : public Connection {
             incoming_header_ = msg;
         } else {
             size_t bytes_left = incoming_header_->data_length - incoming_payload_.size();
-            Block payload = std::move(block->payload);
             if (block->payload.size() > bytes_left) {
                 HandleError("received too many bytes while waiting for payload");
                 return false;
             }
-            incoming_payload_.append(std::move(payload));
+            incoming_payload_.append(std::move(block->payload));
         }
 
         if (incoming_header_->data_length == incoming_payload_.size()) {

From ad90b45558db9fc1834e2cbf6e6815213c495a6a Mon Sep 17 00:00:00 2001
From: josephjang
Date: Wed, 16 Sep 2020 16:27:42 +0800
Subject: [PATCH 14/14] fastboot: add new oem command for post wipe userdata

When the user runs "fastboot erase userdata", a follow-up oem command is
needed to wipe other user data on the device. This new postwipedata
command is supported only as part of "fastboot erase userdata".

Bug: 150929955
Change-Id: I9b6a5a4aaed31d1168e633418b189f9bb6d34d01
Ignore-AOSP-First: I9b6a5a4aaed31d1168e633418b189f9bb6d34d01
---
 fastboot/device/commands.cpp | 51 +++++++++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/fastboot/device/commands.cpp b/fastboot/device/commands.cpp
index 25533535b..efa5ed1ee 100644
--- a/fastboot/device/commands.cpp
+++ b/fastboot/device/commands.cpp
@@ -164,6 +164,39 @@ bool GetVarHandler(FastbootDevice* device, const std::vector<std::string>& args)
     return device->WriteOkay(message);
 }
 
+bool OemPostWipeData(FastbootDevice* device) {
+    auto fastboot_hal = device->fastboot_hal();
+    if (!fastboot_hal) {
+        return false;
+    }
+
+    Result ret;
+    // Check whether fastboot_hal supports the "oem postwipedata" API or not.
+    const std::string checkPostWipeDataCmd("oem postwipedata support");
+    auto check_cmd_ret_val = fastboot_hal->doOemCommand(checkPostWipeDataCmd,
+                                                        [&](Result result) { ret = result; });
+    if (!check_cmd_ret_val.isOk()) {
+        return false;
+    }
+    if (ret.status != Status::SUCCESS) {
+        return false;
+    }
+
+    const std::string postWipeDataCmd("oem postwipedata userdata");
+    auto ret_val = fastboot_hal->doOemCommand(postWipeDataCmd,
+                                              [&](Result result) { ret = result; });
+    if (!ret_val.isOk()) {
+        return false;
+    }
+    if (ret.status != Status::SUCCESS) {
+        device->WriteStatus(FastbootResult::FAIL, ret.message);
+    } else {
+        device->WriteStatus(FastbootResult::OKAY, "Erasing succeeded");
+    }
+
+    return true;
+}
+
 bool EraseHandler(FastbootDevice* device, const std::vector<std::string>& args) {
     if (args.size() < 2) {
         return device->WriteStatus(FastbootResult::FAIL, "Invalid arguments");
@@ -184,7 +217,18 @@ bool EraseHandler(FastbootDevice* device, const std::vector<std::string>& args)
         return device->WriteStatus(FastbootResult::FAIL, "Partition doesn't exist");
     }
     if (wipe_block_device(handle.fd(), get_block_device_size(handle.fd())) == 0) {
-        return device->WriteStatus(FastbootResult::OKAY, "Erasing succeeded");
+        // Perform oem PostWipeData if the Android userdata partition has been erased
+        bool support_oem_postwipedata = false;
+        if (partition_name == "userdata") {
+            support_oem_postwipedata = OemPostWipeData(device);
+        }
+
+        if (!support_oem_postwipedata) {
+            return device->WriteStatus(FastbootResult::OKAY, "Erasing succeeded");
+        } else {
+            // Device status was already written in OemPostWipeData(), so just return true
+            return true;
+        }
     }
     return device->WriteStatus(FastbootResult::FAIL, "Erasing failed");
 }
@@ -195,6 +239,11 @@ bool OemCmdHandler(FastbootDevice* device, const std::vector<std::string>& args)
         return device->WriteStatus(FastbootResult::FAIL, "Unable to open fastboot HAL");
     }
 
+    // Reject a bare "oem postwipedata userdata" so it can only run as part of "fastboot erase userdata".
+    if (args[0] == "oem postwipedata userdata") {
+        return device->WriteStatus(FastbootResult::FAIL, "Unable to do oem postwipedata userdata");
+    }
+
     Result ret;
     auto ret_val = fastboot_hal->doOemCommand(args[0], [&](Result result) { ret = result; });
     if (!ret_val.isOk()) {