diff --git a/debuggerd/libdebuggerd/tombstone.cpp b/debuggerd/libdebuggerd/tombstone.cpp
index 725c42cad..a0ba81b68 100644
--- a/debuggerd/libdebuggerd/tombstone.cpp
+++ b/debuggerd/libdebuggerd/tombstone.cpp
@@ -417,7 +417,7 @@ static void dump_all_maps(Backtrace* backtrace, BacktraceMap* map, log_t* log, p
        "memory map (%zu entr%s):", map->size(), map->size() == 1 ? "y" : "ies");
   if (print_fault_address_marker) {
-    if (map->begin() != map->end() && addr < map->begin()->start) {
+    if (map->begin() != map->end() && addr < (*map->begin())->start) {
       _LOG(log, logtype::MAPS, "\n--->Fault address falls at %s before any mapped regions\n",
            get_addr_string(addr).c_str());
       print_fault_address_marker = false;
@@ -429,49 +429,50 @@ static void dump_all_maps(Backtrace* backtrace, BacktraceMap* map, log_t* log, p
   }
 
   std::string line;
-  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+  for (auto it = map->begin(); it != map->end(); ++it) {
+    const backtrace_map_t* entry = *it;
     line = "    ";
     if (print_fault_address_marker) {
-      if (addr < it->start) {
+      if (addr < entry->start) {
         _LOG(log, logtype::MAPS, "--->Fault address falls at %s between mapped regions\n",
              get_addr_string(addr).c_str());
         print_fault_address_marker = false;
-      } else if (addr >= it->start && addr < it->end) {
+      } else if (addr >= entry->start && addr < entry->end) {
         line = "--->";
         print_fault_address_marker = false;
       }
     }
-    line += get_addr_string(it->start) + '-' + get_addr_string(it->end - 1) + ' ';
-    if (it->flags & PROT_READ) {
+    line += get_addr_string(entry->start) + '-' + get_addr_string(entry->end - 1) + ' ';
+    if (entry->flags & PROT_READ) {
       line += 'r';
     } else {
       line += '-';
     }
-    if (it->flags & PROT_WRITE) {
+    if (entry->flags & PROT_WRITE) {
       line += 'w';
     } else {
       line += '-';
     }
-    if (it->flags & PROT_EXEC) {
+    if (entry->flags & PROT_EXEC) {
       line += 'x';
     } else {
       line += '-';
     }
-    line += StringPrintf(" %8" PRIxPTR " %8" PRIxPTR, it->offset, it->end - it->start);
+    line += StringPrintf(" %8" PRIxPTR " %8" PRIxPTR, entry->offset, entry->end - entry->start);
     bool space_needed = true;
-    if (it->name.length() > 0) {
+    if (entry->name.length() > 0) {
       space_needed = false;
-      line += " " + it->name;
+      line += " " + entry->name;
       std::string build_id;
-      if ((it->flags & PROT_READ) && elf_get_build_id(backtrace, it->start, &build_id)) {
+      if ((entry->flags & PROT_READ) && elf_get_build_id(backtrace, entry->start, &build_id)) {
         line += " (BuildId: " + build_id + ")";
       }
     }
-    if (it->load_bias != 0) {
+    if (entry->load_bias != 0) {
       if (space_needed) {
         line += ' ';
       }
-      line += StringPrintf(" (load bias 0x%" PRIxPTR ")", it->load_bias);
+      line += StringPrintf(" (load bias 0x%" PRIxPTR ")", entry->load_bias);
     }
     _LOG(log, logtype::MAPS, "%s\n", line.c_str());
   }
diff --git a/libbacktrace/BacktraceMap.cpp b/libbacktrace/BacktraceMap.cpp
index 0e314958f..0f1ae11f7 100644
--- a/libbacktrace/BacktraceMap.cpp
+++ b/libbacktrace/BacktraceMap.cpp
@@ -40,9 +40,10 @@ BacktraceMap::~BacktraceMap() {
 
 void BacktraceMap::FillIn(uintptr_t addr, backtrace_map_t* map) {
   ScopedBacktraceMapIteratorLock lock(this);
-  for (BacktraceMap::const_iterator it = begin(); it != end(); ++it) {
-    if (addr >= it->start && addr < it->end) {
-      *map = *it;
+  for (auto it = begin(); it != end(); ++it) {
+    const backtrace_map_t* entry = *it;
+    if (addr >= entry->start && addr < entry->end) {
+      *map = *entry;
       return;
     }
   }
diff --git a/libbacktrace/UnwindStackMap.cpp b/libbacktrace/UnwindStackMap.cpp
index 9ac0a0b54..836a774b9 100644
--- a/libbacktrace/UnwindStackMap.cpp
+++ b/libbacktrace/UnwindStackMap.cpp
@@ -71,8 +71,19 @@ void UnwindStackMap::FillIn(uintptr_t addr, backtrace_map_t* map) {
   if (map_info == nullptr) {
     return;
   }
-  unwindstack::Elf* elf = map_info->GetElf(process_memory_, true);
-  map->load_bias = elf->GetLoadBias();
+  map->load_bias = map_info->GetLoadBias(process_memory_);
+}
+
+uint64_t UnwindStackMap::GetLoadBias(size_t index) {
+  if (index >= stack_maps_->Total()) {
+    return 0;
+  }
+
+  unwindstack::MapInfo* map_info = stack_maps_->Get(index);
+  if (map_info == nullptr) {
+    return 0;
+  }
+  return map_info->GetLoadBias(process_memory_);
 }
 
 std::string UnwindStackMap::GetFunctionName(uintptr_t pc, uintptr_t* offset) {
diff --git a/libbacktrace/UnwindStackMap.h b/libbacktrace/UnwindStackMap.h
index bc432e745..2f63655fd 100644
--- a/libbacktrace/UnwindStackMap.h
+++ b/libbacktrace/UnwindStackMap.h
@@ -42,6 +42,8 @@ class UnwindStackMap : public BacktraceMap {
   const std::shared_ptr<unwindstack::Memory>& process_memory() { return process_memory_; }
 
  protected:
+  uint64_t GetLoadBias(size_t index) override;
+
   std::unique_ptr<unwindstack::Maps> stack_maps_;
   std::shared_ptr<unwindstack::Memory> process_memory_;
 };
diff --git a/libbacktrace/backtrace_offline_test.cpp b/libbacktrace/backtrace_offline_test.cpp
index 0a1f33dfa..093566009 100644
--- a/libbacktrace/backtrace_offline_test.cpp
+++ b/libbacktrace/backtrace_offline_test.cpp
@@ -171,10 +171,12 @@ TEST(libbacktrace, DISABLED_generate_offline_testdata) {
   testdata += android::base::StringPrintf("pid: %d tid: %d\n", getpid(), arg.tid);
   // 2. Dump maps
   for (auto it = map->begin(); it != map->end(); ++it) {
-    testdata += android::base::StringPrintf(
-        "map: start: %" PRIxPTR " end: %" PRIxPTR " offset: %" PRIxPTR " load_bias: %" PRIxPTR
-        " flags: %d name: %s\n",
-        it->start, it->end, it->offset, it->load_bias, it->flags, it->name.c_str());
+    const backtrace_map_t* entry = *it;
+    testdata +=
+        android::base::StringPrintf("map: start: %" PRIxPTR " end: %" PRIxPTR " offset: %" PRIxPTR
+                                    " load_bias: %" PRIxPTR " flags: %d name: %s\n",
+                                    entry->start, entry->end, entry->offset, entry->load_bias,
+                                    entry->flags, entry->name.c_str());
   }
   // 3. Dump registers
   testdata += android::base::StringPrintf("registers: %zu ", sizeof(arg.unw_context));
diff --git a/libbacktrace/backtrace_test.cpp b/libbacktrace/backtrace_test.cpp
index 9911e74a6..890ab3f0e 100644
--- a/libbacktrace/backtrace_test.cpp
+++ b/libbacktrace/backtrace_test.cpp
@@ -857,6 +857,34 @@ struct map_test_t {
 
 static bool map_sort(map_test_t i, map_test_t j) { return i.start < j.start; }
 
+static std::string GetTestMapsAsString(const std::vector<map_test_t>& maps) {
+  if (maps.size() == 0) {
+    return "No test map entries\n";
+  }
+  std::string map_txt;
+  for (auto map : maps) {
+    map_txt += android::base::StringPrintf("%" PRIxPTR "-%" PRIxPTR "\n", map.start, map.end);
+  }
+  return map_txt;
+}
+
+static std::string GetMapsAsString(BacktraceMap* maps) {
+  if (maps->size() == 0) {
+    return "No map entries\n";
+  }
+  std::string map_txt;
+  for (const backtrace_map_t* map : *maps) {
+    map_txt += android::base::StringPrintf(
+        "%" PRIxPTR "-%" PRIxPTR " flags: 0x%x offset: 0x%" PRIxPTR " load_bias: 0x%" PRIxPTR,
+        map->start, map->end, map->flags, map->offset, map->load_bias);
+    if (!map->name.empty()) {
+      map_txt += ' ' + map->name;
+    }
+    map_txt += '\n';
+  }
+  return map_txt;
+}
+
 static void VerifyMap(pid_t pid) {
   char buffer[4096];
   snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);
@@ -875,12 +903,20 @@ static void VerifyMap(pid_t pid) {
   std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));
 
   // Basic test that verifies that the map is in the expected order.
-  ScopedBacktraceMapIteratorLock lock(map.get());
-  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
-  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
-    ASSERT_TRUE(test_it != test_maps.end());
-    ASSERT_EQ(test_it->start, it->start);
-    ASSERT_EQ(test_it->end, it->end);
+  auto test_it = test_maps.begin();
+  for (auto it = map->begin(); it != map->end(); ++it) {
+    ASSERT_TRUE(test_it != test_maps.end()) << "Mismatch in number of maps, expected test maps:\n"
+                                            << GetTestMapsAsString(test_maps) << "Actual maps:\n"
+                                            << GetMapsAsString(map.get());
+    ASSERT_EQ(test_it->start, (*it)->start) << "Mismatch in map data, expected test maps:\n"
+                                            << GetTestMapsAsString(test_maps) << "Actual maps:\n"
+                                            << GetMapsAsString(map.get());
+    ASSERT_EQ(test_it->end, (*it)->end) << "Mismatch in map data, expected test maps:\n"
+                                        << GetTestMapsAsString(test_maps) << "Actual maps:\n"
+                                        << GetMapsAsString(map.get());
+    // Make sure the load bias gets set to a value.
+    ASSERT_NE(static_cast<uintptr_t>(-1), (*it)->load_bias) << "Found uninitialized load_bias\n"
+                                                            << GetMapsAsString(map.get());
     ++test_it;
   }
   ASSERT_TRUE(test_it == test_maps.end());
diff --git a/libbacktrace/include/backtrace/BacktraceMap.h b/libbacktrace/include/backtrace/BacktraceMap.h
index d0783929a..4ae68dde0 100644
--- a/libbacktrace/include/backtrace/BacktraceMap.h
+++ b/libbacktrace/include/backtrace/BacktraceMap.h
@@ -30,6 +30,7 @@
 #endif
 
 #include <deque>
+#include <iterator>
 #include <memory>
 #include <string>
@@ -61,6 +62,49 @@ public:
 
   virtual ~BacktraceMap();
 
+  class iterator : public std::iterator<std::random_access_iterator_tag, backtrace_map_t*> {
+   public:
+    iterator(BacktraceMap* map, size_t index) : map_(map), index_(index) {}
+
+    iterator& operator++() {
+      index_++;
+      return *this;
+    }
+    iterator& operator++(int increment) {
+      index_ += increment;
+      return *this;
+    }
+    iterator& operator--() {
+      index_--;
+      return *this;
+    }
+    iterator& operator--(int decrement) {
+      index_ -= decrement;
+      return *this;
+    }
+
+    bool operator==(const iterator& rhs) { return this->index_ == rhs.index_; }
+    bool operator!=(const iterator& rhs) { return this->index_ != rhs.index_; }
+
+    const backtrace_map_t* operator*() {
+      if (index_ >= map_->size()) {
+        return nullptr;
+      }
+      backtrace_map_t* map = &map_->maps_[index_];
+      if (map->load_bias == static_cast<uintptr_t>(-1)) {
+        map->load_bias = map_->GetLoadBias(index_);
+      }
+      return map;
+    }
+
+   private:
+    BacktraceMap* map_ = nullptr;
+    size_t index_ = 0;
+  };
+
+  iterator begin() { return iterator(this, 0); }
+  iterator end() { return iterator(this, maps_.size()); }
+
   // Fill in the map data structure for the given address.
   virtual void FillIn(uintptr_t addr, backtrace_map_t* map);
@@ -89,14 +133,6 @@ public:
   virtual void LockIterator() {}
 
   virtual void UnlockIterator() {}
 
-  typedef std::deque<backtrace_map_t>::iterator iterator;
-  iterator begin() { return maps_.begin(); }
-  iterator end() { return maps_.end(); }
-
-  typedef std::deque<backtrace_map_t>::const_iterator const_iterator;
-  const_iterator begin() const { return maps_.begin(); }
-  const_iterator end() const { return maps_.end(); }
-
   size_t size() const { return maps_.size(); }
 
   virtual bool Build();
@@ -114,6 +150,8 @@ public:
 protected:
   BacktraceMap(pid_t pid);
 
+  virtual uint64_t GetLoadBias(size_t /* index */) { return 0; }
+
   virtual bool ParseLine(const char* line, backtrace_map_t* map);
 
   pid_t pid_;
diff --git a/libunwindstack/Android.bp b/libunwindstack/Android.bp
index 75aa427f5..74930d6eb 100644
--- a/libunwindstack/Android.bp
+++ b/libunwindstack/Android.bp
@@ -118,6 +118,7 @@ cc_test {
         "tests/ElfTestUtils.cpp",
         "tests/LogFake.cpp",
         "tests/MapInfoGetElfTest.cpp",
+        "tests/MapInfoGetLoadBiasTest.cpp",
         "tests/MapsTest.cpp",
         "tests/MemoryBufferTest.cpp",
         "tests/MemoryFake.cpp",
diff --git a/libunwindstack/Elf.cpp b/libunwindstack/Elf.cpp
index 97ade56fc..5f307ed9e 100644
--- a/libunwindstack/Elf.cpp
+++ b/libunwindstack/Elf.cpp
@@ -215,4 +215,22 @@ ElfInterface* Elf::CreateInterfaceFromMemory(Memory* memory) {
   return interface.release();
 }
 
+uint64_t Elf::GetLoadBias(Memory* memory) {
+  if (!IsValidElf(memory)) {
+    return 0;
+  }
+
+  uint8_t class_type;
+  if (!memory->Read(EI_CLASS, &class_type, 1)) {
+    return 0;
+  }
+
+  if (class_type == ELFCLASS32) {
+    return ElfInterface::GetLoadBias<Elf32_Ehdr, Elf32_Phdr>(memory);
+  } else if (class_type == ELFCLASS64) {
+    return ElfInterface::GetLoadBias<Elf64_Ehdr, Elf64_Phdr>(memory);
+  }
+  return 0;
+}
+
 }  // namespace unwindstack
diff --git a/libunwindstack/ElfInterface.cpp b/libunwindstack/ElfInterface.cpp
index 9bdb09402..334cf76f6 100644
--- a/libunwindstack/ElfInterface.cpp
+++ b/libunwindstack/ElfInterface.cpp
@@ -147,6 +147,26 @@ bool ElfInterface::ReadAllHeaders(uint64_t* load_bias) {
   return true;
 }
 
+template <typename EhdrType, typename PhdrType>
+uint64_t ElfInterface::GetLoadBias(Memory* memory) {
+  EhdrType ehdr;
+  if (!memory->Read(0, &ehdr, sizeof(ehdr))) {
+    return 0;
+  }
+
+  uint64_t offset = ehdr.e_phoff;
+  for (size_t i = 0; i < ehdr.e_phnum; i++, offset += ehdr.e_phentsize) {
+    PhdrType phdr;
+    if (!memory->Read(offset, &phdr, sizeof(phdr))) {
+      return 0;
+    }
+    if (phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
+      return phdr.p_vaddr;
+    }
+  }
+  return 0;
+}
+
 template <typename EhdrType, typename PhdrType>
 bool ElfInterface::ReadProgramHeaders(const EhdrType& ehdr, uint64_t* load_bias) {
   uint64_t offset = ehdr.e_phoff;
@@ -421,4 +441,7 @@ template bool ElfInterface::GetFunctionNameWithTemplate<Elf64_Sym>(uint64_t, uin
 template void ElfInterface::GetMaxSizeWithTemplate<Elf32_Ehdr>(Memory*, uint64_t*);
 template void ElfInterface::GetMaxSizeWithTemplate<Elf64_Ehdr>(Memory*, uint64_t*);
 
+template uint64_t ElfInterface::GetLoadBias<Elf32_Ehdr, Elf32_Phdr>(Memory*);
+template uint64_t ElfInterface::GetLoadBias<Elf64_Ehdr, Elf64_Phdr>(Memory*);
+
 }  // namespace unwindstack
diff --git a/libunwindstack/MapInfo.cpp b/libunwindstack/MapInfo.cpp
index 8a7ad9cee..51bce8eea 100644
--- a/libunwindstack/MapInfo.cpp
+++ b/libunwindstack/MapInfo.cpp
@@ -121,4 +121,23 @@ Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, bool init_gn
   return elf;
 }
 
+uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
+  {
+    // Make sure no other thread is trying to add the elf to this map.
+    std::lock_guard<std::mutex> guard(mutex_);
+    if (elf != nullptr) {
+      if (elf->valid()) {
+        return elf->GetLoadBias();
+      } else {
+        return 0;
+      }
+    }
+  }
+
+  // Call lightweight static function that will only read enough of the
+  // elf data to get the load bias.
+  std::unique_ptr<Memory> memory(CreateMemory(process_memory));
+  return Elf::GetLoadBias(memory.get());
+}
+
 }  // namespace unwindstack
diff --git a/libunwindstack/include/unwindstack/Elf.h b/libunwindstack/include/unwindstack/Elf.h
index da2ddc027..d9ea9c49b 100644
--- a/libunwindstack/include/unwindstack/Elf.h
+++ b/libunwindstack/include/unwindstack/Elf.h
@@ -74,6 +74,8 @@ class Elf {
 
   static void GetInfo(Memory* memory, bool* valid, uint64_t* size);
 
+  static uint64_t GetLoadBias(Memory* memory);
+
 protected:
   bool valid_ = false;
   uint64_t load_bias_ = 0;
diff --git a/libunwindstack/include/unwindstack/ElfInterface.h b/libunwindstack/include/unwindstack/ElfInterface.h
index 86e51b382..5cfe74dae 100644
--- a/libunwindstack/include/unwindstack/ElfInterface.h
+++ b/libunwindstack/include/unwindstack/ElfInterface.h
@@ -82,6 +82,9 @@ class ElfInterface {
   DwarfSection* eh_frame() { return eh_frame_.get(); }
   DwarfSection* debug_frame() { return debug_frame_.get(); }
 
+  template <typename EhdrType, typename PhdrType>
+  static uint64_t GetLoadBias(Memory* memory);
+
 protected:
   template <typename EhdrType>
   void InitHeadersWithTemplate();
diff --git a/libunwindstack/include/unwindstack/MapInfo.h b/libunwindstack/include/unwindstack/MapInfo.h
index e54b348af..6f8ceca51 100644
--- a/libunwindstack/include/unwindstack/MapInfo.h
+++ b/libunwindstack/include/unwindstack/MapInfo.h
@@ -51,6 +51,8 @@ struct MapInfo {
   // This function guarantees it will never return nullptr.
   Elf* GetElf(const std::shared_ptr<Memory>& process_memory, bool init_gnu_debugdata = false);
 
+  uint64_t GetLoadBias(const std::shared_ptr<Memory>& process_memory);
+
 private:
   MapInfo(const MapInfo&) = delete;
   void operator=(const MapInfo&) = delete;
diff --git a/libunwindstack/tests/MapInfoGetLoadBiasTest.cpp b/libunwindstack/tests/MapInfoGetLoadBiasTest.cpp
new file mode 100644
index 000000000..44a73a8f6
--- /dev/null
+++ b/libunwindstack/tests/MapInfoGetLoadBiasTest.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <elf.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/mman.h>
+
+#include <atomic>
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <unwindstack/Elf.h>
+#include <unwindstack/MapInfo.h>
+#include <unwindstack/Memory.h>
+
+#include "ElfFake.h"
+#include "ElfTestUtils.h"
+#include "MemoryFake.h"
+
+namespace unwindstack {
+
+class MapInfoGetLoadBiasTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    memory_ = new MemoryFake;
+    process_memory_.reset(memory_);
+    elf_ = new ElfFake(new MemoryFake);
+    elf_container_.reset(elf_);
+    map_info_.reset(new MapInfo(0x1000, 0x20000, 0, PROT_READ | PROT_WRITE, ""));
+  }
+
+  void MultipleThreadTest(uint64_t expected_load_bias);
+
+  std::shared_ptr<Memory> process_memory_;
+  MemoryFake* memory_;
+  ElfFake* elf_;
+  std::unique_ptr<ElfFake> elf_container_;
+  std::unique_ptr<MapInfo> map_info_;
+};
+
+TEST_F(MapInfoGetLoadBiasTest, no_elf_and_no_valid_elf_in_memory) {
+  MapInfo info(0x1000, 0x2000, 0, PROT_READ, "");
+
+  EXPECT_EQ(0U, info.GetLoadBias(process_memory_));
+}
+
+TEST_F(MapInfoGetLoadBiasTest, elf_exists) {
+  map_info_->elf = elf_container_.release();
+
+  elf_->FakeSetLoadBias(0);
+  EXPECT_EQ(0U, map_info_->GetLoadBias(process_memory_));
+
+  elf_->FakeSetLoadBias(0x1000);
+  EXPECT_EQ(0x1000U, map_info_->GetLoadBias(process_memory_));
+}
+
+void MapInfoGetLoadBiasTest::MultipleThreadTest(uint64_t expected_load_bias) {
+  static constexpr size_t kNumConcurrentThreads = 100;
+
+  uint64_t load_bias_values[kNumConcurrentThreads];
+  std::vector<std::thread*> threads;
+
+  std::atomic_bool wait;
+  wait = true;
+  // Create all of the threads and have them do the GetLoadBias at the same time
+  // to make it likely that a race will occur.
+  for (size_t i = 0; i < kNumConcurrentThreads; i++) {
+    std::thread* thread = new std::thread([i, this, &wait, &load_bias_values]() {
+      while (wait)
+        ;
+      load_bias_values[i] = map_info_->GetLoadBias(process_memory_);
+    });
+    threads.push_back(thread);
+  }
+
+  // Set them all going and wait for the threads to finish.
+  wait = false;
+  for (auto thread : threads) {
+    thread->join();
+    delete thread;
+  }
+
+  // Now verify that every thread observed the expected load bias value.
+  for (size_t i = 0; i < kNumConcurrentThreads; i++) {
+    EXPECT_EQ(expected_load_bias, load_bias_values[i]) << "Thread " << i << " mismatched.";
+  }
+}
+
+TEST_F(MapInfoGetLoadBiasTest, multiple_thread_elf_exists) {
+  map_info_->elf = elf_container_.release();
+  elf_->FakeSetLoadBias(0x1000);
+
+  MultipleThreadTest(0x1000);
+}
+
+static void InitElfData(MemoryFake* memory, uint64_t offset) {
+  Elf32_Ehdr ehdr;
+  TestInitEhdr<Elf32_Ehdr>(&ehdr, ELFCLASS32, EM_ARM);
+  ehdr.e_phoff = 0x5000;
+  ehdr.e_phnum = 2;
+  ehdr.e_phentsize = sizeof(Elf32_Phdr);
+  memory->SetMemory(offset, &ehdr, sizeof(ehdr));
+
+  Elf32_Phdr phdr;
+  memset(&phdr, 0, sizeof(phdr));
+  phdr.p_type = PT_NULL;
+  memory->SetMemory(offset + 0x5000, &phdr, sizeof(phdr));
+  phdr.p_type = PT_LOAD;
+  phdr.p_offset = 0;
+  phdr.p_vaddr = 0xe000;
+  memory->SetMemory(offset + 0x5000 + sizeof(phdr), &phdr, sizeof(phdr));
+}
+
+TEST_F(MapInfoGetLoadBiasTest, elf_exists_in_memory) {
+  InitElfData(memory_, map_info_->start);
+
+  EXPECT_EQ(0xe000U, map_info_->GetLoadBias(process_memory_));
+}
+
+TEST_F(MapInfoGetLoadBiasTest, multiple_thread_elf_exists_in_memory) {
+  InitElfData(memory_, map_info_->start);
+
+  MultipleThreadTest(0xe000);
+}
+
+}  // namespace unwindstack
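
Note: the core pattern of this change is that BacktraceMap::begin()/end() now return a custom iterator whose operator*() yields a const backtrace_map_t* and fills in load_bias lazily through the virtual GetLoadBias(index) hook, so ELF program headers are only read for entries a caller actually dereferences. The following is a minimal, self-contained sketch of that lazy-initialization iterator pattern, not the real libbacktrace API; Entry, MapTable, and ComputeLoadBias are simplified stand-in names chosen only for illustration.

// Sketch of a lazily-initializing map iterator (assumed simplified types).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <deque>
#include <string>
#include <utility>

struct Entry {
  uint64_t start;
  uint64_t end;
  uint64_t load_bias;  // static_cast<uint64_t>(-1) means "not computed yet".
  std::string name;
};

class MapTable {
 public:
  class iterator {
   public:
    iterator(MapTable* table, size_t index) : table_(table), index_(index) {}

    iterator& operator++() {
      index_++;
      return *this;
    }
    bool operator!=(const iterator& rhs) const { return index_ != rhs.index_; }

    // Dereferencing hands out a pointer and computes load_bias only on first
    // use, mirroring the operator*() added to BacktraceMap::iterator above.
    const Entry* operator*() {
      if (index_ >= table_->entries_.size()) {
        return nullptr;
      }
      Entry* entry = &table_->entries_[index_];
      if (entry->load_bias == static_cast<uint64_t>(-1)) {
        entry->load_bias = table_->ComputeLoadBias(index_);
      }
      return entry;
    }

   private:
    MapTable* table_;
    size_t index_;
  };

  iterator begin() { return iterator(this, 0); }
  iterator end() { return iterator(this, entries_.size()); }

  void Add(Entry entry) { entries_.push_back(std::move(entry)); }

 private:
  // Stand-in for the expensive per-entry work (reading ELF program headers).
  uint64_t ComputeLoadBias(size_t index) { return entries_[index].start & 0xfff; }

  std::deque<Entry> entries_;
};

int main() {
  MapTable maps;
  maps.Add({0x1000, 0x2000, static_cast<uint64_t>(-1), "libfoo.so"});
  maps.Add({0x5000, 0x6000, static_cast<uint64_t>(-1), "libbar.so"});

  // Callers dereference to a pointer, i.e. (*it)->field instead of it->field,
  // which is the call-site change visible in the tombstone.cpp and
  // BacktraceMap.cpp hunks.
  for (auto it = maps.begin(); it != maps.end(); ++it) {
    const Entry* entry = *it;
    printf("%s: %llx-%llx load_bias=%llx\n", entry->name.c_str(),
           static_cast<unsigned long long>(entry->start),
           static_cast<unsigned long long>(entry->end),
           static_cast<unsigned long long>(entry->load_bias));
  }
  return 0;
}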