Merge "libmemunreachable: clang-format everything"

Colin Cross authored on 2017-06-22 20:57:02 +00:00, committed by Gerrit Code Review
commit 75752c1911
36 changed files with 565 additions and 633 deletions

View file

@@ -33,9 +33,9 @@
 #include "android-base/macros.h"

+#include "anon_vma_naming.h"
 #include "Allocator.h"
 #include "LinkedList.h"
-#include "anon_vma_naming.h"

 // runtime interfaces used:
 // abort

@@ -57,10 +57,9 @@ static constexpr size_t kChunkSize = 256 * 1024;
 static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
 static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
 static constexpr size_t kMinBucketAllocationSize = 8;
-static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize)
-    - const_log2(kMinBucketAllocationSize) + 1;
-static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize
-    / kPageSize;
+static constexpr unsigned int kNumBuckets =
+    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
+static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;

 std::atomic<int> heap_count;

@@ -93,7 +92,7 @@ class HeapImpl {
   void FreeLocked(void* ptr);

   struct MapAllocation {
-    void *ptr;
+    void* ptr;
     size_t size;
     MapAllocation* next;
   };

@@ -107,8 +106,7 @@ static inline unsigned int log2(size_t n) {
 }

 static inline unsigned int size_to_bucket(size_t size) {
-  if (size < kMinBucketAllocationSize)
-    return kMinBucketAllocationSize;
+  if (size < kMinBucketAllocationSize) return kMinBucketAllocationSize;
   return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
 }

@@ -140,8 +138,7 @@ static void* MapAligned(size_t size, size_t align) {
   // Trim beginning
   if (aligned_ptr != ptr) {
-    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr)
-        - reinterpret_cast<uintptr_t>(ptr);
+    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
     munmap(ptr, extra);
     map_size -= extra;
     ptr = aligned_ptr;

@@ -151,14 +148,13 @@ static void* MapAligned(size_t size, size_t align) {
   if (map_size != size) {
     assert(map_size > size);
     assert(ptr != NULL);
-    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
-        map_size - size);
+    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
   }

 #define PR_SET_VMA 0x53564d41
 #define PR_SET_VMA_ANON_NAME 0
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
-      reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
+        "leak_detector_malloc");

   return ptr;
 }

@@ -170,36 +166,31 @@ class Chunk {
   Chunk(HeapImpl* heap, int bucket);
   ~Chunk() {}

-  void *Alloc();
+  void* Alloc();
   void Free(void* ptr);
   void Purge();
   bool Empty();

   static Chunk* ptr_to_chunk(void* ptr) {
-    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
-        & ~(kChunkSize - 1));
+    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
   }
   static bool is_chunk(void* ptr) {
     return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
   }

-  unsigned int free_count() {
-    return free_count_;
-  }
-  HeapImpl* heap() {
-    return heap_;
-  }
-  LinkedList<Chunk*> node_; // linked list sorted by minimum free count
+  unsigned int free_count() { return free_count_; }
+  HeapImpl* heap() { return heap_; }
+  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

  private:
   DISALLOW_COPY_AND_ASSIGN(Chunk);

   HeapImpl* heap_;
   unsigned int bucket_;
   unsigned int allocation_size_;  // size of allocations in chunk, min 8 bytes
   unsigned int max_allocations_;  // maximum number of allocations in the chunk
   unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
   unsigned int free_count_;  // number of available allocations
   unsigned int frees_since_purge_;  // number of calls to Free since last Purge

   // bitmap of pages that have been dirtied
   uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

@@ -210,13 +201,10 @@ class Chunk {
   char data_[0];

   unsigned int ptr_to_n(void* ptr) {
-    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr)
-        - reinterpret_cast<uintptr_t>(data_);
+    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
     return offset / allocation_size_;
   }
-  void* n_to_ptr(unsigned int n) {
-    return data_ + n * allocation_size_;
-  }
+  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
 };
 static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");

@@ -225,23 +213,27 @@ void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
   assert(count == sizeof(Chunk));
   void* mem = MapAligned(kChunkSize, kChunkSize);
   if (!mem) {
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
   return mem;
 }

 // Override new operator on chunk to use mmap to allocate kChunkSize
-void Chunk::operator delete(void *ptr) {
+void Chunk::operator delete(void* ptr) {
   assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
   munmap(ptr, kChunkSize);
 }

-Chunk::Chunk(HeapImpl* heap, int bucket) :
-    node_(this), heap_(heap), bucket_(bucket), allocation_size_(
-        bucket_to_size(bucket)), max_allocations_(
-        kUsableChunkSize / allocation_size_), first_free_bitmap_(0), free_count_(
-        max_allocations_), frees_since_purge_(0) {
+Chunk::Chunk(HeapImpl* heap, int bucket)
+    : node_(this),
+      heap_(heap),
+      bucket_(bucket),
+      allocation_size_(bucket_to_size(bucket)),
+      max_allocations_(kUsableChunkSize / allocation_size_),
+      first_free_bitmap_(0),
+      free_count_(max_allocations_),
+      frees_since_purge_(0) {
   memset(dirty_pages_, 0, sizeof(dirty_pages_));
   memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
 }

@@ -254,8 +246,7 @@ void* Chunk::Alloc() {
   assert(free_count_ > 0);

   unsigned int i = first_free_bitmap_;
-  while (free_bitmap_[i] == 0)
-    i++;
+  while (free_bitmap_[i] == 0) i++;
   assert(i < arraysize(free_bitmap_));
   unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
   assert(free_bitmap_[i] & (1U << bit));

@@ -306,38 +297,35 @@ void Chunk::Free(void* ptr) {
 void Chunk::Purge() {
   frees_since_purge_ = 0;

-  //unsigned int allocsPerPage = kPageSize / allocation_size_;
+  // unsigned int allocsPerPage = kPageSize / allocation_size_;
 }

 // Override new operator on HeapImpl to use mmap to allocate a page
-void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
-    noexcept {
+void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
   assert(count == sizeof(HeapImpl));
   void* mem = MapAligned(kPageSize, kPageSize);
   if (!mem) {
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
   heap_count++;
   return mem;
 }

-void HeapImpl::operator delete(void *ptr) {
+void HeapImpl::operator delete(void* ptr) {
   munmap(ptr, kPageSize);
 }

-HeapImpl::HeapImpl() :
-    free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
-}
+HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}

 bool HeapImpl::Empty() {
   for (unsigned int i = 0; i < kNumBuckets; i++) {
-    for (LinkedList<Chunk*> *it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
+    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
       if (!it->data()->Empty()) {
         return false;
       }
     }
-    for (LinkedList<Chunk*> *it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
+    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
       if (!it->data()->Empty()) {
         return false;
       }

@@ -350,12 +338,12 @@ bool HeapImpl::Empty() {
 HeapImpl::~HeapImpl() {
   for (unsigned int i = 0; i < kNumBuckets; i++) {
     while (!free_chunks_[i].empty()) {
-      Chunk *chunk = free_chunks_[i].next()->data();
+      Chunk* chunk = free_chunks_[i].next()->data();
       chunk->node_.remove();
       delete chunk;
     }
     while (!full_chunks_[i].empty()) {
-      Chunk *chunk = full_chunks_[i].next()->data();
+      Chunk* chunk = full_chunks_[i].next()->data();
       chunk->node_.remove();
       delete chunk;
     }

@@ -373,18 +361,18 @@ void* HeapImpl::AllocLocked(size_t size) {
   }
   int bucket = size_to_bucket(size);
   if (free_chunks_[bucket].empty()) {
-    Chunk *chunk = new Chunk(this, bucket);
+    Chunk* chunk = new Chunk(this, bucket);
     free_chunks_[bucket].insert(chunk->node_);
   }
   return free_chunks_[bucket].next()->data()->Alloc();
 }

-void HeapImpl::Free(void *ptr) {
+void HeapImpl::Free(void* ptr) {
   std::lock_guard<std::mutex> lk(m_);
   FreeLocked(ptr);
 }

-void HeapImpl::FreeLocked(void *ptr) {
+void HeapImpl::FreeLocked(void* ptr) {
   if (!Chunk::is_chunk(ptr)) {
     HeapImpl::MapFree(ptr);
   } else {

@@ -397,12 +385,11 @@ void HeapImpl::FreeLocked(void *ptr) {
 void* HeapImpl::MapAlloc(size_t size) {
   size = (size + kPageSize - 1) & ~(kPageSize - 1);

-  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
-      sizeof(MapAllocation)));
+  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
   void* ptr = MapAligned(size, kChunkSize);
   if (!ptr) {
     FreeLocked(allocation);
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }

   allocation->ptr = ptr;
   allocation->size = size;

@@ -412,10 +399,9 @@ void* HeapImpl::MapAlloc(size_t size) {
   return ptr;
 }

-void HeapImpl::MapFree(void *ptr) {
-  MapAllocation **allocation = &map_allocation_list_;
-  while (*allocation && (*allocation)->ptr != ptr)
-    allocation = &(*allocation)->next;
+void HeapImpl::MapFree(void* ptr) {
+  MapAllocation** allocation = &map_allocation_list_;
+  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;

   assert(*allocation != nullptr);

@@ -425,22 +411,22 @@ void HeapImpl::MapFree(void *ptr) {
   *allocation = (*allocation)->next;
 }

-void HeapImpl::MoveToFreeList(Chunk *chunk, int bucket) {
+void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
   MoveToList(chunk, &free_chunks_[bucket]);
 }

-void HeapImpl::MoveToFullList(Chunk *chunk, int bucket) {
+void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
   MoveToList(chunk, &full_chunks_[bucket]);
 }

-void HeapImpl::MoveToList(Chunk *chunk, LinkedList<Chunk*>* head) {
+void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
   // Remove from old list
   chunk->node_.remove();

-  LinkedList<Chunk*> *node = head;
+  LinkedList<Chunk*>* node = head;
   // Insert into new list, sorted by lowest free count
-  while (node->next() != head && node->data() != nullptr
-      && node->data()->free_count() < chunk->free_count())
+  while (node->next() != head && node->data() != nullptr &&
+         node->data()->free_count() < chunk->free_count())
     node = node->next();

   node->insert(chunk->node_);

@@ -469,7 +455,7 @@ void Heap::deallocate(void* ptr) {
   impl_->Free(ptr);
 }

-void Heap::deallocate(HeapImpl*impl, void* ptr) {
+void Heap::deallocate(HeapImpl* impl, void* ptr) {
   impl->Free(ptr);
 }
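A note on the bucket arithmetic above: with kChunkSize = 256 KB, kMaxBucketAllocationSize is 64 KB and kMinBucketAllocationSize is 8 bytes, so kNumBuckets works out to 14 power-of-two size classes. The stand-alone sketch below probes that mapping; the constants are copied from the diff, while const_log2, log2_floor, and main are illustrative stand-ins. Note the library's size_to_bucket() returns kMinBucketAllocationSize rather than bucket 0 for sub-8-byte sizes; the sketch maps those to bucket 0 instead.

#include <cstddef>
#include <cstdio>

// Constants as in the diff above.
constexpr size_t kChunkSize = 256 * 1024;
constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
constexpr size_t kMinBucketAllocationSize = 8;

constexpr unsigned int const_log2(size_t n) { return n <= 1 ? 0 : 1 + const_log2(n / 2); }

constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;  // 14

static unsigned int log2_floor(size_t n) {  // floor(log2(n)), n > 0
  unsigned int r = 0;
  while (n >>= 1) r++;
  return r;
}

// Same mapping as size_to_bucket(), except sizes below 8 go to bucket 0 here.
static unsigned int size_to_bucket(size_t size) {
  if (size < kMinBucketAllocationSize) return 0;
  return log2_floor(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

int main() {
  printf("%u buckets for sizes 8..%zu\n", kNumBuckets, kMaxBucketAllocationSize);
  for (size_t s : {8, 9, 16, 100, 4096, 65536})
    printf("size %5zu -> bucket %u\n", s, size_to_bucket(s));  // 0, 1, 1, 4, 9, 13
}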

View file

@@ -31,14 +31,13 @@ extern std::atomic<int> heap_count;

 class HeapImpl;

-template<typename T>
+template <typename T>
 class Allocator;

 // Non-templated class that implements wraps HeapImpl to keep
 // implementation out of the header file
 class Heap {
  public:
   Heap();
   ~Heap();

@@ -59,110 +58,99 @@ public:
   static void deallocate(HeapImpl* impl, void* ptr);

   // Allocate a class of type T
-  template<class T>
+  template <class T>
   T* allocate() {
     return reinterpret_cast<T*>(allocate(sizeof(T)));
   }

   // Comparators, copied objects will be equal
-  bool operator ==(const Heap& other) const {
-    return impl_ == other.impl_;
-  }
-  bool operator !=(const Heap& other) const {
-    return !(*this == other);
-  }
+  bool operator==(const Heap& other) const { return impl_ == other.impl_; }
+  bool operator!=(const Heap& other) const { return !(*this == other); }

   // std::unique_ptr wrapper that allocates using allocate and deletes using
   // deallocate
-  template<class T>
+  template <class T>
   using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;

-  template<class T, class... Args>
+  template <class T, class... Args>
   unique_ptr<T> make_unique(Args&&... args) {
     HeapImpl* impl = impl_;
-    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...),
-        [impl](void* ptr) {
-          reinterpret_cast<T*>(ptr)->~T();
-          deallocate(impl, ptr);
-        });
+    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...), [impl](void* ptr) {
+      reinterpret_cast<T*>(ptr)->~T();
+      deallocate(impl, ptr);
+    });
   }

   // std::unique_ptr wrapper that allocates using allocate and deletes using
   // deallocate
-  template<class T>
+  template <class T>
   using shared_ptr = std::shared_ptr<T>;

-  template<class T, class... Args>
+  template <class T, class... Args>
   shared_ptr<T> make_shared(Args&&... args);

  protected:
   HeapImpl* impl_;
   bool owns_impl_;
 };

 // STLAllocator implements the std allocator interface on top of a Heap
-template<typename T>
+template <typename T>
 class STLAllocator {
  public:
   using value_type = T;

-  ~STLAllocator() {
-  }
+  ~STLAllocator() {}

   // Construct an STLAllocator on top of a Heap
-  STLAllocator(const Heap& heap) : // NOLINT, implicit
-      heap_(heap) {
-  }
+  STLAllocator(const Heap& heap)
+      :  // NOLINT, implicit
+        heap_(heap) {}

   // Rebind an STLAllocator from an another STLAllocator
-  template<typename U>
-  STLAllocator(const STLAllocator<U>& other) : // NOLINT, implicit
-      heap_(other.heap_) {
-  }
+  template <typename U>
+  STLAllocator(const STLAllocator<U>& other)
+      :  // NOLINT, implicit
+        heap_(other.heap_) {}

   STLAllocator(const STLAllocator&) = default;
   STLAllocator<T>& operator=(const STLAllocator<T>&) = default;

-  T* allocate(std::size_t n) {
-    return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T)));
-  }
+  T* allocate(std::size_t n) { return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T))); }

-  void deallocate(T* ptr, std::size_t) {
-    heap_.deallocate(ptr);
-  }
+  void deallocate(T* ptr, std::size_t) { heap_.deallocate(ptr); }

-  template<typename U>
-  bool operator ==(const STLAllocator<U>& other) const {
+  template <typename U>
+  bool operator==(const STLAllocator<U>& other) const {
     return heap_ == other.heap_;
   }
-  template<typename U>
-  inline bool operator !=(const STLAllocator<U>& other) const {
+  template <typename U>
+  inline bool operator!=(const STLAllocator<U>& other) const {
     return !(this == other);
   }

-  template<typename U>
+  template <typename U>
   friend class STLAllocator;

  protected:
   Heap heap_;
 };

 // Allocator extends STLAllocator with some convenience methods for allocating
 // a single object and for constructing unique_ptr and shared_ptr objects with
 // appropriate deleters.
-template<class T>
+template <class T>
 class Allocator : public STLAllocator<T> {
  public:
   ~Allocator() {}

-  Allocator(const Heap& other) : // NOLINT, implicit
-      STLAllocator<T>(other) {
-  }
+  Allocator(const Heap& other)
+      :  // NOLINT, implicit
+        STLAllocator<T>(other) {}

-  template<typename U>
-  Allocator(const STLAllocator<U>& other) : // NOLINT, implicit
-      STLAllocator<T>(other) {
-  }
+  template <typename U>
+  Allocator(const STLAllocator<U>& other)
+      :  // NOLINT, implicit
+        STLAllocator<T>(other) {}

   Allocator(const Allocator&) = default;
   Allocator<T>& operator=(const Allocator<T>&) = default;

@@ -171,24 +159,20 @@ class Allocator : public STLAllocator<T> {
   using STLAllocator<T>::deallocate;
   using STLAllocator<T>::heap_;

-  T* allocate() {
-    return STLAllocator<T>::allocate(1);
-  }
-  void deallocate(void* ptr) {
-    heap_.deallocate(ptr);
-  }
+  T* allocate() { return STLAllocator<T>::allocate(1); }
+  void deallocate(void* ptr) { heap_.deallocate(ptr); }

   using shared_ptr = Heap::shared_ptr<T>;

-  template<class... Args>
-  shared_ptr make_shared(Args&& ...args) {
+  template <class... Args>
+  shared_ptr make_shared(Args&&... args) {
     return heap_.template make_shared<T>(std::forward<Args>(args)...);
   }

   using unique_ptr = Heap::unique_ptr<T>;
-  template<class... Args>
-  unique_ptr make_unique(Args&& ...args) {
+  template <class... Args>
+  unique_ptr make_unique(Args&&... args) {
     return heap_.template make_unique<T>(std::forward<Args>(args)...);
   }
 };

@@ -196,30 +180,31 @@ class Allocator : public STLAllocator<T> {
 // std::unique_ptr wrapper that allocates using allocate and deletes using
 // deallocate. Implemented outside class definition in order to pass
 // Allocator<T> to shared_ptr.
-template<class T, class... Args>
+template <class T, class... Args>
 inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
   return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
                                                         std::forward<Args>(args)...);
 }

 namespace allocator {

-template<class T>
+template <class T>
 using vector = std::vector<T, Allocator<T>>;

-template<class T>
+template <class T>
 using list = std::list<T, Allocator<T>>;

-template<class Key, class T, class Compare = std::less<Key>>
+template <class Key, class T, class Compare = std::less<Key>>
 using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;

-template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
-using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
+template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+using unordered_map =
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;

-template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+template <class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
 using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;

-template<class Key, class Compare = std::less<Key>>
+template <class Key, class Compare = std::less<Key>>
 using set = std::set<Key, Compare, Allocator<Key>>;

 using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
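The header above is essentially a minimal C++11 allocator: value_type, allocate, deallocate, a rebinding converting constructor, and the equality operators are all that std::vector needs. One detail worth flagging: STLAllocator's operator!= returns !(this == other), comparing the this pointer rather than *this, which looks like an upstream quirk the reformat preserves. Below is a stand-alone analogue with malloc standing in for Heap; all names here are illustrative, not part of the library.

#include <cstdlib>
#include <new>
#include <vector>

// Minimal allocator with the same shape as STLAllocator, minus the Heap.
template <typename T>
class MallocAllocator {
 public:
  using value_type = T;

  MallocAllocator() = default;
  template <typename U>
  MallocAllocator(const MallocAllocator<U>&) {}  // rebind constructor

  T* allocate(std::size_t n) {
    void* p = malloc(n * sizeof(T));
    if (!p) throw std::bad_alloc();
    return static_cast<T*>(p);
  }
  void deallocate(T* ptr, std::size_t) { free(ptr); }

  template <typename U>
  bool operator==(const MallocAllocator<U>&) const { return true; }  // stateless
  template <typename U>
  bool operator!=(const MallocAllocator<U>& other) const { return !(*this == other); }
};

int main() {
  // Mirrors how the allocator::vector alias plugs Allocator<T> into std::vector.
  std::vector<int, MallocAllocator<int>> v{1, 2, 3};
  v.push_back(4);
  return v.size() == 4 ? 0 : 1;
}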

View file

@@ -114,8 +114,8 @@ bool HeapWalker::DetectLeaks() {
   return true;
 }

-bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
-    size_t* num_leaks_out, size_t* leak_bytes_out) {
+bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
+                        size_t* leak_bytes_out) {
   leaked.clear();
   size_t num_leaks = 0;

@@ -148,9 +148,9 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
 static bool MapOverPage(void* addr) {
   const size_t page_size = sysconf(_SC_PAGE_SIZE);
-  void *page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size-1));
-  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
+  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));
+  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
   if (ret == MAP_FAILED) {
     MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
     return false;

@@ -159,7 +159,8 @@ static bool MapOverPage(void* addr) {
   return true;
 }

-void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si, void* /*uctx*/) {
+void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
+                                void* /*uctx*/) {
   uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
   if (addr != walking_ptr_) {
     handler.reset();
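MapOverPage() above relies on the usual power-of-two mask trick to find the page containing a faulting address before mapping a readable page over it. A minimal demonstration of the rounding (4096 is assumed here; the real code queries sysconf(_SC_PAGE_SIZE)):

#include <cinttypes>
#include <cstdio>

// Clearing the low-order bits of an address yields the base of its page.
// This works because page_size is a power of two.
int main() {
  const uintptr_t page_size = 4096;
  uintptr_t addr = 0x12345678;
  uintptr_t page = addr & ~(page_size - 1);
  printf("%#" PRIxPTR " lives on page %#" PRIxPTR "\n", addr, page);  // 0x12345000
  return 0;
}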

View file

@@ -34,31 +34,31 @@ struct Range {
   bool operator==(const Range& other) const {
     return this->begin == other.begin && this->end == other.end;
   }
-  bool operator!=(const Range& other) const {
-    return !(*this == other);
-  }
+  bool operator!=(const Range& other) const { return !(*this == other); }
 };

 // Comparator for Ranges that returns equivalence for overlapping ranges
 struct compare_range {
-  bool operator()(const Range& a, const Range& b) const {
-    return a.end <= b.begin;
-  }
+  bool operator()(const Range& a, const Range& b) const { return a.end <= b.begin; }
 };

 class HeapWalker {
  public:
-  explicit HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
-      allocations_(allocator), allocation_bytes_(0),
-      roots_(allocator), root_vals_(allocator),
-      segv_handler_(allocator), walking_ptr_(0) {
+  explicit HeapWalker(Allocator<HeapWalker> allocator)
+      : allocator_(allocator),
+        allocations_(allocator),
+        allocation_bytes_(0),
+        roots_(allocator),
+        root_vals_(allocator),
+        segv_handler_(allocator),
+        walking_ptr_(0) {
     valid_allocations_range_.end = 0;
     valid_allocations_range_.begin = ~valid_allocations_range_.end;

-    segv_handler_.install(SIGSEGV,
-        [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
+    segv_handler_.install(
+        SIGSEGV, [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
           this->HandleSegFault(handler, signal, siginfo, uctx);
         });
   }

   ~HeapWalker() {}

@@ -68,15 +68,14 @@ class HeapWalker {
   bool DetectLeaks();

-  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks,
-      size_t* leak_bytes);
+  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks, size_t* leak_bytes);
   size_t Allocations();
   size_t AllocationBytes();

-  template<class F>
+  template <class F>
   void ForEachPtrInRange(const Range& range, F&& f);

-  template<class F>
+  template <class F>
   void ForEachAllocation(F&& f);

   struct AllocationInfo {

@@ -84,7 +83,6 @@ class HeapWalker {
   };

  private:
   void RecurseRoot(const Range& root);
   bool WordContainsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
   void HandleSegFault(ScopedSignalHandler&, int, siginfo_t*, void*);

@@ -103,7 +101,7 @@ class HeapWalker {
   uintptr_t walking_ptr_;
 };

-template<class F>
+template <class F>
 inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
   uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
   // TODO(ccross): we might need to consider a pointer to the end of a buffer

@@ -118,7 +116,7 @@ inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
   }
 }

-template<class F>
+template <class F>
 inline void HeapWalker::ForEachAllocation(F&& f) {
   for (auto& it : allocations_) {
     const Range& range = it.first;

View file

@@ -26,7 +26,7 @@
 // as a key in std::unordered_map.
 namespace std {
-template<>
+template <>
 struct hash<Leak::Backtrace> {
   std::size_t operator()(const Leak::Backtrace& key) const {
     std::size_t seed = 0;

@@ -40,7 +40,7 @@ struct hash<Leak::Backtrace> {
   }

  private:
-  template<typename T>
+  template <typename T>
   inline void hash_combine(std::size_t& seed, const T& v) const {
     std::hash<T> hasher;
     seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);

@@ -51,7 +51,7 @@ struct hash<Leak::Backtrace> {
 static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
   return (lhs.num_frames == rhs.num_frames) &&
          memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
 }

 #endif
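The hash_combine above is the familiar boost-style mixer: each frame perturbs the running seed, so both the frame values and their order affect the hash of a backtrace. A stand-alone sketch with illustrative frame addresses:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

// Same mixing step as the header above, outside the hash<> specialization.
template <typename T>
void hash_combine(std::size_t& seed, const T& v) {
  seed ^= std::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

int main() {
  uintptr_t frames[] = {0x1000, 0x2040, 0x3234};  // made-up return addresses
  std::size_t seed = 0;
  for (uintptr_t f : frames) hash_combine(seed, f);  // hash of the whole backtrace
  printf("%zx\n", seed);
  return 0;
}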

View file

@@ -31,11 +31,11 @@ void LeakFolding::ComputeDAG() {
   Allocator<SCCInfo> scc_allocator = allocator_;

-  for (auto& scc_nodes: scc_list) {
+  for (auto& scc_nodes : scc_list) {
     Allocator<SCCInfo>::unique_ptr leak_scc;
     leak_scc = scc_allocator.make_unique(scc_allocator);

-    for (auto& node: scc_nodes) {
+    for (auto& node : scc_nodes) {
       node->ptr->scc = leak_scc.get();
       leak_scc->count++;
       leak_scc->size += node->ptr->range.size();

@@ -46,7 +46,7 @@ void LeakFolding::ComputeDAG() {
   for (auto& it : leak_map_) {
     LeakInfo& leak = it.second;

-    for (auto& ref: leak.node.references_out) {
+    for (auto& ref : leak.node.references_out) {
       if (leak.scc != ref->ptr->scc) {
         leak.scc->node.Edge(&ref->ptr->scc->node);
       }

@@ -55,17 +55,14 @@ void LeakFolding::ComputeDAG() {
 }

 void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
-  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
-      [&](SCCInfo* scc) {
-        if (scc->accumulator != dominator) {
-          scc->accumulator = dominator;
-          dominator->cuumulative_size += scc->size;
-          dominator->cuumulative_count += scc->count;
-          scc->node.Foreach([&](SCCInfo* ref) {
-            walk(ref);
-          });
-        }
-      });
+  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_, [&](SCCInfo* scc) {
+    if (scc->accumulator != dominator) {
+      scc->accumulator = dominator;
+      dominator->cuumulative_size += scc->size;
+      dominator->cuumulative_count += scc->count;
+      scc->node.Foreach([&](SCCInfo* ref) { walk(ref); });
+    }
+  });
   walk(dominator);
 }

@@ -73,27 +70,25 @@ bool LeakFolding::FoldLeaks() {
   Allocator<LeakInfo> leak_allocator = allocator_;

   // Find all leaked allocations insert them into leak_map_ and leak_graph_
-  heap_walker_.ForEachAllocation(
-      [&](const Range& range, HeapWalker::AllocationInfo& allocation) {
-        if (!allocation.referenced_from_root) {
-          auto it = leak_map_.emplace(std::piecewise_construct,
-              std::forward_as_tuple(range),
-              std::forward_as_tuple(range, allocator_));
-          LeakInfo& leak = it.first->second;
-          leak_graph_.push_back(&leak.node);
-        }
-      });
+  heap_walker_.ForEachAllocation([&](const Range& range, HeapWalker::AllocationInfo& allocation) {
+    if (!allocation.referenced_from_root) {
+      auto it = leak_map_.emplace(std::piecewise_construct, std::forward_as_tuple(range),
+                                  std::forward_as_tuple(range, allocator_));
+      LeakInfo& leak = it.first->second;
+      leak_graph_.push_back(&leak.node);
+    }
+  });

   // Find references between leaked allocations and connect them in leak_graph_
   for (auto& it : leak_map_) {
     LeakInfo& leak = it.second;
     heap_walker_.ForEachPtrInRange(leak.range,
                                    [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
                                      if (!ptr_info->referenced_from_root) {
                                        LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
                                        leak.node.Edge(&ptr_leak->node);
                                      }
                                    });
   }

   // Convert the cyclic graph to a DAG by grouping strongly connected components

@@ -110,8 +105,8 @@ bool LeakFolding::FoldLeaks() {
   return true;
 }

-bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
-    size_t* num_leaks_out, size_t* leak_bytes_out) {
+bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked, size_t* num_leaks_out,
+                         size_t* leak_bytes_out) {
   size_t num_leaks = 0;
   size_t leak_bytes = 0;
   for (auto& it : leak_map_) {

@@ -123,9 +118,8 @@ bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
   for (auto& it : leak_map_) {
     const LeakInfo& leak = it.second;
     if (leak.scc->dominator) {
-      leaked.emplace_back(Leak{leak.range,
-          leak.scc->cuumulative_count - 1,
-          leak.scc->cuumulative_size - leak.range.size()});
+      leaked.emplace_back(Leak{leak.range, leak.scc->cuumulative_count - 1,
+                               leak.scc->cuumulative_size - leak.range.size()});
     }
   }

View file

@@ -22,8 +22,11 @@
 class LeakFolding {
  public:
   LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
-      : allocator_(allocator), heap_walker_(heap_walker),
-        leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
+      : allocator_(allocator),
+        heap_walker_(heap_walker),
+        leak_map_(allocator),
+        leak_graph_(allocator),
+        leak_scc_(allocator) {}

   bool FoldLeaks();

@@ -33,8 +36,7 @@ class LeakFolding {
     size_t referenced_size;
   };

-  bool Leaked(allocator::vector<Leak>& leaked,
-      size_t* num_leaks_out, size_t* leak_bytes_out);
+  bool Leaked(allocator::vector<Leak>& leaked, size_t* num_leaks_out, size_t* leak_bytes_out);

  private:
   DISALLOW_COPY_AND_ASSIGN(LeakFolding);

@@ -54,9 +56,15 @@ class LeakFolding {
     bool dominator;
     SCCInfo* accumulator;

-    explicit SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
-        count(0), size(0), cuumulative_count(0), cuumulative_size(0),
-        dominator(false), accumulator(nullptr) {}
+    explicit SCCInfo(Allocator<SCCInfo> allocator)
+        : node(this, allocator),
+          count(0),
+          size(0),
+          cuumulative_count(0),
+          cuumulative_size(0),
+          dominator(false),
+          accumulator(nullptr) {}

    private:
     SCCInfo(SCCInfo&&) = delete;
     DISALLOW_COPY_AND_ASSIGN(SCCInfo);

@@ -71,8 +79,7 @@ class LeakFolding {
     SCCInfo* scc;

     LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
-        : node(this, allocator), range(range),
-          scc(nullptr) {}
+        : node(this, allocator), range(range), scc(nullptr) {}

    private:
     DISALLOW_COPY_AND_ASSIGN(LeakInfo);

@@ -86,4 +93,4 @@ class LeakFolding {
   allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
 };

-#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
+#endif  // LIBMEMUNREACHABLE_LEAK_FOLDING_H_

View file

@@ -22,8 +22,8 @@
 #include "log.h"

 bool LeakPipe::SendFd(int sock, int fd) {
-  struct msghdr hdr{};
-  struct iovec iov{};
+  struct msghdr hdr {};
+  struct iovec iov {};
   unsigned int data = 0xfdfdfdfd;
   alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];

@@ -56,8 +56,8 @@ bool LeakPipe::SendFd(int sock, int fd) {
 }

 int LeakPipe::ReceiveFd(int sock) {
-  struct msghdr hdr{};
-  struct iovec iov{};
+  struct msghdr hdr {};
+  struct iovec iov {};
   unsigned int data;
   alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
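SendFd()/ReceiveFd() pass a file descriptor across a socketpair as SCM_RIGHTS ancillary data; the msghdr/iovec declarations reformatted above are the scaffolding for that. Below is a reduced sketch of the sending side under that assumption, with error handling and the real code's 0xfdfdfdfd sanity payload omitted; send_fd and the 'F' payload are illustrative.

#include <sys/socket.h>
#include <sys/uio.h>
#include <cstring>

// The fd rides along as ancillary data; the kernel duplicates it into the
// receiving process when the message is read.
static bool send_fd(int sock, int fd) {
  struct msghdr hdr {};
  struct iovec iov {};
  char payload = 'F';  // at least one byte of ordinary data must be sent
  iov.iov_base = &payload;
  iov.iov_len = sizeof(payload);
  hdr.msg_iov = &iov;
  hdr.msg_iovlen = 1;

  alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
  hdr.msg_control = cmsgbuf;
  hdr.msg_controllen = sizeof(cmsgbuf);
  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));  // attach the fd

  return sendmsg(sock, &hdr, 0) == static_cast<ssize_t>(sizeof(payload));
}

int main() {
  int sv[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) return 1;
  return send_fd(sv[0], /*fd=*/0) ? 0 : 1;  // demo: pass stdin's descriptor
}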

View file

@@ -34,15 +34,13 @@
 class LeakPipe {
  public:
   LeakPipe() {
-    int ret = socketpair(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0, sv_);
+    int ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv_);
     if (ret < 0) {
       MEM_LOG_ALWAYS_FATAL("failed to create socketpair: %s", strerror(errno));
     }
   }

-  ~LeakPipe() {
-    Close();
-  }
+  ~LeakPipe() { Close(); }

   void Close() {
     close(sv_[0]);

@@ -77,13 +75,9 @@ class LeakPipe {
    public:
     LeakPipeBase() : fd_(-1) {}

-    ~LeakPipeBase() {
-      Close();
-    }
+    ~LeakPipeBase() { Close(); }

-    void SetFd(int fd) {
-      fd_ = fd;
-    }
+    void SetFd(int fd) { fd_ = fd; }

     void Close() {
       close(fd_);

@@ -101,7 +95,7 @@ class LeakPipe {
    public:
     using LeakPipeBase::LeakPipeBase;

-    template<typename T>
+    template <typename T>
     bool Send(const T& value) {
       ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, &value, sizeof(T)));
       if (ret < 0) {

@@ -115,7 +109,7 @@ class LeakPipe {
       return true;
     }

-    template<class T, class Alloc = std::allocator<T>>
+    template <class T, class Alloc = std::allocator<T>>
     bool SendVector(const std::vector<T, Alloc>& vector) {
       size_t size = vector.size() * sizeof(T);
       if (!Send(size)) {

@@ -139,7 +133,7 @@ class LeakPipe {
    public:
     using LeakPipeBase::LeakPipeBase;

-    template<typename T>
+    template <typename T>
     bool Receive(T* value) {
       ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, reinterpret_cast<void*>(value), sizeof(T)));
       if (ret < 0) {

@@ -153,7 +147,7 @@ class LeakPipe {
       return true;
     }

-    template<class T, class Alloc = std::allocator<T>>
+    template <class T, class Alloc = std::allocator<T>>
     bool ReceiveVector(std::vector<T, Alloc>& vector) {
       size_t size = 0;
       if (!Receive(&size)) {

@@ -178,16 +172,11 @@ class LeakPipe {
       return true;
     }
   };

-  LeakPipeReceiver& Receiver() {
-    return receiver_;
-  }
+  LeakPipeReceiver& Receiver() { return receiver_; }

-  LeakPipeSender& Sender() {
-    return sender_;
-  }
+  LeakPipeSender& Sender() { return sender_; }

  private:
   LeakPipeReceiver receiver_;

@@ -198,4 +187,4 @@ class LeakPipe {
   int sv_[2];
 };

-#endif // LIBMEMUNREACHABLE_LEAK_PIPE_H_
+#endif  // LIBMEMUNREACHABLE_LEAK_PIPE_H_

View file

@@ -23,8 +23,8 @@

 #include "LineBuffer.h"

-LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len) : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {
-}
+LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len)
+    : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {}

 bool LineBuffer::GetLine(char** line, size_t* line_len) {
   while (true) {

View file

@@ -33,4 +33,4 @@ class LineBuffer {
   size_t bytes_ = 0;
 };

-#endif // _LIBMEMUNREACHABLE_LINE_BUFFER_H
+#endif  // _LIBMEMUNREACHABLE_LINE_BUFFER_H

View file

@@ -17,44 +17,43 @@
 #ifndef LIBMEMUNREACHABLE_LINKED_LIST_H_
 #define LIBMEMUNREACHABLE_LINKED_LIST_H_

-template<class T>
+template <class T>
 class LinkedList {
  public:
   LinkedList() : next_(this), prev_(this), data_() {}
-  explicit LinkedList(T data) : LinkedList() {
-    data_ = data;
-  }
+  explicit LinkedList(T data) : LinkedList() { data_ = data; }
   ~LinkedList() {}
   void insert(LinkedList<T>& node) {
     assert(node.empty());
     node.next_ = this->next_;
     node.next_->prev_ = &node;
     this->next_ = &node;
     node.prev_ = this;
   }
   void remove() {
     this->next_->prev_ = this->prev_;
     this->prev_->next_ = this->next_;
     this->next_ = this;
     this->prev_ = this;
   }
   T data() { return data_; }
   bool empty() { return next_ == this && prev_ == this; }
-  LinkedList<T> *next() { return next_; }
- private:
-  LinkedList<T> *next_;
-  LinkedList<T> *prev_;
+  LinkedList<T>* next() { return next_; }
+
+ private:
+  LinkedList<T>* next_;
+  LinkedList<T>* prev_;
   T data_;
 };

-template<class T>
+template <class T>
 class LinkedListHead {
  public:
   LinkedListHead() : node_() {}
   ~LinkedListHead() {}

  private:
   LinkedList<T> node_;
 };

 #endif
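Usage of the intrusive list above, in the style of Chunk::node_: a default-constructed node serves as the list head, insert() splices a node in right after it, and remove() unlinks in O(1) without touching the head. Note the header itself calls assert() but has no includes, so it relies on its includer for <cassert>. A small sketch, assuming the header above is on the include path; Job is illustrative.

#include <cassert>
#include "LinkedList.h"  // the header shown above

struct Job {
  Job() : node_(this) {}
  LinkedList<Job*> node_;  // intrusive hook, mirrors Chunk::node_
};

int main() {
  LinkedList<Job*> head;  // head node: data() is nullptr, list starts empty
  Job a, b;
  head.insert(a.node_);   // head -> a
  head.insert(b.node_);   // head -> b -> a (insert splices after head)
  assert(head.next()->data() == &b);
  a.node_.remove();       // unlink a in O(1); a's node points back at itself
  assert(a.node_.empty());
  assert(!b.node_.empty());
  return 0;
}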

View file

@@ -19,12 +19,12 @@
 #include <functional>
 #include <iomanip>
 #include <mutex>
-#include <string>
 #include <sstream>
+#include <string>
 #include <unordered_map>

-#include <backtrace.h>
 #include <android-base/macros.h>
+#include <backtrace.h>

 #include "Allocator.h"
 #include "HeapWalker.h"

@@ -37,9 +37,9 @@
 #include "Semaphore.h"
 #include "ThreadCapture.h"

-#include "memunreachable/memunreachable.h"
 #include "bionic.h"
 #include "log.h"
+#include "memunreachable/memunreachable.h"

 const size_t Leak::contents_length;

@@ -47,20 +47,21 @@ using namespace std::chrono_literals;
 class MemUnreachable {
  public:
-  MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
-      heap_walker_(allocator_) {}
+  MemUnreachable(pid_t pid, Allocator<void> allocator)
+      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
   bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                           const allocator::vector<Mapping>& mappings);
-  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
-      size_t* num_leaks, size_t* leak_bytes);
+  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
+                            size_t* leak_bytes);
   size_t Allocations() { return heap_walker_.Allocations(); }
   size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

  private:
   bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
                         allocator::vector<Mapping>& heap_mappings,
                         allocator::vector<Mapping>& anon_mappings,
                         allocator::vector<Mapping>& globals_mappings,
                         allocator::vector<Mapping>& stack_mappings);

   DISALLOW_COPY_AND_ASSIGN(MemUnreachable);

   pid_t pid_;
   Allocator<void> allocator_;

@@ -68,16 +69,17 @@ class MemUnreachable {
 };

 static void HeapIterate(const Mapping& heap_mapping,
                         const std::function<void(uintptr_t, size_t)>& func) {
   malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
                  [](uintptr_t base, size_t size, void* arg) {
                    auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
                    (*f)(base, size);
-                 }, const_cast<void*>(reinterpret_cast<const void*>(&func)));
+                 },
+                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
 }

 bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                                         const allocator::vector<Mapping>& mappings) {
   MEM_ALOGI("searching process %d for allocations", pid_);
   allocator::vector<Mapping> heap_mappings{mappings};
   allocator::vector<Mapping> anon_mappings{mappings};

@@ -118,8 +120,8 @@ bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& thr
   return true;
 }

-bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
-    size_t limit, size_t* num_leaks, size_t* leak_bytes) {
+bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
+                                          size_t* num_leaks, size_t* leak_bytes) {
   MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
   leaks.clear();

@@ -127,7 +129,6 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
     return false;
   }
-
   allocator::vector<Range> leaked1{allocator_};
   heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);

@@ -152,12 +153,12 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
   // in backtrace_map.
   leaks.reserve(leaked.size());

-  for (auto& it: leaked) {
+  for (auto& it : leaked) {
     leaks.emplace_back();
     Leak* leak = &leaks.back();

-    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
-        leak->backtrace.frames, leak->backtrace.max_frames);
+    ssize_t num_backtrace_frames = malloc_backtrace(
+        reinterpret_cast<void*>(it.range.begin), leak->backtrace.frames, leak->backtrace.max_frames);
     if (num_backtrace_frames > 0) {
       leak->backtrace.num_frames = num_backtrace_frames;

@@ -183,14 +184,13 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
     leak->referenced_size = it.referenced_size;
     leak->total_size = leak->size + leak->referenced_size;
     memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
            std::min(leak->size, Leak::contents_length));
   }

   MEM_ALOGI("folding done");

-  std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
-    return a.total_size > b.total_size;
-  });
+  std::sort(leaks.begin(), leaks.end(),
+            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });

   if (leaks.size() > limit) {
     leaks.resize(limit);

@@ -205,11 +205,10 @@ static bool has_prefix(const allocator::string& s, const char* prefix) {
 }

 bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
                                       allocator::vector<Mapping>& heap_mappings,
                                       allocator::vector<Mapping>& anon_mappings,
                                       allocator::vector<Mapping>& globals_mappings,
-                                      allocator::vector<Mapping>& stack_mappings)
-{
+                                      allocator::vector<Mapping>& stack_mappings) {
   heap_mappings.clear();
   anon_mappings.clear();
   globals_mappings.clear();

@@ -245,7 +244,8 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings
       stack_mappings.emplace_back(*it);
     } else if (mapping_name.size() == 0) {
       globals_mappings.emplace_back(*it);
-    } else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
+    } else if (has_prefix(mapping_name, "[anon:") &&
+               mapping_name != "[anon:leak_detector_malloc]") {
       // TODO(ccross): it would be nice to treat named anonymous mappings as
       // possible leaks, but naming something in a .bss or .data section makes
       // it impossible to distinguish them from mmaped and then named mappings.

@@ -256,7 +256,7 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings
   return true;
 }

-template<typename T>
+template <typename T>
 static inline const char* plural(T val) {
   return (val == 1) ? "" : "s";
 }

@@ -403,7 +403,6 @@ bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
 }

 std::string Leak::ToString(bool log_contents) const {
-
   std::ostringstream oss;
   oss << " " << std::dec << size;

@@ -492,8 +491,8 @@ std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
   oss << std::endl;

   for (auto it = leaks.begin(); it != leaks.end(); it++) {
     oss << it->ToString(log_contents);
     oss << std::endl;
   }

   return oss.str();

@@ -523,7 +522,6 @@ bool LogUnreachableMemory(bool log_contents, size_t limit) {
   return true;
 }

 bool NoLeaks() {
-
   UnreachableMemoryInfo info;
   if (!GetUnreachableMemory(info, 0)) {

View file

@@ -14,8 +14,8 @@
  * limitations under the License.
  */

-#include <inttypes.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <string.h>
 #include <unistd.h>

@@ -42,8 +42,8 @@ bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings) {
     int name_pos;
     char perms[5];
     Mapping mapping{};
-    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n",
-        &mapping.begin, &mapping.end, perms, &name_pos) == 3) {
+    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n", &mapping.begin,
+               &mapping.end, perms, &name_pos) == 3) {
       if (perms[0] == 'r') {
         mapping.read = true;
       }

View file

@@ -33,4 +33,4 @@ struct Mapping {
 // the line data.
 bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings);

-#endif // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
+#endif  // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_

View file

@@ -23,17 +23,17 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
 #include <sys/mman.h>
 #include <sys/syscall.h>
 #include <sys/types.h>
 #include <sys/wait.h>
+#include <unistd.h>

 #include "android-base/macros.h"

+#include "PtracerThread.h"
 #include "anon_vma_naming.h"
 #include "log.h"
-#include "PtracerThread.h"

 class Stack {
  public:

@@ -41,7 +41,7 @@ class Stack {
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     page_size_ = sysconf(_SC_PAGE_SIZE);
-    size_ += page_size_*2; // guard pages
+    size_ += page_size_ * 2;  // guard pages
     base_ = mmap(NULL, size_, prot, flags, -1, 0);
     if (base_ == MAP_FAILED) {
       base_ = NULL;

@@ -52,22 +52,20 @@ class Stack {
     mprotect(base_, page_size_, PROT_NONE);
     mprotect(top(), page_size_, PROT_NONE);
   };
-  ~Stack() {
-    munmap(base_, size_);
-  };
+  ~Stack() { munmap(base_, size_); };
   void* top() {
     return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base_) + size_ - page_size_);
   };

  private:
   DISALLOW_COPY_AND_ASSIGN(Stack);

-  void *base_;
+  void* base_;
   size_t size_;
   size_t page_size_;
 };

-PtracerThread::PtracerThread(const std::function<int()>& func) :
-    child_pid_(0) {
+PtracerThread::PtracerThread(const std::function<int()>& func) : child_pid_(0) {
   stack_ = std::make_unique<Stack>(PTHREAD_STACK_MIN);
   if (stack_->top() == nullptr) {
     MEM_LOG_ALWAYS_FATAL("failed to mmap child stack: %s", strerror(errno));

@@ -93,14 +91,13 @@ bool PtracerThread::Start() {
   std::unique_lock<std::mutex> lk(m_);

   // Convert from void(*)(void*) to lambda with captures
-  auto proxy = [](void *arg) -> int {
+  auto proxy = [](void* arg) -> int {
     prctl(PR_SET_NAME, "libmemunreachable ptrace thread");
     return (*reinterpret_cast<std::function<int()>*>(arg))();
   };

-  child_pid_ = clone(proxy, stack_->top(),
-      CLONE_VM|CLONE_FS|CLONE_FILES/*|CLONE_UNTRACED*/,
-      reinterpret_cast<void*>(&func_));
+  child_pid_ = clone(proxy, stack_->top(), CLONE_VM | CLONE_FS | CLONE_FILES /*|CLONE_UNTRACED*/,
+                     reinterpret_cast<void*>(&func_));
   if (child_pid_ < 0) {
     MEM_ALOGE("failed to clone child: %s", strerror(errno));
     return false;


@@ -36,6 +36,7 @@ class PtracerThread {
  ~PtracerThread();
  bool Start();
  int Join();

 private:
  void SetTracer(pid_t);
  void ClearTracer();
@@ -47,4 +48,4 @@ class PtracerThread {
  pid_t child_pid_;
};

#endif  // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
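Editorial note: a hedged sketch of how this class is meant to be driven, mirroring its use elsewhere in libmemunreachable. The callable runs in a cloned child that shares the parent's address space, and Join() collects its exit status; the usage itself is illustrative, not from this commit.

// Hypothetical usage sketch, assuming the constructor/Start/Join shown above.
PtracerThread thread([&]() -> int {
  // Runs in the CLONE_VM child; may ptrace-attach back to the parent.
  return 0;  // becomes the value returned by Join()
});
if (thread.Start()) {
  int status = thread.Join();
  (void)status;
}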


@@ -27,11 +27,9 @@ class ScopedAlarm {
 public:
  ScopedAlarm(std::chrono::microseconds us, std::function<void()> func) {
    func_ = func;
    struct sigaction oldact {};
    struct sigaction act {};
    act.sa_handler = [](int) { ScopedAlarm::func_(); };
    sigaction(SIGALRM, &act, &oldact);

    std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
@@ -43,10 +41,11 @@ class ScopedAlarm {
  ~ScopedAlarm() {
    itimerval t = itimerval{};
    setitimer(ITIMER_REAL, &t, NULL);
    struct sigaction act {};
    act.sa_handler = SIG_DFL;
    sigaction(SIGALRM, &act, NULL);
  }

 private:
  static std::function<void()> func_;
};
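Editorial note: ScopedAlarm arms a real-time timer for the lifetime of the object and restores SIG_DFL on destruction. A hedged usage sketch (the timeout value and the fatal-log reaction are illustrative assumptions):

// Sketch: run the callback if the enclosing scope takes longer than 500ms.
// ScopedAlarm::func_ must be defined once per binary, as the tests below do.
{
  ScopedAlarm alarm(std::chrono::microseconds(500000),
                    []() { MEM_LOG_ALWAYS_FATAL("operation timed out"); });
  // ... bounded work ...
}  // destructor cancels the itimer and restores the default SIGALRM handler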


@@ -21,16 +21,14 @@
#include "android-base/macros.h"

#include "ScopedAlarm.h"
#include "bionic.h"
#include "log.h"

class DisableMallocGuard {
 public:
  DisableMallocGuard() : disabled_(false) {}
  ~DisableMallocGuard() { Enable(); }

  void Disable() {
    if (!disabled_) {
@@ -45,6 +43,7 @@ class DisableMallocGuard{
      disabled_ = false;
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DisableMallocGuard);
  bool disabled_;
@@ -59,13 +58,9 @@ class DisableMallocGuard{
// here.
class ScopedDisableMalloc {
 public:
  ScopedDisableMalloc() { disable_malloc_.Disable(); }

  ~ScopedDisableMalloc() { disable_malloc_.Enable(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(ScopedDisableMalloc);
@@ -74,18 +69,15 @@ class ScopedDisableMalloc {
class ScopedDisableMallocTimeout {
 public:
  explicit ScopedDisableMallocTimeout(
      std::chrono::milliseconds timeout = std::chrono::milliseconds(2000))
      : timeout_(timeout), timed_out_(false), disable_malloc_() {
    Disable();
  }

  ~ScopedDisableMallocTimeout() { Enable(); }

  bool timed_out() { return timed_out_; }

  void Enable() {
    disable_malloc_.Enable();
@@ -110,4 +102,4 @@ class ScopedDisableMallocTimeout {
  DisableMallocGuard disable_malloc_;
};

#endif  // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
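Editorial note: the timeout variant exists because holding the global malloc lock indefinitely can deadlock the process being inspected. A hedged usage sketch based on the tests below (the comments describe the intended contract, not verified behavior):

// Sketch: disable malloc while inspecting heap state, but give up after the
// default 2s so a wedged allocator cannot hang the caller forever.
{
  ScopedDisableMallocTimeout disable_malloc;
  // ... walk heap data structures that must not mutate ...
  if (disable_malloc.timed_out()) {
    // Malloc was re-enabled behind our back; the results are not trustworthy.
  }
}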


@@ -29,28 +29,22 @@ class ScopedPipe {
      MEM_LOG_ALWAYS_FATAL("failed to open pipe");
    }
  }
  ~ScopedPipe() { Close(); }

  ScopedPipe(ScopedPipe&& other) {
    SetReceiver(other.ReleaseReceiver());
    SetSender(other.ReleaseSender());
  }

  ScopedPipe& operator=(ScopedPipe&& other) {
    SetReceiver(other.ReleaseReceiver());
    SetSender(other.ReleaseSender());
    return *this;
  }

  void CloseReceiver() { close(ReleaseReceiver()); }

  void CloseSender() { close(ReleaseSender()); }

  void Close() {
    CloseReceiver();


@@ -31,9 +31,7 @@ class ScopedSignalHandler {
  using Fn = std::function<void(ScopedSignalHandler&, int, siginfo_t*, void*)>;

  explicit ScopedSignalHandler(Allocator<Fn> allocator) : allocator_(allocator), signal_(-1) {}
  ~ScopedSignalHandler() { reset(); }

  template <class F>
  void install(int signal, F&& f) {
@@ -65,7 +63,6 @@ class ScopedSignalHandler {
    }
  }

 private:
  using SignalFn = std::function<void(int, siginfo_t*, void*)>;
  DISALLOW_COPY_AND_ASSIGN(ScopedSignalHandler);
@@ -77,4 +74,4 @@ class ScopedSignalHandler {
  static SignalFn handler_;
};

#endif  // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
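Editorial note: a hedged sketch of the install/reset cycle, assuming the Allocator-based constructor and the Fn signature shown above (the SIGSEGV handler in HeapWalker is the real consumer; the conversion from a Heap to Allocator<Fn> is assumed from its use there):

// Sketch: catch one SIGSEGV, then uninstall by resetting to the previous
// disposition from inside the handler.
Heap heap;
ScopedSignalHandler handler(heap);
handler.install(SIGSEGV, [](ScopedSignalHandler& self, int, siginfo_t*, void*) {
  self.reset();  // restore the prior handler after the first hit
});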


@@ -29,7 +29,7 @@ class Semaphore {
  void Wait(std::chrono::milliseconds ms) {
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait_for(lk, ms, [&] {
      if (count_ > 0) {
        count_--;
        return true;
@@ -44,6 +44,7 @@ class Semaphore {
    }
    cv_.notify_one();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Semaphore);
@@ -52,5 +53,4 @@ class Semaphore {
  std::condition_variable cv_;
};

#endif  // LIBMEMUNREACHABLE_SEMAPHORE_H_
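Editorial note: a hedged sketch of the Wait/Post pairing this condition-variable semaphore is used for. Post() is assumed from the notify_one() tail visible above, and the default-constructed count of zero is an assumption:

// Sketch: one thread signals, another waits with a deadline.
Semaphore sem;
std::thread worker([&] {
  // ... produce something ...
  sem.Post();  // assumed counterpart to Wait(); wakes one waiter
});
sem.Wait(std::chrono::milliseconds(100));  // returns early once Post() arrives
worker.join();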


@@ -24,7 +24,7 @@
#include "Allocator.h"

template <class T>
class Node {
 public:
  allocator::set<Node<T>*> references_in;
@@ -34,39 +34,41 @@ class Node {
  T* ptr;

  Node(T* ptr, Allocator<Node> allocator)
      : references_in(allocator), references_out(allocator), ptr(ptr){};
  Node(Node&& rhs) = default;

  void Edge(Node<T>* ref) {
    references_out.emplace(ref);
    ref->references_in.emplace(this);
  }

  template <class F>
  void Foreach(F&& f) {
    for (auto& node : references_out) {
      f(node->ptr);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Node<T>);
};

template <class T>
using Graph = allocator::vector<Node<T>*>;

template <class T>
using SCC = allocator::vector<Node<T>*>;

template <class T>
using SCCList = allocator::vector<SCC<T>>;

template <class T>
class TarjanAlgorithm {
 public:
  explicit TarjanAlgorithm(Allocator<void> allocator)
      : index_(0), stack_(allocator), components_(allocator) {}

  void Execute(Graph<T>& graph, SCCList<T>& out);

 private:
  static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
  void Tarjan(Node<T>* vertex, Graph<T>& graph);
@@ -76,17 +78,17 @@ class TarjanAlgorithm {
  SCCList<T> components_;
};

template <class T>
void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
  stack_.clear();
  components_.clear();
  index_ = 0;
  for (auto& it : graph) {
    it->index = UNDEFINED_INDEX;
    it->lowlink = UNDEFINED_INDEX;
  }

  for (auto& it : graph) {
    if (it->index == UNDEFINED_INDEX) {
      Tarjan(it, graph);
    }
@@ -94,14 +96,14 @@ void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
  out.swap(components_);
}

template <class T>
void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
  assert(vertex->index == UNDEFINED_INDEX);
  vertex->index = index_;
  vertex->lowlink = index_;
  index_++;
  stack_.push_back(vertex);
  for (auto& it : vertex->references_out) {
    Node<T>* vertex_next = it;
    if (vertex_next->index == UNDEFINED_INDEX) {
      Tarjan(vertex_next, graph);
@@ -123,10 +125,10 @@ void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
    }
  }
}

template <class T>
void Tarjan(Graph<T>& graph, SCCList<T>& out) {
  TarjanAlgorithm<T> tarjan{graph.get_allocator()};
  tarjan.Execute(graph, out);
}

#endif  // LIBMEMUNREACHABLE_TARJAN_H_
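Editorial note: to make the template plumbing concrete, a hedged sketch of running the algorithm on a three-node graph (two nodes in a cycle plus one follower). Names follow the declarations above; the Heap/Allocator machinery is assumed from Allocator.h.

// Sketch, assuming Heap converts to the allocator types used above.
Heap heap;
Allocator<Node<int>> node_allocator(heap);
int a = 0, b = 1, c = 2;
Node<int> na(&a, node_allocator);
Node<int> nb(&b, node_allocator);
Node<int> nc(&c, node_allocator);
na.Edge(&nb);
nb.Edge(&na);  // na <-> nb form one strongly connected component
nb.Edge(&nc);  // nc is a component of its own

Graph<int> graph(heap);
graph.push_back(&na);
graph.push_back(&nb);
graph.push_back(&nc);

SCCList<int> sccs(heap);
Tarjan(graph, sccs);  // expect two SCCs: {na, nb} and {nc}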


@@ -21,13 +21,13 @@
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

#include <map>
#include <memory>
@@ -50,12 +50,12 @@
// Convert a pid > 0 to a string. sprintf might allocate, so we can't use it.
// Returns a pointer somewhere in buf to a null terminated string, or NULL
// on error.
static char* pid_to_str(char* buf, size_t len, pid_t pid) {
  if (pid <= 0) {
    return nullptr;
  }

  char* ptr = buf + len - 1;
  *ptr = 0;
  while (pid > 0) {
    ptr--;
@@ -79,6 +79,7 @@ class ThreadCaptureImpl {
  bool ReleaseThread(pid_t tid);
  bool CapturedThreadInfo(ThreadInfoList& threads);
  void InjectTestFunc(std::function<void(pid_t)>&& f) { inject_test_func_ = f; }

 private:
  int CaptureThread(pid_t tid);
  bool ReleaseThread(pid_t tid, unsigned int signal);
@@ -92,9 +93,8 @@ class ThreadCaptureImpl {
  std::function<void(pid_t)> inject_test_func_;
};

ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator)
    : captured_threads_(allocator), allocator_(allocator), pid_(pid) {}

bool ThreadCaptureImpl::ListThreads(TidList& tids) {
  tids.clear();
@@ -115,11 +115,11 @@ bool ThreadCaptureImpl::ListThreads(TidList& tids) {
  }

  struct linux_dirent64 {
    uint64_t d_ino;
    int64_t d_off;
    uint16_t d_reclen;
    char d_type;
    char d_name[];
  } __attribute((packed));
  char dirent_buf[4096];
  ssize_t nread;
@@ -209,7 +209,7 @@ int ThreadCaptureImpl::PtraceAttach(pid_t tid) {
bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
  thread_info.tid = tid;

  const unsigned int max_num_regs = 128;  // larger than number of registers on any device
  uintptr_t regs[max_num_regs];
  struct iovec iovec;
  iovec.iov_base = &regs;
@@ -243,7 +243,7 @@ bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
  thread_info.stack = std::pair<uintptr_t, uintptr_t>(regs[sp], 0);

  return true;
}

int ThreadCaptureImpl::CaptureThread(pid_t tid) {
@@ -266,7 +266,7 @@ int ThreadCaptureImpl::CaptureThread(pid_t tid) {
  unsigned int resume_signal = 0;

  unsigned int signal = WSTOPSIG(status);

  if ((status >> 16) == PTRACE_EVENT_STOP) {
    switch (signal) {
      case SIGSTOP:
@@ -307,7 +307,7 @@ bool ThreadCaptureImpl::ReleaseThread(pid_t tid, unsigned int signal) {
bool ThreadCaptureImpl::ReleaseThreads() {
  bool ret = true;
  for (auto it = captured_threads_.begin(); it != captured_threads_.end();) {
    if (ReleaseThread(it->first, it->second)) {
      it = captured_threads_.erase(it);
    } else {


@@ -33,7 +33,7 @@ using ThreadInfoList = allocator::vector<ThreadInfo>;
class ThreadCaptureImpl;

class ThreadCapture {
 public:
  ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator);
  ~ThreadCapture();
@@ -44,7 +44,7 @@ public:
  bool CapturedThreadInfo(ThreadInfoList& threads);
  void InjectTestFunc(std::function<void(pid_t)>&& f);

 private:
  ThreadCapture(const ThreadCapture&) = delete;
  void operator=(const ThreadCapture&) = delete;


@@ -19,7 +19,7 @@
#include <sys/prctl.h>

#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0

#endif  // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_


@@ -17,9 +17,9 @@
#ifndef LIBMEMUNREACHABLE_BIONIC_H_
#define LIBMEMUNREACHABLE_BIONIC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

@@ -27,9 +27,9 @@ __BEGIN_DECLS
extern void malloc_disable();
extern void malloc_enable();
extern int malloc_iterate(uintptr_t base, size_t size,
                          void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
extern ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);

__END_DECLS

#endif  // LIBMEMUNREACHABLE_BIONIC_H_
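Editorial note: a hedged sketch of the contract these declarations imply: disable allocation, snapshot the allocations overlapping a region via the callback, then re-enable. IterateStats, CountAllocation, and the region bounds are hypothetical names introduced for illustration.

struct IterateStats {
  size_t count;
  size_t bytes;
};

// Invoked once per live allocation that intersects [base, base + size).
static void CountAllocation(uintptr_t base, size_t size, void* arg) {
  IterateStats* stats = static_cast<IterateStats*>(arg);
  stats->count++;
  stats->bytes += size;
}

void SnapshotRegion(uintptr_t region_base, size_t region_size) {
  IterateStats stats{};
  malloc_disable();  // freeze the heap so the iteration sees a stable view
  malloc_iterate(region_base, region_size, CountAllocation, &stats);
  malloc_enable();
  // stats.count / stats.bytes now describe the region's live allocations.
}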


@@ -21,8 +21,8 @@
#ifdef __cplusplus

#include <string>
#include <vector>

struct Leak {
  uintptr_t begin;
@@ -83,4 +83,4 @@ bool NoLeaks();

__END_DECLS

#endif  // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
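Editorial note: for orientation, a hedged sketch of the public entry points this header declares. The UnreachableMemoryInfo type, its leaks field, and the GetUnreachableMemory signature are recalled from the library's public API rather than shown in the visible hunks, so treat them as assumptions.

#include <memunreachable/memunreachable.h>

void LogLeaks() {
  UnreachableMemoryInfo info;
  if (GetUnreachableMemory(info)) {  // collects up to the default leak limit
    for (const Leak& leak : info.leaks) {
      // leak.begin and leak.size identify one unreachable allocation.
    }
  }
}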


@@ -43,4 +43,4 @@
#endif

#endif  // LIBMEMUNREACHABLE_LOG_H_


@@ -16,44 +16,42 @@
#include <Allocator.h>
#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>

std::function<void()> ScopedAlarm::func_;

class AllocatorTest : public testing::Test {
 protected:
  AllocatorTest() : heap(), disable_malloc_() {}
  virtual void SetUp() { heap_count = 0; }
  virtual void TearDown() {
    ASSERT_EQ(heap_count, 0);
    ASSERT_TRUE(heap.empty());
    ASSERT_FALSE(disable_malloc_.timed_out());
  }
  Heap heap;

 private:
  ScopedDisableMallocTimeout disable_malloc_;
};

TEST_F(AllocatorTest, simple) {
  Allocator<char[100]> allocator(heap);
  void* ptr = allocator.allocate();
  ASSERT_TRUE(ptr != NULL);
  allocator.deallocate(ptr);
}

TEST_F(AllocatorTest, multiple) {
  Allocator<char[100]> allocator(heap);
  void* ptr1 = allocator.allocate();
  ASSERT_TRUE(ptr1 != NULL);
  void* ptr2 = allocator.allocate();
  ASSERT_TRUE(ptr2 != NULL);
  ASSERT_NE(ptr1, ptr2);
  allocator.deallocate(ptr1);
  void* ptr3 = allocator.allocate();
  ASSERT_EQ(ptr1, ptr3);
  allocator.deallocate(ptr3);
  allocator.deallocate(ptr2);
@@ -63,7 +61,7 @@ TEST_F(AllocatorTest, many) {
  const int num = 4096;
  const int size = 128;
  Allocator<char[size]> allocator(heap);
  void* ptr[num];
  for (int i = 0; i < num; i++) {
    ptr[i] = allocator.allocate();
    memset(ptr[i], 0xaa, size);
@@ -87,7 +85,7 @@ TEST_F(AllocatorTest, many) {
TEST_F(AllocatorTest, large) {
  const size_t size = 1024 * 1024;
  Allocator<char[size]> allocator(heap);
  void* ptr = allocator.allocate();
  memset(ptr, 0xaa, size);
  allocator.deallocate(ptr);
}
@@ -96,7 +94,7 @@ TEST_F(AllocatorTest, many_large) {
  const int num = 128;
  const int size = 1024 * 1024;
  Allocator<char[size]> allocator(heap);
  void* ptr[num];
  for (int i = 0; i < num; i++) {
    ptr[i] = allocator.allocate();
    memset(ptr[i], 0xaa, size);


@@ -19,8 +19,8 @@
#include <chrono>
#include <functional>

#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>

using namespace std::chrono_literals;

@@ -36,75 +36,83 @@ class DisableMallocTest : public ::testing::Test {
};

TEST_F(DisableMallocTest, reenable) {
  ASSERT_EXIT(
      {
        alarm(100ms);
        void* ptr1 = malloc(128);
        ASSERT_NE(ptr1, nullptr);
        free(ptr1);
        { ScopedDisableMalloc disable_malloc; }
        void* ptr2 = malloc(128);
        ASSERT_NE(ptr2, nullptr);
        free(ptr2);
        _exit(1);
      },
      ::testing::ExitedWithCode(1), "");
}

TEST_F(DisableMallocTest, deadlock_allocate) {
  ASSERT_DEATH(
      {
        void* ptr = malloc(128);
        ASSERT_NE(ptr, nullptr);
        free(ptr);
        {
          alarm(100ms);
          ScopedDisableMalloc disable_malloc;
          void* ptr = malloc(128);
          ASSERT_NE(ptr, nullptr);
          free(ptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_new) {
  ASSERT_DEATH(
      {
        char* ptr = new (char);
        ASSERT_NE(ptr, nullptr);
        delete (ptr);
        {
          alarm(100ms);
          ScopedDisableMalloc disable_malloc;
          char* ptr = new (std::nothrow)(char);
          ASSERT_NE(ptr, nullptr);
          delete (ptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_delete) {
  ASSERT_DEATH(
      {
        char* ptr = new (char);
        ASSERT_NE(ptr, nullptr);
        {
          alarm(250ms);
          ScopedDisableMalloc disable_malloc;
          delete (ptr);
          // Force ptr usage or this code gets optimized away by the arm64 compiler.
          ASSERT_NE(ptr, nullptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_free) {
  ASSERT_DEATH(
      {
        void* ptr = malloc(128);
        ASSERT_NE(ptr, nullptr);
        {
          alarm(100ms);
          ScopedDisableMalloc disable_malloc;
          free(ptr);
        }
      },
      "");
}

TEST_F(DisableMallocTest, deadlock_fork) {
@@ -113,6 +121,6 @@ TEST_F(DisableMallocTest, deadlock_fork) {
      alarm(100ms);
      ScopedDisableMalloc disable_malloc;
      fork();
    }
  }, "");
}


@@ -19,8 +19,8 @@
#include "HeapWalker.h"

#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>

#include "Allocator.h"

class HeapWalkerTest : public ::testing::Test {
@@ -172,20 +172,20 @@ TEST_F(HeapWalkerTest, cycle) {
  ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));

  EXPECT_EQ(2U, num_leaks);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());
}

TEST_F(HeapWalkerTest, segv) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  ASSERT_NE(buffer1, nullptr);
  void* buffer2;
  buffer2 = &buffer1;

  HeapWalker heap_walker(heap_);
  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1) + page_size);
  heap_walker.Root(buffer_begin(buffer2), buffer_end(buffer2));

  ASSERT_EQ(true, heap_walker.DetectLeaks());


@@ -16,8 +16,6 @@
#include "bionic.h"

void malloc_disable() {}

void malloc_enable() {}


@@ -14,11 +14,11 @@
 * limitations under the License.
 */

#include "LeakFolding.h"
#include "HeapWalker.h"

#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>

#include "Allocator.h"

class LeakFoldingTest : public ::testing::Test {
@@ -84,7 +84,7 @@ TEST_F(LeakFoldingTest, two) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(2U, num_leaks);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());
  EXPECT_EQ(0U, leaked[0].referenced_count);
  EXPECT_EQ(0U, leaked[0].referenced_size);
@@ -113,7 +113,7 @@ TEST_F(LeakFoldingTest, dominator) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(2U, num_leaks);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(1U, leaked.size());
  EXPECT_EQ(1U, leaked[0].referenced_count);
  EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
@@ -144,10 +144,10 @@ TEST_F(LeakFoldingTest, cycle) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(3U, num_leaks);
  EXPECT_EQ(3 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(1U, leaked.size());
  EXPECT_EQ(2U, leaked[0].referenced_count);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
}

TEST_F(LeakFoldingTest, dominator_cycle) {
@@ -175,13 +175,13 @@ TEST_F(LeakFoldingTest, dominator_cycle) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(3U, num_leaks);
  EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());

  EXPECT_EQ(2U, leaked[0].referenced_count);
  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(2U, leaked[1].referenced_count);
  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[1].referenced_size);
}

TEST_F(LeakFoldingTest, two_cycles) {
@@ -218,12 +218,12 @@ TEST_F(LeakFoldingTest, two_cycles) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(6U, num_leaks);
  EXPECT_EQ(6 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(2U, leaked.size());
  EXPECT_EQ(2U, leaked[0].referenced_count);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
  EXPECT_EQ(2U, leaked[1].referenced_count);
  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[1].referenced_size);
}

TEST_F(LeakFoldingTest, two_dominator_cycles) {
@@ -254,7 +254,7 @@ TEST_F(LeakFoldingTest, two_dominator_cycles) {
  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));

  EXPECT_EQ(4U, num_leaks);
  EXPECT_EQ(4 * sizeof(uintptr_t), leaked_bytes);
  ASSERT_EQ(4U, leaked.size());
  EXPECT_EQ(1U, leaked[0].referenced_count);
  EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
@@ -272,13 +272,13 @@ TEST_F(LeakFoldingTest, giant_dominator_cycle) {
  HeapWalker heap_walker(heap_);

  for (size_t i = 0; i < n; i++) {
    ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
  }

  for (size_t i = 0; i < n - 1; i++) {
    buffer[i] = &buffer[i + 1];
  }
  buffer[n - 1] = &buffer[0];
@@ -306,15 +306,15 @@ TEST_F(LeakFoldingTest, giant_cycle) {
  HeapWalker heap_walker(heap_);

  for (size_t i = 0; i < n - 1; i++) {
    buffer[i] = &buffer[i + 1];
  }
  buffer[n - 1] = &buffer[0];
  buffer1[0] = &buffer[0];

  for (size_t i = 0; i < n; i++) {
    ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
  }

  ALLOCATION(heap_walker, buffer1);


@@ -16,8 +16,8 @@
#include <fcntl.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>

#include <gtest/gtest.h>

@@ -25,23 +25,16 @@
class HiddenPointer {
 public:
  explicit HiddenPointer(size_t size = 256) { Set(malloc(size)); }
  ~HiddenPointer() { Free(); }

  void* Get() { return reinterpret_cast<void*>(~ptr_); }

  void Free() {
    free(Get());
    Set(nullptr);
  }

 private:
  void Set(void* ptr) { ptr_ = ~reinterpret_cast<uintptr_t>(ptr); }

  volatile uintptr_t ptr_;
};
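Editorial note: HiddenPointer is the test's way of manufacturing a guaranteed leak. Because only the bitwise complement of the pointer is stored, no word in memory actually points at the allocation, so a conservative scan must report it. A hedged sketch of the intended use:

// Sketch: while `hidden` holds the complemented pointer, the 256-byte block is
// unreachable to the scanner; Free() recovers the real pointer and releases it.
HiddenPointer hidden;  // malloc(256), stored internally as ~ptr
// A scan taken here should report exactly this allocation as unreachable.
hidden.Free();         // after this, nothing should be reported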


@@ -45,12 +45,10 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
    WaitForThreads();
  }

  virtual void TearDown() { ASSERT_TRUE(heap.empty()); }

 protected:
  template <class Function>
  void StartThreads(unsigned int threads, Function&& func) {
    threads_.reserve(threads);
    tids_.reserve(threads);
@@ -68,14 +66,14 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
      {
        std::unique_lock<std::mutex> lk(m_);
        cv_stop_.wait(lk, [&] { return stop_; });
      }
    });
  }

  {
    std::unique_lock<std::mutex> lk(m_);
    cv_start_.wait(lk, [&] { return tids_.size() == threads; });
  }
}
@@ -93,9 +91,7 @@ class ThreadListTest : public ::testing::TestWithParam<int> {
    tids_.clear();
  }

  std::vector<pid_t>& tids() { return tids_; }

  Heap heap;
@@ -143,7 +139,7 @@ TEST_F(ThreadListTest, list_one) {
TEST_P(ThreadListTest, list_some) {
  const unsigned int threads = GetParam() - 1;

  StartThreads(threads, []() {});
  std::vector<pid_t> expected_tids = tids();
  expected_tids.push_back(getpid());
@@ -176,10 +172,8 @@ class ThreadCaptureTest : public ThreadListTest {
 public:
  ThreadCaptureTest() {}
  ~ThreadCaptureTest() {}
  void Fork(std::function<void()>&& child_init, std::function<void()>&& child_cleanup,
            std::function<void(pid_t)>&& parent) {
    ScopedPipe start_pipe;
    ScopedPipe stop_pipe;
@@ -211,39 +205,40 @@ class ThreadCaptureTest : public ThreadListTest {
TEST_P(ThreadCaptureTest, capture_some) {
  const unsigned int threads = GetParam();

  Fork(
      [&]() {
        // child init
        StartThreads(threads - 1, []() {});
      },
      [&]() {
        // child cleanup
        StopThreads();
      },
      [&](pid_t child) {
        // parent
        ASSERT_GT(child, 0);
        {
          ScopedDisableMallocTimeout disable_malloc;

          ThreadCapture thread_capture(child, heap);
          auto list_tids = allocator::vector<pid_t>(heap);
          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
          ASSERT_EQ(threads, list_tids.size());

          ASSERT_TRUE(thread_capture.CaptureThreads());

          auto thread_info = allocator::vector<ThreadInfo>(heap);
          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
          ASSERT_EQ(threads, thread_info.size());
          ASSERT_TRUE(thread_capture.ReleaseThreads());

          if (!HasFailure()) {
            ASSERT_FALSE(disable_malloc.timed_out());
          }
        }
      });
}

INSTANTIATE_TEST_CASE_P(ThreadCaptureTest, ThreadCaptureTest, ::testing::Values(1, 2, 10, 1024));
@@ -262,7 +257,7 @@ TEST_F(ThreadCaptureTest, capture_kill) {
    ScopedDisableMallocTimeout disable_malloc;

    ThreadCapture thread_capture(ret, heap);
    thread_capture.InjectTestFunc([&](pid_t tid) {
      syscall(SYS_tgkill, ret, tid, SIGKILL);
      usleep(10000);
    });
@@ -288,62 +283,63 @@ TEST_F(ThreadCaptureTest, capture_signal) {
  // For signal handler
  static ScopedPipe* g_pipe;

  Fork(
      [&]() {
        // child init
        pipe.CloseReceiver();

        g_pipe = &pipe;

        struct sigaction act {};
        act.sa_handler = [](int) {
          char buf = '+';
          write(g_pipe->Sender(), &buf, 1);
          g_pipe->CloseSender();
        };
        sigaction(sig, &act, NULL);
        sigset_t set;
        sigemptyset(&set);
        sigaddset(&set, sig);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
      },
      [&]() {
        // child cleanup
        g_pipe = nullptr;
        pipe.Close();
      },
      [&](pid_t child) {
        // parent
        ASSERT_GT(child, 0);
        pipe.CloseSender();
        {
          ScopedDisableMallocTimeout disable_malloc;

          ThreadCapture thread_capture(child, heap);
          thread_capture.InjectTestFunc([&](pid_t tid) {
            syscall(SYS_tgkill, child, tid, sig);
            usleep(10000);
          });

          auto list_tids = allocator::vector<pid_t>(heap);
          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
          ASSERT_EQ(1U, list_tids.size());

          ASSERT_TRUE(thread_capture.CaptureThreads());

          auto thread_info = allocator::vector<ThreadInfo>(heap);
          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
          ASSERT_EQ(1U, thread_info.size());
          ASSERT_TRUE(thread_capture.ReleaseThreads());

          usleep(100000);
          char buf;
          ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
          ASSERT_EQ(buf, '+');

          if (!HasFailure()) {
            ASSERT_FALSE(disable_malloc.timed_out());
          }
        }
      });
}