libutils: RefBase always disallow on stack
Before, we only did this check in the sp<> constructors, but we can always
make it during the initial incStrong. Since prebuilts call into the existing
report function, sp_report_stack_pointer(), declared in StrongPointer.h, we
still declare it there and call it from RefBase.cpp.

Bug: 232557259
Test: libutils_test
Change-Id: I4080b1869b83ecf655fc9c182b6de768a6358adf
commit c340a08b1b
parent ccb1ce32cc
2 changed files with 26 additions and 27 deletions
RefBase.cpp
@@ -149,6 +149,29 @@ namespace android {
 // Same for weak counts.
 #define BAD_WEAK(c) ((c) == 0 || ((c) & (~MAX_COUNT)) != 0)
 
+// see utils/StrongPointer.h - declared there for legacy reasons
+void sp_report_stack_pointer();
+
+// Check whether address is definitely on the calling stack. We actually check whether it is on
+// the same 4K page as the frame pointer.
+//
+// Assumptions:
+// - Pages are never smaller than 4K (MIN_PAGE_SIZE)
+// - Malloced memory never shares a page with a stack.
+//
+// It does not appear safe to broaden this check to include adjacent pages; apparently this code
+// is used in environments where there may not be a guard page below (at higher addresses than)
+// the bottom of the stack.
+static void check_not_on_stack(const void* ptr) {
+    static constexpr int MIN_PAGE_SIZE = 0x1000;  // 4K. Safer than including sys/user.h.
+    static constexpr uintptr_t MIN_PAGE_MASK = ~static_cast<uintptr_t>(MIN_PAGE_SIZE - 1);
+    uintptr_t my_frame_address =
+            reinterpret_cast<uintptr_t>(__builtin_frame_address(0 /* this frame */));
+    if (((reinterpret_cast<uintptr_t>(ptr) ^ my_frame_address) & MIN_PAGE_MASK) == 0) {
+        sp_report_stack_pointer();
+    }
+}
+
 // ---------------------------------------------------------------------------
 
 class RefBase::weakref_impl : public RefBase::weakref_type
@@ -432,6 +455,8 @@ void RefBase::incStrong(const void* id) const
         return;
     }
 
+    check_not_on_stack(this);
+
     int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE, std::memory_order_relaxed);
     // A decStrong() must still happen after us.
     ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
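As an aside, the same-page arithmetic in check_not_on_stack() above can be exercised on its own. The sketch below is illustrative only; same_4k_page() and the probe variables are hypothetical stand-ins, not part of libutils, and it relies on the same assumptions stated in the comment above (pages are at least 4K, and heap memory never shares a page with a stack).

#include <cstdint>
#include <cstdio>

// Illustrative re-implementation of the page heuristic: two addresses are
// treated as being on the same page if they agree in every bit above the
// 4K page offset, i.e. their XOR has no bits set under the page mask.
static bool same_4k_page(const void* a, const void* b) {
    constexpr uintptr_t kMinPageSize = 0x1000;  // 4K, matching MIN_PAGE_SIZE above
    constexpr uintptr_t kMinPageMask = ~(kMinPageSize - 1);
    return ((reinterpret_cast<uintptr_t>(a) ^ reinterpret_cast<uintptr_t>(b)) &
            kMinPageMask) == 0;
}

int main() {
    int frame_probe = 0;   // stands in for __builtin_frame_address(0)
    int stack_nearby = 0;  // another local, almost certainly on the same 4K page
    int* heap_obj = new int(0);

    // Typically prints 1 then 0: locals in one frame usually share a page,
    // while a heap allocation should never share a page with the stack.
    std::printf("%d\n", same_4k_page(&frame_probe, &stack_nearby));
    std::printf("%d\n", same_4k_page(&frame_probe, heap_obj));
    delete heap_obj;
    return 0;
}

Because larger page sizes only make the 4K window smaller than a real page, the heuristic can miss stack pointers but, under the stated assumptions, should not falsely flag heap memory.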
utils/StrongPointer.h
@@ -120,7 +120,6 @@ private:
     template<typename Y> friend class sp;
     template<typename Y> friend class wp;
     void set_pointer(T* ptr);
-    static inline void check_not_on_stack(const void* ptr);
     T* m_ptr;
 };
 
@@ -190,27 +189,6 @@ void sp_report_stack_pointer();
 // ---------------------------------------------------------------------------
 // No user serviceable parts below here.
 
-// Check whether address is definitely on the calling stack. We actually check whether it is on
-// the same 4K page as the frame pointer.
-//
-// Assumptions:
-// - Pages are never smaller than 4K (MIN_PAGE_SIZE)
-// - Malloced memory never shares a page with a stack.
-//
-// It does not appear safe to broaden this check to include adjacent pages; apparently this code
-// is used in environments where there may not be a guard page below (at higher addresses than)
-// the bottom of the stack.
-template <typename T>
-void sp<T>::check_not_on_stack(const void* ptr) {
-    static constexpr int MIN_PAGE_SIZE = 0x1000;  // 4K. Safer than including sys/user.h.
-    static constexpr uintptr_t MIN_PAGE_MASK = ~static_cast<uintptr_t>(MIN_PAGE_SIZE - 1);
-    uintptr_t my_frame_address =
-            reinterpret_cast<uintptr_t>(__builtin_frame_address(0 /* this frame */));
-    if (((reinterpret_cast<uintptr_t>(ptr) ^ my_frame_address) & MIN_PAGE_MASK) == 0) {
-        sp_report_stack_pointer();
-    }
-}
-
 // TODO: Ideally we should find a way to increment the reference count before running the
 // constructor, so that generating an sp<> to this in the constructor is no longer dangerous.
 template <typename T>
@@ -219,14 +197,13 @@ sp<T> sp<T>::make(Args&&... args) {
     T* t = new T(std::forward<Args>(args)...);
     sp<T> result;
     result.m_ptr = t;
-    t->incStrong(t);  // bypass check_not_on_stack for heap allocation
+    t->incStrong(t);
     return result;
 }
 
 template <typename T>
 sp<T> sp<T>::fromExisting(T* other) {
     if (other) {
-        check_not_on_stack(other);
         other->incStrongRequireStrong(other);
         sp<T> result;
         result.m_ptr = other;
@@ -240,7 +217,6 @@ template<typename T>
 sp<T>::sp(T* other)
         : m_ptr(other) {
     if (other) {
-        check_not_on_stack(other);
         other->incStrong(this);
     }
 }
@@ -249,7 +225,6 @@ template <typename T>
 template <typename U>
 sp<T>::sp(U* other) : m_ptr(other) {
     if (other) {
-        check_not_on_stack(other);
         (static_cast<T*>(other))->incStrong(this);
     }
 }
@@ -258,7 +233,6 @@ template <typename T>
 sp<T>& sp<T>::operator=(T* other) {
     T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
     if (other) {
-        check_not_on_stack(other);
        other->incStrong(this);
     }
     if (oldPtr) oldPtr->decStrong(this);
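Taken together, the change moves the stack check from the sp<>(T*) entry points above into the first incStrong(), so any path that takes the initial strong reference is covered. A minimal sketch of the observable difference, assuming a hypothetical RefBase-derived class Foo and a program linked against libutils (neither is part of this change):

#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

using android::RefBase;
using android::sp;

struct Foo : public RefBase {};  // hypothetical type, for illustration only

int main() {
    // Heap allocation: the initial incStrong() runs check_not_on_stack() against
    // a heap address, so nothing is reported and behavior is unchanged.
    sp<Foo> ok = sp<Foo>::make();

    // Stack allocation: before this change the check lived in sp<>(T*),
    // operator=(T*) and fromExisting(), so a direct incStrong() bypassed it.
    // With the check in the initial incStrong(), the first strong reference is
    // expected to be reported via sp_report_stack_pointer() (subject to the
    // 4K same-page heuristic). This is deliberately broken usage, shown only to
    // mark where the report now fires; no decStrong() follows, since a
    // stack-allocated RefBase object must never reach a zero strong count.
    Foo bad;
    bad.incStrong(&bad);

    return 0;
}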