Merge "Use fallback linker allocator in trace_handler." into main
commit f62078fa17

2 changed files with 169 additions and 52 deletions
@@ -1773,6 +1773,75 @@ TEST_F(CrasherTest, seccomp_crash_logcat) {
   AssertDeath(SIGABRT);
 }
 
+extern "C" void malloc_enable();
+extern "C" void malloc_disable();
+
+TEST_F(CrasherTest, seccomp_tombstone_no_allocation) {
+  int intercept_result;
+  unique_fd output_fd;
+
+  static const auto dump_type = kDebuggerdTombstone;
+  StartProcess(
+      []() {
+        std::thread a(foo);
+        std::thread b(bar);
+
+        std::this_thread::sleep_for(100ms);
+
+        // Disable allocations to verify that nothing in the fallback
+        // signal handler does an allocation.
+        malloc_disable();
+        raise_debugger_signal(dump_type);
+        _exit(0);
+      },
+      &seccomp_fork);
+
+  StartIntercept(&output_fd, dump_type);
+  FinishCrasher();
+  AssertDeath(0);
+  FinishIntercept(&intercept_result);
+  ASSERT_EQ(1, intercept_result) << "tombstoned reported failure";
+
+  std::string result;
+  ConsumeFd(std::move(output_fd), &result);
+  ASSERT_BACKTRACE_FRAME(result, "raise_debugger_signal");
+  ASSERT_BACKTRACE_FRAME(result, "foo");
+  ASSERT_BACKTRACE_FRAME(result, "bar");
+}
+
+TEST_F(CrasherTest, seccomp_backtrace_no_allocation) {
+  int intercept_result;
+  unique_fd output_fd;
+
+  static const auto dump_type = kDebuggerdNativeBacktrace;
+  StartProcess(
+      []() {
+        std::thread a(foo);
+        std::thread b(bar);
+
+        std::this_thread::sleep_for(100ms);
+
+        // Disable allocations to verify that nothing in the fallback
+        // signal handler does an allocation.
+        malloc_disable();
+        raise_debugger_signal(dump_type);
+        _exit(0);
+      },
+      &seccomp_fork);
+
+  StartIntercept(&output_fd, dump_type);
+  FinishCrasher();
+  AssertDeath(0);
+  FinishIntercept(&intercept_result);
+  ASSERT_EQ(1, intercept_result) << "tombstoned reported failure";
+
+  std::string result;
+  ConsumeFd(std::move(output_fd), &result);
+  ASSERT_BACKTRACE_FRAME(result, "raise_debugger_signal");
+  ASSERT_BACKTRACE_FRAME(result, "foo");
+  ASSERT_BACKTRACE_FRAME(result, "bar");
+}
+
 TEST_F(CrasherTest, competing_tracer) {
   int intercept_result;
   unique_fd output_fd;
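The two tests above lean on bionic's malloc_enable()/malloc_disable() pair: disabling pauses the allocator, so any later allocation blocks instead of returning, and an allocation inside the signal handler therefore shows up as a hang or death rather than a passing test. A minimal sketch of that idea as a reusable guard (illustrative only, not part of this commit; run_without_allocating is a made-up name):

#include <utility>

extern "C" void malloc_enable();
extern "C" void malloc_disable();

// Run fn() with allocations forbidden. If fn() (or anything it calls)
// allocates, the thread blocks inside malloc and never reaches
// malloc_enable(), so the failure is impossible to miss.
template <typename Fn>
void run_without_allocating(Fn&& fn) {
  malloc_disable();
  std::forward<Fn>(fn)();
  malloc_enable();
}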
@@ -48,50 +48,69 @@ using android::base::unique_fd;
 extern "C" bool __linker_enable_fallback_allocator();
 extern "C" void __linker_disable_fallback_allocator();
 
-// This is incredibly sketchy to do inside of a signal handler, especially when libbacktrace
-// uses the C++ standard library throughout, but this code runs in the linker, so we'll be using
-// the linker's malloc instead of the libc one. Switch it out for a replacement, just in case.
-//
-// This isn't the default method of dumping because it can fail in cases such as address space
-// exhaustion.
+// This file implements a fallback path for processes that do not allow the
+// normal fork and exec of crash_dump to handle crashes/unwinds.
+// The issue is that all of this happens from within a signal handler, which
+// can cause problems since this code uses the linker allocator which is not
+// thread safe. In order to avoid any problems allocating, the code calls
+// a function to switch to use a fallback allocator in the linker that will
+// only be used for the current thread. All of the libunwindstack code does
+// allocations using C++ stl, but should be fine since the code runs in the
+// linker and should use the fallback handler.
+//
+// This method can still fail if the virtual space is exhausted on a 32 bit
+// process or mmap failing due to hitting the maximum number of maps (65535
+// total maps) on a 64 bit process.
+
+// Class to handle automatically turning on and off the fallback allocator.
+class ScopedUseFallbackAllocator {
+ public:
+  ScopedUseFallbackAllocator() { Enable(); }
+
+  ~ScopedUseFallbackAllocator() { Disable(); }
+
+  bool Enable() {
+    if (!enabled_) {
+      enabled_ = __linker_enable_fallback_allocator();
+      if (!enabled_) {
+        async_safe_format_log(ANDROID_LOG_ERROR, "libc",
+                              "Unable to enable fallback allocator, already in use.");
+      }
+    }
+    return enabled_;
+  }
+
+  void Disable() {
+    if (enabled_) {
+      __linker_disable_fallback_allocator();
+      enabled_ = false;
+    }
+  }
+
+  bool enabled() { return enabled_; }
+
+ private:
+  bool enabled_ = false;
+};
 
 static void debuggerd_fallback_trace(int output_fd, ucontext_t* ucontext) {
-  if (!__linker_enable_fallback_allocator()) {
-    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "fallback allocator already in use");
-    return;
-  }
-
-  {
-    std::unique_ptr<unwindstack::Regs> regs;
+  std::unique_ptr<unwindstack::Regs> regs;
 
-    ThreadInfo thread;
-    thread.pid = getpid();
-    thread.tid = gettid();
-    thread.thread_name = get_thread_name(gettid());
-    thread.registers.reset(
-        unwindstack::Regs::CreateFromUcontext(unwindstack::Regs::CurrentArch(), ucontext));
-
-    // Do not use the thread cache here because it will call pthread_key_create
-    // which doesn't work in linker code. See b/189803009.
-    // Use a normal cached object because the thread is stopped, and there
-    // is no chance of data changing between reads.
-    auto process_memory = unwindstack::Memory::CreateProcessMemoryCached(getpid());
-    // TODO: Create this once and store it in a global?
-    unwindstack::AndroidLocalUnwinder unwinder(process_memory);
-    dump_backtrace_thread(output_fd, &unwinder, thread);
-  }
-  __linker_disable_fallback_allocator();
-}
-
-static void debuggerd_fallback_tombstone(int output_fd, int proto_fd, ucontext_t* ucontext,
-                                         siginfo_t* siginfo, void* abort_message) {
-  if (!__linker_enable_fallback_allocator()) {
-    async_safe_format_log(ANDROID_LOG_ERROR, "libc", "fallback allocator already in use");
-    return;
-  }
-
-  engrave_tombstone_ucontext(output_fd, proto_fd, reinterpret_cast<uintptr_t>(abort_message),
-                             siginfo, ucontext);
-  __linker_disable_fallback_allocator();
+  ThreadInfo thread;
+  thread.pid = getpid();
+  thread.tid = gettid();
+  thread.thread_name = get_thread_name(gettid());
+  thread.registers.reset(
+      unwindstack::Regs::CreateFromUcontext(unwindstack::Regs::CurrentArch(), ucontext));
+
+  // Do not use the thread cache here because it will call pthread_key_create
+  // which doesn't work in linker code. See b/189803009.
+  // Use a normal cached object because the thread is stopped, and there
+  // is no chance of data changing between reads.
+  auto process_memory = unwindstack::Memory::CreateProcessMemoryCached(getpid());
+  // TODO: Create this once and store it in a global?
+  unwindstack::AndroidLocalUnwinder unwinder(process_memory);
+  dump_backtrace_thread(output_fd, &unwinder, thread);
 }
 
 static bool forward_output(int src_fd, int dst_fd, pid_t expected_tid) {
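The new ScopedUseFallbackAllocator above turns the bare enable/disable calls into an RAII guard, which the later hunks thread through trace_handler and crash_handler. A short usage sketch (handler body elided, names taken from the diff):

// The constructor tries to take ownership of the linker's single-user
// fallback arena; enabled() reports whether that succeeded.
void handler_sketch() {
  ScopedUseFallbackAllocator allocator;
  if (!allocator.enabled()) {
    return;  // another thread already owns the fallback allocator
  }
  // ... unwind and dump; allocations go to the fallback arena ...
}  // destructor runs Disable(), releasing the arena for other threads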
@@ -154,6 +173,11 @@ static std::pair<pid_t, int> unpack_thread_fd(uint64_t value) {
 }
 
 static void trace_handler(siginfo_t* info, ucontext_t* ucontext) {
+  ScopedUseFallbackAllocator allocator;
+  if (!allocator.enabled()) {
+    return;
+  }
+
   static std::atomic<uint64_t> trace_output(pack_thread_fd(-1, -1));
 
   if (info->si_value.sival_ptr == kDebuggerdFallbackSivalPtrRequestDump) {
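trace_output above packs a (tid, fd) pair into a single std::atomic<uint64_t> so ownership of the output fd can be handed between threads without a lock. The pack_thread_fd/unpack_thread_fd helpers are defined elsewhere in this file; a plausible sketch of their shape (assumed, not copied from the commit):

#include <cstdint>
#include <sys/types.h>
#include <utility>

// Assumed layout: tid in the high 32 bits, fd in the low 32 bits.
static uint64_t pack_thread_fd(pid_t tid, int fd) {
  return (static_cast<uint64_t>(tid) << 32) | static_cast<uint32_t>(fd);
}

static std::pair<pid_t, int> unpack_thread_fd(uint64_t value) {
  return {static_cast<pid_t>(value >> 32),
          static_cast<int>(static_cast<uint32_t>(value))};
}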
@@ -181,6 +205,11 @@ static void trace_handler(siginfo_t* info, ucontext_t* ucontext) {
       async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to write to output fd");
     }
 
+    // Stop using the fallback allocator before the close. This will prevent
+    // a race condition where the thread backtracing all of the threads tries
+    // to re-acquire the fallback allocator.
+    allocator.Disable();
+
     close(fd);
     return;
   }
@@ -210,10 +239,15 @@ static void trace_handler(siginfo_t* info, ucontext_t* ucontext) {
 
   // Send a signal to all of our siblings, asking them to dump their stack.
   pid_t current_tid = gettid();
-  if (!iterate_tids(current_tid, [&output_fd, &current_tid](pid_t tid) {
+  if (!iterate_tids(current_tid, [&allocator, &output_fd, &current_tid](pid_t tid) {
         if (current_tid == tid) {
           return;
         }
 
+        if (!allocator.enabled()) {
+          return;
+        }
+
         // Use a pipe, to be able to detect situations where the thread gracefully exits before
         // receiving our signal.
         unique_fd pipe_read, pipe_write;
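The pipe created above is what lets forward_output (used in the next hunk) tell a thread that dumped apart from one that exited before seeing the signal: the reader polls with a timeout instead of blocking forever. A hedged sketch of that pattern, not the real forward_output:

#include <poll.h>
#include <unistd.h>

// Drain src_fd into dst_fd until the writer closes its end, giving up if
// nothing arrives within timeout_ms (e.g. the target thread died early).
static bool drain_pipe_with_timeout(int src_fd, int dst_fd, int timeout_ms) {
  while (true) {
    pollfd pfd{src_fd, POLLIN, 0};
    int rc = poll(&pfd, 1, timeout_ms);
    if (rc <= 0) {
      return false;  // timeout or poll error
    }
    char buf[512];
    ssize_t n = read(src_fd, buf, sizeof(buf));
    if (n <= 0) {
      return n == 0;  // 0 means EOF: the writer finished and closed the pipe
    }
    if (write(dst_fd, buf, static_cast<size_t>(n)) != n) {
      return false;
    }
  }
}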
@@ -233,22 +267,29 @@ static void trace_handler(siginfo_t* info, ucontext_t* ucontext) {
           return;
         }
 
+        // Disable our use of the fallback allocator while the target thread
+        // is getting the backtrace.
+        allocator.Disable();
+
         siginfo_t siginfo = {};
         siginfo.si_code = SI_QUEUE;
         siginfo.si_value.sival_ptr = kDebuggerdFallbackSivalPtrRequestDump;
         siginfo.si_pid = getpid();
         siginfo.si_uid = getuid();
 
-        if (syscall(__NR_rt_tgsigqueueinfo, getpid(), tid, BIONIC_SIGNAL_DEBUGGER, &siginfo) != 0) {
+        if (syscall(__NR_rt_tgsigqueueinfo, getpid(), tid, BIONIC_SIGNAL_DEBUGGER, &siginfo) == 0) {
+          if (!forward_output(pipe_read.get(), output_fd.get(), tid)) {
+            async_safe_format_log(ANDROID_LOG_ERROR, "libc",
+                                  "timeout expired while waiting for thread %d to dump", tid);
+          }
+        } else {
           async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to send trace signal to %d: %s",
                                 tid, strerror(errno));
-          return;
         }
 
-        bool success = forward_output(pipe_read.get(), output_fd.get(), tid);
-        if (!success) {
-          async_safe_format_log(ANDROID_LOG_ERROR, "libc",
-                                "timeout expired while waiting for thread %d to dump", tid);
-        }
+        // The thread should be finished now, so try and re-enable the fallback allocator.
+        if (!allocator.Enable()) {
+          return;
+        }
 
         // Regardless of whether the poll succeeds, check to see if the thread took fd ownership.
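The flipped condition above (== 0 instead of != 0) folds forward_output into the success path so the allocator can be re-enabled on every path afterwards. The raw syscall is used because, unlike sigqueue(3), rt_tgsigqueueinfo targets one thread and lets the sender fill in the whole siginfo_t; libc offers no wrapper for it. A standalone sketch of the call (the helper name is illustrative; sending si_code as SI_QUEUE is permitted here because the target is the sender's own process):

#include <csignal>
#include <sys/syscall.h>
#include <unistd.h>

// Queue `sig` to thread `tid` of this process with a pointer payload the
// receiving handler can read back from info->si_value.sival_ptr.
static bool queue_signal_to_thread(pid_t tid, int sig, void* payload) {
  siginfo_t si = {};
  si.si_code = SI_QUEUE;  // marks this as a queued, payload-carrying signal
  si.si_pid = getpid();
  si.si_uid = getuid();
  si.si_value.sival_ptr = payload;
  return syscall(SYS_rt_tgsigqueueinfo, getpid(), tid, sig, &si) == 0;
}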
@@ -260,14 +301,15 @@ static void trace_handler(siginfo_t* info, ucontext_t* ucontext) {
             close(fd);
           }
         }
 
-        return;
       })) {
     async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to open /proc/%d/task: %s",
                           current_tid, strerror(errno));
   }
 
-  dump_backtrace_footer(output_fd.get());
+  if (allocator.enabled()) {
+    dump_backtrace_footer(output_fd.get());
+  }
 
   tombstoned_notify_completion(tombstone_socket.get());
 }
@@ -295,7 +337,13 @@ static void crash_handler(siginfo_t* info, ucontext_t* ucontext, void* abort_mes
 
   unique_fd tombstone_socket, output_fd, proto_fd;
   bool tombstoned_connected = tombstoned_connect(getpid(), &tombstone_socket, &output_fd, &proto_fd,
                                                  kDebuggerdTombstoneProto);
-  debuggerd_fallback_tombstone(output_fd.get(), proto_fd.get(), ucontext, info, abort_message);
+  {
+    ScopedUseFallbackAllocator allocator;
+    if (allocator.enabled()) {
+      engrave_tombstone_ucontext(output_fd.get(), proto_fd.get(),
+                                 reinterpret_cast<uintptr_t>(abort_message), info, ucontext);
+    }
+  }
   if (tombstoned_connected) {
     tombstoned_notify_completion(tombstone_socket.get());
   }
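The braces introduced around the allocator in crash_handler are the usual C++ idiom for ending an RAII guard's lifetime early: the fallback allocator is released at the closing brace, before tombstoned_notify_completion runs. The same idiom shown with a lock, for illustration only:

#include <mutex>

std::mutex m;

void bounded_guard_example() {
  {
    std::lock_guard<std::mutex> guard(m);  // acquired here
    // ... work that needs the lock ...
  }  // guard destroyed here, at the closing brace
  // ... follow-up work that must not hold the lock ...
}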