From ba1a798fd8073b6069165cc03289fb9399f0495b Mon Sep 17 00:00:00 2001
From: Mark Salyzyn
Date: Tue, 13 Sep 2016 07:28:21 -0700
Subject: [PATCH] liblog: free event tag map in __android_log_close()

There is no leak since a reference always remained and could get
reused. It just makes sense to close the event tag map as well when
logging is closed. Since we now close the map, we also have to fix a
tagArray leak in android_closeEventTagMap().

NB: __android_log_close() already assumes that another thread is not
logging at the same time, which is true for all callers at this time.
There are some partial mitigation strategies if __android_log_close()
is called asynchronously, but the problem cannot be solved completely
without incurring the cost of a lock on every logging call.

Test: gTest liblog-unit-tests
Bug: 30963384
Bug: 31456426
Change-Id: Ib76ad9302dba4d3f35250213f4dfef22498af238
---
 liblog/event_tag_map.c |  1 +
 liblog/logger_write.c  | 36 ++++++++++++++++++++++++++++++++----
 2 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/liblog/event_tag_map.c b/liblog/event_tag_map.c
index 345f0d360..3e0b6b5f8 100644
--- a/liblog/event_tag_map.c
+++ b/liblog/event_tag_map.c
@@ -120,6 +120,7 @@ LIBLOG_ABI_PUBLIC void android_closeEventTagMap(EventTagMap* map)
         return;
 
     munmap(map->mapAddr, map->mapLen);
+    free(map->tagArray);
     free(map);
 }
 
diff --git a/liblog/logger_write.c b/liblog/logger_write.c
index c7b5a8415..08e634892 100644
--- a/liblog/logger_write.c
+++ b/liblog/logger_write.c
@@ -132,12 +132,20 @@ LIBLOG_ABI_PUBLIC int __android_log_dev_available()
     }
     return kLogNotAvailable;
 }
+
+#if defined(__BIONIC__)
+static atomic_uintptr_t tagMap;
+#endif
+
 /*
  * Release any logger resources. A new log write will immediately re-acquire.
  */
 LIBLOG_ABI_PUBLIC void __android_log_close()
 {
     struct android_log_transport_write *transport;
+#if defined(__BIONIC__)
+    EventTagMap *m;
+#endif
 
     __android_log_lock();
 
@@ -165,7 +173,28 @@ LIBLOG_ABI_PUBLIC void __android_log_close()
         }
     }
 
+#if defined(__BIONIC__)
+    /*
+     * The additional risk here is somewhat mitigated by the immediate
+     * unlock, which flushes the processor cache. The multi-threaded race
+     * that we choose to accept, to minimize locking, is an atomic_load in
+     * a writer picking up a value just prior to entering this routine.
+     * There will be a use-after-free.
+     *
+     * Again, anyone calling this to release the logging resources is most
+     * probably going to quiesce and then shut down, or to restart after a
+     * fork, so the risk should be non-existent. For this reason we choose
+     * a mitigation stance for efficiency instead of incurring the cost of
+     * a lock for every log write.
+     */
+    m = (EventTagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
+#endif
+
     __android_log_unlock();
+
+#if defined(__BIONIC__)
+    android_closeEventTagMap(m);
+#endif
+
 }
 
 /* log_init_lock assumed */
@@ -250,7 +279,6 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
             return -EPERM;
         }
     } else if (log_id == LOG_ID_EVENTS) {
-        static atomic_uintptr_t map;
         const char *tag;
         EventTagMap *m, *f;
 
@@ -260,11 +288,11 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
         tag = NULL;
         f = NULL;
-        m = (EventTagMap *)atomic_load(&map);
+        m = (EventTagMap *)atomic_load(&tagMap);
 
         if (!m) {
             ret = __android_log_trylock();
-            m = (EventTagMap *)atomic_load(&map); /* trylock flush cache */
+            m = (EventTagMap *)atomic_load(&tagMap); /* trylock flush cache */
             if (!m) {
                 m = android_openEventTagMap(EVENT_TAG_MAP_FILE);
                 if (ret) { /* trylock failed, use local copy, mark for close */
@@ -273,7 +301,7 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
                 if (!m) { /* One chance to open map file */
                     m = (EventTagMap *)(uintptr_t)-1LL;
                 }
-                atomic_store(&map, (uintptr_t)m);
+                atomic_store(&tagMap, (uintptr_t)m);
             }
         }
         if (!ret) { /* trylock succeeded, unlock */
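
[Editor's note] For readers outside the AOSP tree, below is a minimal,
self-contained sketch of the two patterns this patch combines: lazy
publication of a shared pointer under a trylock (the __write_to_log_daemon
side), and teardown that takes ownership with a single atomic_exchange so
the free happens outside the lock (the __android_log_close side). Every name
in it (Resource, g_map, g_lock, open_resource, close_resource, get_resource,
close_all, FAILED) is a hypothetical stand-in, not liblog API. Note that
close_resource() deliberately skips the FAILED sentinel; whether the liblog
call site needs an equivalent guard depends on code not shown in this diff.

/*
 * Sketch only: hypothetical stand-ins for EventTagMap and the liblog lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { int dummy; } Resource;      /* stand-in for EventTagMap */

#define FAILED ((Resource *)(uintptr_t)-1LL) /* "open already failed" sentinel */

static atomic_uintptr_t g_map;               /* shared cached pointer */
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static Resource *open_resource(void) { return calloc(1, sizeof(Resource)); }

static void close_resource(Resource *r) {
    if (r && r != FAILED) free(r);           /* never free the sentinel */
}

/* Writer side: lazily open the resource, publishing under a trylock. */
static Resource *get_resource(int *caller_owns) {
    *caller_owns = 0;
    Resource *m = (Resource *)atomic_load(&g_map);
    if (!m) {
        int busy = pthread_mutex_trylock(&g_lock); /* 0 on success */
        m = (Resource *)atomic_load(&g_map);       /* re-check under the lock */
        if (!m) {
            m = open_resource();
            if (busy) {
                *caller_owns = 1;   /* no lock: private copy, caller closes it */
            } else {
                if (!m) m = FAILED; /* remember the failure, one open attempt */
                atomic_store(&g_map, (uintptr_t)m);
            }
        }
        if (!busy) pthread_mutex_unlock(&g_lock);
    }
    return m;
}

/*
 * Close side: take ownership with one atomic exchange, then free outside
 * the lock. A writer whose atomic_load raced just ahead of the exchange
 * may still hold the stale pointer; that is the use-after-free window the
 * patch comment accepts to avoid locking every write.
 */
static void close_all(void) {
    pthread_mutex_lock(&g_lock);
    Resource *m = (Resource *)atomic_exchange(&g_map, (uintptr_t)0);
    pthread_mutex_unlock(&g_lock);
    close_resource(m);
}

int main(void) {
    int owns;
    Resource *m = get_resource(&owns);
    if (m && m != FAILED) { /* ... use m ... */ }
    if (owns) close_resource(m);
    close_all();
    return 0;
}

The design trade is the one the patch comment states: the close path pays a
single atomic_exchange and accepts a narrow race against concurrent writers,
rather than imposing a full lock acquisition on every log write.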