liblog: free event tag map in __android_log_close()

There is no leak, since a reference always remained and could be
reused; it simply makes sense to also close the event tag map when
logging is closed.  Closing the map in turn requires fixing a tagArray
leak in android_closeEventTagMap().

NB: __android_log_close() already assumes that no other thread is
logging at the same time, which is true for all current callers.  There
are partial mitigations if __android_log_close() is called
asynchronously, but solving the race completely would cost a lock for
every logging call.

Test: gTest liblog-unit-tests
Bug: 30963384
Bug: 31456426
Change-Id: Ib76ad9302dba4d3f35250213f4dfef22498af238
Mark Salyzyn 2016-09-13 07:28:21 -07:00
parent aa00f5852f
commit ba1a798fd8
2 changed files with 33 additions and 4 deletions

liblog/event_tag_map.c

@@ -120,6 +120,7 @@ LIBLOG_ABI_PUBLIC void android_closeEventTagMap(EventTagMap* map)
return;
munmap(map->mapAddr, map->mapLen);
+free(map->tagArray);
free(map);
}

liblog/logger_write.c

@@ -132,12 +132,20 @@ LIBLOG_ABI_PUBLIC int __android_log_dev_available()
}
return kLogNotAvailable;
}
+#if defined(__BIONIC__)
+static atomic_uintptr_t tagMap;
+#endif
/*
* Release any logger resources. A new log write will immediately re-acquire.
*/
LIBLOG_ABI_PUBLIC void __android_log_close()
{
struct android_log_transport_write *transport;
+#if defined(__BIONIC__)
+EventTagMap *m;
+#endif
__android_log_lock();
@@ -165,7 +173,28 @@ LIBLOG_ABI_PUBLIC void __android_log_close()
}
}
+#if defined(__BIONIC__)
+/*
+ * The additional risk here is somewhat mitigated by the unlock below
+ * immediately flushing the processor cache.  The multi-threaded race
+ * that we choose to accept, to minimize locking, is an atomic_load in a
+ * writer picking up the value just prior to entering this routine; that
+ * results in a use after free.
+ *
+ * Again, anyone calling this to release the logging resources is most
+ * probably going to quiesce and then shut down, or to restart after a
+ * fork, so the risk should be non-existent.  For this reason we choose
+ * a mitigation stance for efficiency instead of incurring the cost of a
+ * lock for every log write.
+ */
+m = (EventTagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
+#endif
__android_log_unlock();
+#if defined(__BIONIC__)
+android_closeEventTagMap(m);
+#endif
}
/* log_init_lock assumed */
@@ -250,7 +279,6 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
return -EPERM;
}
} else if (log_id == LOG_ID_EVENTS) {
-static atomic_uintptr_t map;
const char *tag;
EventTagMap *m, *f;
@@ -260,11 +288,11 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
tag = NULL;
f = NULL;
-m = (EventTagMap *)atomic_load(&map);
+m = (EventTagMap *)atomic_load(&tagMap);
if (!m) {
ret = __android_log_trylock();
-m = (EventTagMap *)atomic_load(&map); /* trylock flush cache */
+m = (EventTagMap *)atomic_load(&tagMap); /* trylock flush cache */
if (!m) {
m = android_openEventTagMap(EVENT_TAG_MAP_FILE);
if (ret) { /* trylock failed, use local copy, mark for close */
@@ -273,7 +301,7 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
if (!m) { /* One chance to open map file */
m = (EventTagMap *)(uintptr_t)-1LL;
}
-atomic_store(&map, (uintptr_t)m);
+atomic_store(&tagMap, (uintptr_t)m);
}
}
if (!ret) { /* trylock succeeded, unlock */
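
As a usage sketch (hypothetical helper; only the exported
__android_log_close() from this change and the public
__android_log_print() are assumed), the expected caller pattern is to
quiesce logging, close, and let the next log write re-acquire the
sockets and the event tag map lazily:

#include <android/log.h>

/* The symbol is exported by liblog but may not be declared in the
 * public headers, so declare it here. */
extern void __android_log_close(void);

/* Hypothetical helper: run in the child right after fork(), once no
 * other thread in this process is logging. */
static void reset_logging_after_fork(void)
{
    __android_log_close();  /* release sockets and the event tag map */
    __android_log_print(ANDROID_LOG_INFO, "example",
                        "next write re-acquires logging resources lazily");
}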