Merge "liblog: free event tag map in __android_log_close()"

This commit is contained in:
Treehugger Robot 2016-09-28 14:58:39 +00:00 committed by Gerrit Code Review
commit 1dfa8112ce
2 changed files with 33 additions and 4 deletions

View file

@@ -120,6 +120,7 @@ LIBLOG_ABI_PUBLIC void android_closeEventTagMap(EventTagMap* map)
return; return;
munmap(map->mapAddr, map->mapLen); munmap(map->mapAddr, map->mapLen);
free(map->tagArray);
free(map); free(map);
} }

View file

@@ -132,12 +132,20 @@ LIBLOG_ABI_PUBLIC int __android_log_dev_available()
} }
return kLogNotAvailable; return kLogNotAvailable;
} }
#if defined(__BIONIC__)
static atomic_uintptr_t tagMap;
#endif
/* /*
* Release any logger resources. A new log write will immediately re-acquire. * Release any logger resources. A new log write will immediately re-acquire.
*/ */
LIBLOG_ABI_PUBLIC void __android_log_close() LIBLOG_ABI_PUBLIC void __android_log_close()
{ {
struct android_log_transport_write *transport; struct android_log_transport_write *transport;
#if defined(__BIONIC__)
EventTagMap *m;
#endif
__android_log_lock(); __android_log_lock();
@@ -165,7 +173,28 @@ LIBLOG_ABI_PUBLIC void __android_log_close()
} }
} }
#if defined(__BIONIC__)
/*
* Additional risk here somewhat mitigated by immediately unlock flushing
* the processor cache. The multi-threaded race that we choose to accept,
* to minimize locking, is an atomic_load in a writer picking up a value
* just prior to entering this routine. There will be a use after free.
*
* Again, anyone calling this to release the logging resources is most
* probably going to quiesce and then shut down, or to restart after
* a fork, so the risk should be non-existent. For this reason we
* choose a mitigation stance for efficiency instead of incurring the cost
* of a lock for every log write.
*/
m = (EventTagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
#endif
__android_log_unlock(); __android_log_unlock();
#if defined(__BIONIC__)
android_closeEventTagMap(m);
#endif
} }
/* log_init_lock assumed */ /* log_init_lock assumed */
@@ -250,7 +279,6 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
return -EPERM; return -EPERM;
} }
} else if (log_id == LOG_ID_EVENTS) { } else if (log_id == LOG_ID_EVENTS) {
static atomic_uintptr_t map;
const char *tag; const char *tag;
EventTagMap *m, *f; EventTagMap *m, *f;
@@ -260,11 +288,11 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
tag = NULL; tag = NULL;
f = NULL; f = NULL;
m = (EventTagMap *)atomic_load(&map); m = (EventTagMap *)atomic_load(&tagMap);
if (!m) { if (!m) {
ret = __android_log_trylock(); ret = __android_log_trylock();
m = (EventTagMap *)atomic_load(&map); /* trylock flush cache */ m = (EventTagMap *)atomic_load(&tagMap); /* trylock flush cache */
if (!m) { if (!m) {
m = android_openEventTagMap(EVENT_TAG_MAP_FILE); m = android_openEventTagMap(EVENT_TAG_MAP_FILE);
if (ret) { /* trylock failed, use local copy, mark for close */ if (ret) { /* trylock failed, use local copy, mark for close */
@@ -273,7 +301,7 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
if (!m) { /* One chance to open map file */ if (!m) { /* One chance to open map file */
m = (EventTagMap *)(uintptr_t)-1LL; m = (EventTagMap *)(uintptr_t)-1LL;
} }
atomic_store(&map, (uintptr_t)m); atomic_store(&tagMap, (uintptr_t)m);
} }
} }
if (!ret) { /* trylock succeeded, unlock */ if (!ret) { /* trylock succeeded, unlock */