Merge changes I8e27f8b9,I78d2dc79 into pi-dev

* changes:
  lmkd: Optimize frequent file reads by keeping file descriptors open
  lmkd: Fix usage of ro.lmk.kill_heaviest_task property
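
The first change amounts to an "open the file once, then pread() it from offset 0 on every poll" helper, so that no open()/close() has to happen while the system is already under memory pressure. The snippet below is a minimal, self-contained sketch of that pattern, not the lmkd code itself: cached_file, cached_read and the /proc/loadavg example are illustrative names, while the diff adds the equivalent reread_data struct and reread_file() helper.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Cached-descriptor reader: open the file once, then pread() from offset 0
     * on every call, so no open()/close() (and no kernel allocation for a new
     * file description) happens while memory is tight. */
    struct cached_file {
        const char *filename;
        int fd;                 /* -1 until the first successful open */
    };

    static ssize_t cached_read(struct cached_file *cf, char *buf, size_t buf_size) {
        if (cf->fd == -1) {
            cf->fd = open(cf->filename, O_RDONLY | O_CLOEXEC);
            if (cf->fd == -1) {
                return -1;
            }
        }
        /* pread() always starts at offset 0, so no lseek() between reads. */
        ssize_t size = TEMP_FAILURE_RETRY(pread(cf->fd, buf, buf_size - 1, 0));
        if (size < 0) {
            /* Drop the descriptor; the next call will retry the open. */
            close(cf->fd);
            cf->fd = -1;
            return -1;
        }
        buf[size] = '\0';
        return size;
    }

    int main(void) {
        /* Poll a frequently re-read procfs file as an example. */
        struct cached_file loadavg = { .filename = "/proc/loadavg", .fd = -1 };
        char buf[128];
        if (cached_read(&loadavg, buf, sizeof(buf)) > 0) {
            printf("%s", buf);
        }
        return 0;
    }

For brevity the sketch issues a single pread(), whereas lmkd's read_all() loops to handle short reads; dropping and reopening the descriptor only on a failed read mirrors the error path of reread_file() in the diff, which closes the cached fd and resets it to -1 so the next call can recover.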
commit 7cb8aade20
Author: Suren Baghdasaryan
Date: 2018-04-17 00:09:15 +00:00
Committer: Android (Google) Code Review

@@ -171,6 +171,11 @@ struct proc {
struct proc *pidhash_next;
};
+struct reread_data {
+const char* const filename;
+int fd;
+};
#ifdef LMKD_LOG_STATS
static bool enable_stats_log;
static android_log_context log_ctx;
@@ -186,12 +191,27 @@ static struct adjslot_list procadjslot_list[ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1];
/* PAGE_SIZE / 1024 */
static long page_k;
+static bool parse_int64(const char* str, int64_t* ret) {
+char* endptr;
+long long val = strtoll(str, &endptr, 10);
+if (str == endptr || val > INT64_MAX) {
+return false;
+}
+*ret = (int64_t)val;
+return true;
+}
/*
* Read file content from the beginning up to max_len bytes or EOF
* whichever happens first.
*/
static ssize_t read_all(int fd, char *buf, size_t max_len)
{
ssize_t ret = 0;
+off_t offset = 0;
while (max_len > 0) {
-ssize_t r = read(fd, buf, max_len);
+ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
if (r == 0) {
break;
}
@@ -200,12 +220,44 @@ static ssize_t read_all(int fd, char *buf, size_t max_len)
}
ret += r;
buf += r;
+offset += r;
max_len -= r;
}
return ret;
}
+/*
+* Read a new or already opened file from the beginning.
+* If the file has not been opened yet data->fd should be set to -1.
+* To be used with files which are read often and possibly during high
+* memory pressure to minimize file opening which by itself requires kernel
+* memory allocation and might result in a stall on memory stressed system.
+*/
+static int reread_file(struct reread_data *data, char *buf, size_t buf_size) {
+ssize_t size;
+if (data->fd == -1) {
+data->fd = open(data->filename, O_RDONLY | O_CLOEXEC);
+if (data->fd == -1) {
+ALOGE("%s open: %s", data->filename, strerror(errno));
+return -1;
+}
+}
+size = read_all(data->fd, buf, buf_size - 1);
+if (size < 0) {
+ALOGE("%s read: %s", data->filename, strerror(errno));
+close(data->fd);
+data->fd = -1;
+return -1;
+}
+ALOG_ASSERT((size_t)size < buf_size - 1, "%s too large", data->filename);
+buf[size] = 0;
+return 0;
+}
static struct proc *pid_lookup(int pid) {
struct proc *procp;
@@ -442,7 +494,7 @@ static void ctrl_data_close(int dsock_idx) {
static int ctrl_data_read(int dsock_idx, char *buf, size_t bufsz) {
int ret = 0;
-ret = read(data_sock[dsock_idx].sock, buf, bufsz);
+ret = TEMP_FAILURE_RETRY(read(data_sock[dsock_idx].sock, buf, bufsz));
if (ret == -1) {
ALOGE("control data socket read failed; errno=%d", errno);
@@ -771,10 +823,8 @@ static int find_and_kill_processes(enum vmpressure_level level,
struct proc *procp;
while (true) {
-if (low_ram_device)
-procp = proc_adj_lru(i);
-else
-procp = proc_get_heaviest(i);
+procp = kill_heaviest_task ?
+proc_get_heaviest(i) : proc_adj_lru(i);
if (!procp)
break;
@@ -805,23 +855,19 @@ static int find_and_kill_processes(enum vmpressure_level level,
return pages_freed;
}
-static int64_t get_memory_usage(const char* path) {
+static int64_t get_memory_usage(struct reread_data *file_data) {
int ret;
int64_t mem_usage;
char buf[32];
-int fd = open(path, O_RDONLY | O_CLOEXEC);
-if (fd == -1) {
-ALOGE("%s open: errno=%d", path, errno);
+if (reread_file(file_data, buf, sizeof(buf)) < 0) {
return -1;
}
-ret = read_all(fd, buf, sizeof(buf) - 1);
-close(fd);
-if (ret < 0) {
-ALOGE("%s error: errno=%d", path, errno);
+if (!parse_int64(buf, &mem_usage)) {
+ALOGE("%s parse error", file_data->filename);
return -1;
}
-sscanf(buf, "%" SCNd64, &mem_usage);
if (mem_usage == 0) {
ALOGE("No memory!");
return -1;
@@ -881,6 +927,14 @@ static void mp_event_common(int data, uint32_t events __unused) {
static struct timeval last_report_tm;
static unsigned long skip_count = 0;
enum vmpressure_level level = (enum vmpressure_level)data;
+static struct reread_data mem_usage_file_data = {
+.filename = MEMCG_MEMORY_USAGE,
+.fd = -1,
+};
+static struct reread_data memsw_usage_file_data = {
+.filename = MEMCG_MEMORYSW_USAGE,
+.fd = -1,
+};
/*
* Check all event counters from low to critical
@@ -889,7 +943,8 @@ static void mp_event_common(int data, uint32_t events __unused) {
*/
for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
if (mpevfd[lvl] != -1 &&
-read(mpevfd[lvl], &evcount, sizeof(evcount)) > 0 &&
+TEMP_FAILURE_RETRY(read(mpevfd[lvl],
+&evcount, sizeof(evcount))) > 0 &&
evcount > 0 && lvl > level) {
level = lvl;
}
@@ -926,9 +981,10 @@ static void mp_event_common(int data, uint32_t events __unused) {
return;
}
-mem_usage = get_memory_usage(MEMCG_MEMORY_USAGE);
-memsw_usage = get_memory_usage(MEMCG_MEMORYSW_USAGE);
-if (memsw_usage < 0 || mem_usage < 0) {
+if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
+goto do_kill;
+}
+if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
goto do_kill;
}
@@ -1198,7 +1254,7 @@ int main(int argc __unused, char **argv __unused) {
downgrade_pressure =
(int64_t)property_get_int32("ro.lmk.downgrade_pressure", 100);
kill_heaviest_task =
-property_get_bool("ro.lmk.kill_heaviest_task", true);
+property_get_bool("ro.lmk.kill_heaviest_task", false);
low_ram_device = property_get_bool("ro.config.low_ram", false);
kill_timeout_ms =
(unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
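
The second change makes victim selection depend on the ro.lmk.kill_heaviest_task property itself (now defaulting to false) rather than on ro.config.low_ram. Below is a short sketch of the corrected property handling, using the same libcutils property_get_bool() call the diff already relies on; init_kill_policy() is an illustrative name, not part of lmkd.

    #include <stdbool.h>
    #include <cutils/properties.h>

    /* true  -> kill the heaviest (largest) task at a given oom_score_adj level
     * false -> kill the least recently used task at that level (the default
     *          after this fix) */
    static bool kill_heaviest_task;

    static void init_kill_policy(void) {
        /* Read once at startup, as lmkd's main() does. */
        kill_heaviest_task = property_get_bool("ro.lmk.kill_heaviest_task", false);
    }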