/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "lowmemorykiller"

#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/sysinfo.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include <cutils/properties.h>
#include <cutils/sched_policy.h>
#include <cutils/sockets.h>
#include <lmkd.h>
#include <log/log.h>
#include <system/thread_defs.h>

#ifdef LMKD_LOG_STATS
#include "statslog.h"
#endif

/*
 * Define LMKD_TRACE_KILLS to record lmkd kills in kernel traces
 * to profile and correlate with OOM kills
 */
#ifdef LMKD_TRACE_KILLS

#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <cutils/trace.h>

#define TRACE_KILL_START(pid) ATRACE_INT(__FUNCTION__, pid);
#define TRACE_KILL_END() ATRACE_INT(__FUNCTION__, 0);

#else /* LMKD_TRACE_KILLS */

#define TRACE_KILL_START(pid) ((void)(pid))
#define TRACE_KILL_END() ((void)0)

#endif /* LMKD_TRACE_KILLS */

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

#define MEMCG_SYSFS_PATH "/dev/memcg/"
#define MEMCG_MEMORY_USAGE "/dev/memcg/memory.usage_in_bytes"
#define MEMCG_MEMORYSW_USAGE "/dev/memcg/memory.memsw.usage_in_bytes"
#define ZONEINFO_PATH "/proc/zoneinfo"
#define MEMINFO_PATH "/proc/meminfo"
#define LINE_MAX 128

#define INKERNEL_MINFREE_PATH "/sys/module/lowmemorykiller/parameters/minfree"
#define INKERNEL_ADJ_PATH "/sys/module/lowmemorykiller/parameters/adj"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define EIGHT_MEGA (1 << 23)

#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
#define STRINGIFY_INTERNAL(x) #x

#define FAIL_REPORT_RLIMIT_MS 1000

/* default to old in-kernel interface if no memory pressure events */
static int use_inkernel_interface = 1;
static bool has_inkernel_module;

/* memory pressure levels */
enum vmpressure_level {
    VMPRESS_LEVEL_LOW = 0,
    VMPRESS_LEVEL_MEDIUM,
    VMPRESS_LEVEL_CRITICAL,
    VMPRESS_LEVEL_COUNT
};

static const char *level_name[] = {
    "low",
    "medium",
    "critical"
};

struct {
    int64_t min_nr_free_pages; /* recorded but not used yet */
    int64_t max_nr_free_pages;
} low_pressure_mem = { -1, -1 };

static int level_oomadj[VMPRESS_LEVEL_COUNT];
static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 };
static bool debug_process_killing;
static bool enable_pressure_upgrade;
static int64_t upgrade_pressure;
static int64_t downgrade_pressure;
static bool low_ram_device;
static bool kill_heaviest_task;
static unsigned long kill_timeout_ms;
static bool use_minfree_levels;
static bool per_app_memcg;

/* data required to handle events */
struct event_handler_info {
    int data;
    void (*handler)(int data, uint32_t events);
};

/* data required to handle socket events */
struct sock_event_handler_info {
    int sock;
    struct event_handler_info handler_info;
};

/* max supported number of data connections */
#define MAX_DATA_CONN 2

/* socket event handler data */
static struct sock_event_handler_info ctrl_sock;
static struct sock_event_handler_info data_sock[MAX_DATA_CONN];

/* vmpressure event handler data */
static struct event_handler_info vmpressure_hinfo[VMPRESS_LEVEL_COUNT];

/* 3 memory pressure levels, 1 ctrl listen socket, 2 ctrl data socket */
#define MAX_EPOLL_EVENTS (1 + MAX_DATA_CONN + VMPRESS_LEVEL_COUNT)
static int epollfd;
static int maxevents;

/* OOM score values used by both kernel and framework */
#define OOM_SCORE_ADJ_MIN (-1000)
#define OOM_SCORE_ADJ_MAX 1000

static int lowmem_adj[MAX_TARGETS];
static int lowmem_minfree[MAX_TARGETS];
static int lowmem_targets_size;

/* Fields to parse in /proc/zoneinfo */
enum zoneinfo_field {
    ZI_NR_FREE_PAGES = 0,
    ZI_NR_FILE_PAGES,
    ZI_NR_SHMEM,
    ZI_NR_UNEVICTABLE,
    ZI_WORKINGSET_REFAULT,
    ZI_HIGH,
    ZI_FIELD_COUNT
};

static const char* const zoneinfo_field_names[ZI_FIELD_COUNT] = {
    "nr_free_pages",
    "nr_file_pages",
    "nr_shmem",
    "nr_unevictable",
    "workingset_refault",
    "high",
};

union zoneinfo {
    struct {
        int64_t nr_free_pages;
        int64_t nr_file_pages;
        int64_t nr_shmem;
        int64_t nr_unevictable;
        int64_t workingset_refault;
        int64_t high;
        /* fields below are calculated rather than read from the file */
        int64_t totalreserve_pages;
    } field;
    int64_t arr[ZI_FIELD_COUNT];
};

/* Fields to parse in /proc/meminfo */
enum meminfo_field {
    MI_NR_FREE_PAGES = 0,
    MI_CACHED,
    MI_SWAP_CACHED,
    MI_BUFFERS,
    MI_SHMEM,
    MI_UNEVICTABLE,
    MI_FREE_SWAP,
    MI_DIRTY,
    MI_FIELD_COUNT
};

static const char* const meminfo_field_names[MI_FIELD_COUNT] = {
    "MemFree:",
    "Cached:",
    "SwapCached:",
    "Buffers:",
    "Shmem:",
    "Unevictable:",
    "SwapFree:",
    "Dirty:",
};

union meminfo {
    struct {
        int64_t nr_free_pages;
        int64_t cached;
        int64_t swap_cached;
        int64_t buffers;
        int64_t shmem;
        int64_t unevictable;
        int64_t free_swap;
        int64_t dirty;
        /* fields below are calculated rather than read from the file */
        int64_t nr_file_pages;
    } field;
    int64_t arr[MI_FIELD_COUNT];
};

enum field_match_result {
    NO_MATCH,
    PARSE_FAIL,
    PARSE_SUCCESS
};

struct adjslot_list {
    struct adjslot_list *next;
    struct adjslot_list *prev;
};

struct proc {
    struct adjslot_list asl;
    int pid;
    uid_t uid;
    int oomadj;
    struct proc *pidhash_next;
};

struct reread_data {
    const char* const filename;
    int fd;
};

#ifdef LMKD_LOG_STATS
static bool enable_stats_log;
static android_log_context log_ctx;
#endif

#define PIDHASH_SZ 1024
static struct proc *pidhash[PIDHASH_SZ];
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

#define ADJTOSLOT(adj) ((adj) + -OOM_SCORE_ADJ_MIN)
static struct adjslot_list procadjslot_list[ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1];

/* PAGE_SIZE / 1024 */
static long page_k;

static bool parse_int64(const char* str, int64_t* ret) {
    char* endptr;
    long long val = strtoll(str, &endptr, 10);
    if (str == endptr || val > INT64_MAX) {
        return false;
    }
    *ret = (int64_t)val;
    return true;
}

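/*
 * Match a "<name> <value>" token pair against a list of field names.
 * On a match, the parsed value is returned through 'field' and the index
 * of the matched name through 'field_idx'.
 */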
static enum field_match_result match_field(const char* cp, const char* ap,
                                           const char* const field_names[],
                                           int field_count, int64_t* field,
                                           int *field_idx) {
    int64_t val;
    int i;

    for (i = 0; i < field_count; i++) {
        if (!strcmp(cp, field_names[i])) {
            *field_idx = i;
            return parse_int64(ap, field) ? PARSE_SUCCESS : PARSE_FAIL;
        }
    }
    return NO_MATCH;
}

/*
 * Read file content from the beginning up to max_len bytes or EOF
 * whichever happens first.
 */
static ssize_t read_all(int fd, char *buf, size_t max_len)
{
    ssize_t ret = 0;
    off_t offset = 0;

    while (max_len > 0) {
        ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
        if (r == 0) {
            break;
        }
        if (r == -1) {
            return -1;
        }
        ret += r;
        buf += r;
        offset += r;
        max_len -= r;
    }

    return ret;
}

/*
 * Read a new or already opened file from the beginning.
 * If the file has not been opened yet data->fd should be set to -1.
 * To be used with files which are read often and possibly during high
 * memory pressure to minimize file opening which by itself requires kernel
 * memory allocation and might result in a stall on memory stressed system.
 */
static int reread_file(struct reread_data *data, char *buf, size_t buf_size) {
    ssize_t size;

    if (data->fd == -1) {
        data->fd = open(data->filename, O_RDONLY | O_CLOEXEC);
        if (data->fd == -1) {
            ALOGE("%s open: %s", data->filename, strerror(errno));
            return -1;
        }
    }

    size = read_all(data->fd, buf, buf_size - 1);
    if (size < 0) {
        ALOGE("%s read: %s", data->filename, strerror(errno));
        close(data->fd);
        data->fd = -1;
        return -1;
    }
    ALOG_ASSERT((size_t)size < buf_size - 1, "%s too large", data->filename);
    buf[size] = 0;

    return 0;
}

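/* Look up a registered process record by pid in the pidhash table. */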
static struct proc *pid_lookup(int pid) {
    struct proc *procp;

    for (procp = pidhash[pid_hashfn(pid)]; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            ;

    return procp;
}

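/*
 * procadjslot_list keeps one circular doubly-linked list per oom_score_adj
 * value. New entries are inserted at the head, so the tail of a slot is the
 * least recently updated process for that score (used as the LRU victim).
 */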
static void adjslot_insert(struct adjslot_list *head, struct adjslot_list *new)
{
    struct adjslot_list *next = head->next;
    new->prev = head;
    new->next = next;
    next->prev = new;
    head->next = new;
}

static void adjslot_remove(struct adjslot_list *old)
{
    struct adjslot_list *prev = old->prev;
    struct adjslot_list *next = old->next;
    next->prev = prev;
    prev->next = next;
}

static struct adjslot_list *adjslot_tail(struct adjslot_list *head) {
    struct adjslot_list *asl = head->prev;

    return asl == head ? NULL : asl;
}

static void proc_slot(struct proc *procp) {
    int adjslot = ADJTOSLOT(procp->oomadj);

    adjslot_insert(&procadjslot_list[adjslot], &procp->asl);
}

static void proc_unslot(struct proc *procp) {
    adjslot_remove(&procp->asl);
}

static void proc_insert(struct proc *procp) {
    int hval = pid_hashfn(procp->pid);

    procp->pidhash_next = pidhash[hval];
    pidhash[hval] = procp;
    proc_slot(procp);
}

static int pid_remove(int pid) {
    int hval = pid_hashfn(pid);
    struct proc *procp;
    struct proc *prevp;

    for (procp = pidhash[hval], prevp = NULL; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            prevp = procp;

    if (!procp)
        return -1;

    if (!prevp)
        pidhash[hval] = procp->pidhash_next;
    else
        prevp->pidhash_next = procp->pidhash_next;

    proc_unslot(procp);
    free(procp);
    return 0;
}

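/* Write a string to a file, logging (but not propagating) any errors. */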
static void writefilestring(const char *path, char *s) {
    int fd = open(path, O_WRONLY | O_CLOEXEC);
    int len = strlen(s);
    int ret;

    if (fd < 0) {
        ALOGE("Error opening %s; errno=%d", path, errno);
        return;
    }

    ret = write(fd, s, len);
    if (ret < 0) {
        ALOGE("Error writing %s; errno=%d", path, errno);
    } else if (ret < len) {
        ALOGE("Short write on %s; length=%d", path, ret);
    }

    close(fd);
}

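/*
 * LMK_PROCPRIO handler: writes the new oom_score_adj to
 * /proc/<pid>/oom_score_adj; when the userspace killer is in use it also
 * updates the per-app memcg soft limit on low-RAM devices and
 * registers/updates the process record used for userspace kills.
 */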
static void cmd_procprio(LMKD_CTRL_PACKET packet) {
    struct proc *procp;
    char path[80];
    char val[20];
    int soft_limit_mult;
    struct lmk_procprio params;

    lmkd_pack_get_procprio(packet, &params);

    if (params.oomadj < OOM_SCORE_ADJ_MIN ||
        params.oomadj > OOM_SCORE_ADJ_MAX) {
        ALOGE("Invalid PROCPRIO oomadj argument %d", params.oomadj);
        return;
    }

    snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", params.pid);
    snprintf(val, sizeof(val), "%d", params.oomadj);
    writefilestring(path, val);

    if (use_inkernel_interface)
        return;

    if (low_ram_device) {
        if (params.oomadj >= 900) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 800) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 700) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 600) {
            // Launcher should be perceptible, don't kill it.
            params.oomadj = 200;
            soft_limit_mult = 1;
        } else if (params.oomadj >= 500) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 400) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 300) {
            soft_limit_mult = 1;
        } else if (params.oomadj >= 200) {
            soft_limit_mult = 2;
        } else if (params.oomadj >= 100) {
            soft_limit_mult = 10;
        } else if (params.oomadj >= 0) {
            soft_limit_mult = 20;
        } else {
            // Persistent processes will have a large
            // soft limit 512MB.
            soft_limit_mult = 64;
        }

        snprintf(path, sizeof(path),
                 "/dev/memcg/apps/uid_%d/pid_%d/memory.soft_limit_in_bytes",
                 params.uid, params.pid);
        snprintf(val, sizeof(val), "%d", soft_limit_mult * EIGHT_MEGA);
        writefilestring(path, val);
    }

    procp = pid_lookup(params.pid);
    if (!procp) {
        procp = malloc(sizeof(struct proc));
        if (!procp) {
            // Oh, the irony. May need to rebuild our state.
            return;
        }

        procp->pid = params.pid;
        procp->uid = params.uid;
        procp->oomadj = params.oomadj;
        proc_insert(procp);
    } else {
        proc_unslot(procp);
        procp->oomadj = params.oomadj;
        proc_slot(procp);
    }
}

static void cmd_procremove(LMKD_CTRL_PACKET packet) {
    struct lmk_procremove params;

    if (use_inkernel_interface)
        return;

    lmkd_pack_get_procremove(packet, &params);
    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(params.pid);
}

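/* LMK_PROCPURGE handler: drop all process records and reset the adj slot lists. */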
static void cmd_procpurge() {
    int i;
    struct proc *procp;
    struct proc *next;

    if (use_inkernel_interface) {
        return;
    }

    for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
        procadjslot_list[i].next = &procadjslot_list[i];
        procadjslot_list[i].prev = &procadjslot_list[i];
    }

    for (i = 0; i < PIDHASH_SZ; i++) {
        procp = pidhash[i];
        while (procp) {
            next = procp->pidhash_next;
            free(procp);
            procp = next;
        }
    }
    memset(&pidhash[0], 0, sizeof(pidhash));
}

static void cmd_target(int ntargets, LMKD_CTRL_PACKET packet) {
    int i;
    struct lmk_target target;

    if (ntargets > (int)ARRAY_SIZE(lowmem_adj))
        return;

    for (i = 0; i < ntargets; i++) {
        lmkd_pack_get_target(packet, i, &target);
        lowmem_minfree[i] = target.minfree;
        lowmem_adj[i] = target.oom_adj_score;
    }

    lowmem_targets_size = ntargets;

    if (has_inkernel_module) {
        char minfreestr[128];
        char killpriostr[128];

        minfreestr[0] = '\0';
        killpriostr[0] = '\0';

        for (i = 0; i < lowmem_targets_size; i++) {
            char val[40];

            if (i) {
                strlcat(minfreestr, ",", sizeof(minfreestr));
                strlcat(killpriostr, ",", sizeof(killpriostr));
            }

            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_minfree[i] : 0);
            strlcat(minfreestr, val, sizeof(minfreestr));
            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_adj[i] : 0);
            strlcat(killpriostr, val, sizeof(killpriostr));
        }

        writefilestring(INKERNEL_MINFREE_PATH, minfreestr);
        writefilestring(INKERNEL_ADJ_PATH, killpriostr);
    }
}

static void ctrl_data_close(int dsock_idx) {
    struct epoll_event epev;

    ALOGI("closing lmkd data connection");
    if (epoll_ctl(epollfd, EPOLL_CTL_DEL, data_sock[dsock_idx].sock, &epev) == -1) {
        // Log a warning and keep going
        ALOGW("epoll_ctl for data connection socket failed; errno=%d", errno);
    }
    maxevents--;

    close(data_sock[dsock_idx].sock);
    data_sock[dsock_idx].sock = -1;
}

static int ctrl_data_read(int dsock_idx, char *buf, size_t bufsz) {
    int ret = 0;

    ret = TEMP_FAILURE_RETRY(read(data_sock[dsock_idx].sock, buf, bufsz));

    if (ret == -1) {
        ALOGE("control data socket read failed; errno=%d", errno);
    } else if (ret == 0) {
        ALOGE("Got EOF on control data socket");
        ret = -1;
    }

    return ret;
}

static void ctrl_command_handler(int dsock_idx) {
    LMKD_CTRL_PACKET packet;
    int len;
    enum lmk_cmd cmd;
    int nargs;
    int targets;

    len = ctrl_data_read(dsock_idx, (char *)packet, CTRL_PACKET_MAX_SIZE);
    if (len <= 0)
        return;

    if (len < (int)sizeof(int)) {
        ALOGE("Wrong control socket read length len=%d", len);
        return;
    }

    cmd = lmkd_pack_get_cmd(packet);
    nargs = len / sizeof(int) - 1;
    if (nargs < 0)
        goto wronglen;

    switch(cmd) {
    case LMK_TARGET:
        targets = nargs / 2;
        if (nargs & 0x1 || targets > (int)ARRAY_SIZE(lowmem_adj))
            goto wronglen;
        cmd_target(targets, packet);
        break;
    case LMK_PROCPRIO:
        if (nargs != 3)
            goto wronglen;
        cmd_procprio(packet);
        break;
    case LMK_PROCREMOVE:
        if (nargs != 1)
            goto wronglen;
        cmd_procremove(packet);
        break;
    case LMK_PROCPURGE:
        if (nargs != 0)
            goto wronglen;
        cmd_procpurge();
        break;
    default:
        ALOGE("Received unknown command code %d", cmd);
        return;
    }

    return;

wronglen:
    ALOGE("Wrong control socket read length cmd=%d len=%d", cmd, len);
}

static void ctrl_data_handler(int data, uint32_t events) {
    if (events & EPOLLIN) {
        ctrl_command_handler(data);
    }
}

static int get_free_dsock() {
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock < 0) {
            return i;
        }
    }
    return -1;
}

static void ctrl_connect_handler(int data __unused, uint32_t events __unused) {
    struct epoll_event epev;
    int free_dscock_idx = get_free_dsock();

    if (free_dscock_idx < 0) {
        /*
         * Number of data connections exceeded max supported. This should not
         * happen but if it does we drop all existing connections and accept
         * the new one. This prevents inactive connections from monopolizing
         * data socket and if we drop ActivityManager connection it will
         * immediately reconnect.
         */
        for (int i = 0; i < MAX_DATA_CONN; i++) {
            ctrl_data_close(i);
        }
        free_dscock_idx = 0;
    }

    data_sock[free_dscock_idx].sock = accept(ctrl_sock.sock, NULL, NULL);
    if (data_sock[free_dscock_idx].sock < 0) {
        ALOGE("lmkd control socket accept failed; errno=%d", errno);
        return;
    }

    ALOGI("lmkd data connection established");
    /* use data to store data connection idx */
    data_sock[free_dscock_idx].handler_info.data = free_dscock_idx;
    data_sock[free_dscock_idx].handler_info.handler = ctrl_data_handler;
    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&(data_sock[free_dscock_idx].handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, data_sock[free_dscock_idx].sock, &epev) == -1) {
        ALOGE("epoll_ctl for data connection socket failed; errno=%d", errno);
        ctrl_data_close(free_dscock_idx);
        return;
    }
    maxevents++;
}

#ifdef LMKD_LOG_STATS
static void memory_stat_parse_line(char* line, struct memory_stat* mem_st) {
    char key[LINE_MAX + 1];
    int64_t value;

    sscanf(line, "%" STRINGIFY(LINE_MAX) "s %" SCNd64 "", key, &value);

    if (strcmp(key, "total_") < 0) {
        return;
    }

    if (!strcmp(key, "total_pgfault"))
        mem_st->pgfault = value;
    else if (!strcmp(key, "total_pgmajfault"))
        mem_st->pgmajfault = value;
    else if (!strcmp(key, "total_rss"))
        mem_st->rss_in_bytes = value;
    else if (!strcmp(key, "total_cache"))
        mem_st->cache_in_bytes = value;
    else if (!strcmp(key, "total_swap"))
        mem_st->swap_in_bytes = value;
}

static int memory_stat_from_cgroup(struct memory_stat* mem_st, int pid, uid_t uid) {
    FILE* fp;
    char buf[PATH_MAX];

    snprintf(buf, sizeof(buf), MEMCG_PROCESS_MEMORY_STAT_PATH, uid, pid);

    fp = fopen(buf, "r");

    if (fp == NULL) {
        ALOGE("%s open failed: %s", buf, strerror(errno));
        return -1;
    }

    while (fgets(buf, PAGE_SIZE, fp) != NULL) {
        memory_stat_parse_line(buf, mem_st);
    }
    fclose(fp);

    return 0;
}

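/*
 * Read fault and RSS counters for a process from /proc/<pid>/stat
 * (PROC_STAT_FILE_PATH and PROC_STAT_BUFFER_SIZE are defined outside
 * this file).
 */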
static int memory_stat_from_procfs(struct memory_stat* mem_st, int pid) {
    char path[PATH_MAX];
    char buffer[PROC_STAT_BUFFER_SIZE];
    int fd, ret;

    snprintf(path, sizeof(path), PROC_STAT_FILE_PATH, pid);
    if ((fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
        ALOGE("%s open failed: %s", path, strerror(errno));
        return -1;
    }

    ret = read(fd, buffer, sizeof(buffer));
    if (ret < 0) {
        ALOGE("%s read failed: %s", path, strerror(errno));
        close(fd);
        return -1;
    }
    close(fd);

    // field 10 is pgfault
    // field 12 is pgmajfault
    // field 24 is rss_in_pages
    int64_t pgfault = 0, pgmajfault = 0, rss_in_pages = 0;
    if (sscanf(buffer,
               "%*u %*s %*s %*d %*d %*d %*d %*d %*d %" SCNd64 " %*d "
               "%" SCNd64 " %*d %*u %*u %*d %*d %*d %*d %*d %*d "
               "%*d %*d %" SCNd64 "",
               &pgfault, &pgmajfault, &rss_in_pages) != 3) {
        return -1;
    }
    mem_st->pgfault = pgfault;
    mem_st->pgmajfault = pgmajfault;
    mem_st->rss_in_bytes = (rss_in_pages * PAGE_SIZE);

    return 0;
}
#endif

/* /proc/zoneinfo parsing routines */
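/*
 * Parse the "protection:" entry of a zone, e.g. "protection: (0, 0, 1234, 1234)",
 * and return the largest of the listed watermark values.
 */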
static int64_t zoneinfo_parse_protection(char *cp) {
    int64_t max = 0;
    long long zoneval;
    char *save_ptr;

    for (cp = strtok_r(cp, "(), ", &save_ptr); cp;
         cp = strtok_r(NULL, "), ", &save_ptr)) {
        zoneval = strtoll(cp, &cp, 0);
        if (zoneval > max) {
            max = (zoneval > INT64_MAX) ? INT64_MAX : zoneval;
        }
    }

    return max;
}

static bool zoneinfo_parse_line(char *line, union zoneinfo *zi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return true;
    }

    if (!strcmp(cp, "protection:")) {
        ap = strtok_r(NULL, ")", &save_ptr);
    } else {
        ap = strtok_r(NULL, " ", &save_ptr);
    }

    if (!ap) {
        return true;
    }

    switch (match_field(cp, ap, zoneinfo_field_names,
                        ZI_FIELD_COUNT, &val, &field_idx)) {
    case (PARSE_SUCCESS):
        zi->arr[field_idx] += val;
        break;
    case (NO_MATCH):
        if (!strcmp(cp, "protection:")) {
            zi->field.totalreserve_pages +=
                zoneinfo_parse_protection(ap);
        }
        break;
    case (PARSE_FAIL):
    default:
        return false;
    }
    return true;
}

static int zoneinfo_parse(union zoneinfo *zi) {
    static struct reread_data file_data = {
        .filename = ZONEINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(zi, 0, sizeof(union zoneinfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!zoneinfo_parse_line(line, zi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    zi->field.totalreserve_pages += zi->field.high;

    return 0;
}

/* /proc/meminfo parsing routines */
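/*
 * Parse one /proc/meminfo line of the form "MemFree:      123456 kB";
 * matched values are converted from kB into pages using page_k.
 */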
static bool meminfo_parse_line(char *line, union meminfo *mi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;
    enum field_match_result match_res;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return false;
    }

    ap = strtok_r(NULL, " ", &save_ptr);
    if (!ap) {
        return false;
    }

    match_res = match_field(cp, ap, meminfo_field_names, MI_FIELD_COUNT,
                            &val, &field_idx);
    if (match_res == PARSE_SUCCESS) {
        mi->arr[field_idx] = val / page_k;
    }
    return (match_res != PARSE_FAIL);
}

static int meminfo_parse(union meminfo *mi) {
    static struct reread_data file_data = {
        .filename = MEMINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(mi, 0, sizeof(union meminfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!meminfo_parse_line(line, mi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    mi->field.nr_file_pages = mi->field.cached + mi->field.swap_cached +
        mi->field.buffers;

    return 0;
}

static int proc_get_size(int pid) {
    char path[PATH_MAX];
    char line[LINE_MAX];
    int fd;
    int rss = 0;
    int total;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/statm", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return -1;

    ret = read_all(fd, line, sizeof(line) - 1);
    if (ret < 0) {
        close(fd);
        return -1;
    }

    sscanf(line, "%d %d ", &total, &rss);
    close(fd);
    return rss;
}

static char *proc_get_name(int pid) {
    char path[PATH_MAX];
    static char line[LINE_MAX];
    int fd;
    char *cp;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/cmdline", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return NULL;
    ret = read_all(fd, line, sizeof(line) - 1);
    close(fd);
    if (ret < 0) {
        return NULL;
    }

    cp = strchr(line, ' ');
    if (cp)
        *cp = '\0';

    return line;
}

static struct proc *proc_adj_lru(int oomadj) {
    return (struct proc *)adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
}

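/*
 * Among processes registered at the given oom_score_adj, pick the one with
 * the largest RSS; records for processes that no longer exist are dropped
 * along the way.
 */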
static struct proc *proc_get_heaviest(int oomadj) {
    struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
    struct adjslot_list *curr = head->next;
    struct proc *maxprocp = NULL;
    int maxsize = 0;
    while (curr != head) {
        int pid = ((struct proc *)curr)->pid;
        int tasksize = proc_get_size(pid);
        if (tasksize <= 0) {
            struct adjslot_list *next = curr->next;
            pid_remove(pid);
            curr = next;
        } else {
            if (tasksize > maxsize) {
                maxsize = tasksize;
                maxprocp = (struct proc *)curr;
            }
            curr = curr->next;
        }
    }
    return maxprocp;
}

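/*
 * Move every thread of the (just-killed) process into the given cpuset and
 * raise its scheduling priority, intended to let the victim tear down and
 * release its memory faster.
 */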
static void set_process_group_and_prio(int pid, SchedPolicy sp, int prio) {
    DIR* d;
    char proc_path[PATH_MAX];
    struct dirent* de;

    snprintf(proc_path, sizeof(proc_path), "/proc/%d/task", pid);
    if (!(d = opendir(proc_path))) {
        ALOGW("Failed to open %s; errno=%d: process pid(%d) might have died", proc_path, errno, pid);
        return;
    }

    while ((de = readdir(d))) {
        int t_pid;

        if (de->d_name[0] == '.') continue;
        t_pid = atoi(de->d_name);

        if (!t_pid) {
            ALOGW("Failed to get t_pid for '%s' of pid(%d)", de->d_name, pid);
            continue;
        }

        if (setpriority(PRIO_PROCESS, t_pid, prio) && errno != ESRCH) {
            ALOGW("Unable to raise priority of killing t_pid (%d): errno=%d", t_pid, errno);
        }

        if (set_cpuset_policy(t_pid, sp)) {
            ALOGW("Failed to set_cpuset_policy on pid(%d) t_pid(%d) to %d", pid, t_pid, (int)sp);
            continue;
        }
    }
    closedir(d);
}

static int last_killed_pid = -1;

/* Kill one process specified by procp. Returns the size of the process killed */
static int kill_one_process(struct proc* procp) {
    int pid = procp->pid;
    uid_t uid = procp->uid;
    char *taskname;
    int tasksize;
    int r;
    int result = -1;

#ifdef LMKD_LOG_STATS
    struct memory_stat mem_st = {};
    int memory_stat_parse_result = -1;
#endif

    taskname = proc_get_name(pid);
    if (!taskname) {
        goto out;
    }

    tasksize = proc_get_size(pid);
    if (tasksize <= 0) {
        goto out;
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log) {
        if (per_app_memcg) {
            memory_stat_parse_result = memory_stat_from_cgroup(&mem_st, pid, uid);
        } else {
            memory_stat_parse_result = memory_stat_from_procfs(&mem_st, pid);
        }
    }
#endif

    TRACE_KILL_START(pid);

    /* CAP_KILL required */
    r = kill(pid, SIGKILL);

    set_process_group_and_prio(pid, SP_FOREGROUND, ANDROID_PRIORITY_HIGHEST);

    ALOGI("Kill '%s' (%d), uid %d, oom_adj %d to free %ldkB",
          taskname, pid, uid, procp->oomadj, tasksize * page_k);

    TRACE_KILL_END();

    last_killed_pid = pid;

    if (r) {
        ALOGE("kill(%d): errno=%d", pid, errno);
        goto out;
    } else {
#ifdef LMKD_LOG_STATS
        if (memory_stat_parse_result == 0) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname,
                    procp->oomadj, mem_st.pgfault, mem_st.pgmajfault, mem_st.rss_in_bytes,
                    mem_st.cache_in_bytes, mem_st.swap_in_bytes);
        } else if (enable_stats_log) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname, procp->oomadj,
                                          -1, -1, tasksize * BYTES_IN_KILOBYTE, -1, -1);
        }
#endif
        result = tasksize;
    }

out:
    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(pid);
    return result;
}

/*
 * Find processes to kill to free required number of pages.
 * If pages_to_free is set to 0 only one process will be killed.
 * Returns the size of the killed processes.
 */
static int find_and_kill_processes(int min_score_adj, int pages_to_free) {
    int i;
    int killed_size;
    int pages_freed = 0;

#ifdef LMKD_LOG_STATS
    bool lmk_state_change_start = false;
#endif

    for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) {
        struct proc *procp;

        while (true) {
            procp = kill_heaviest_task ?
                proc_get_heaviest(i) : proc_adj_lru(i);

            if (!procp)
                break;

            killed_size = kill_one_process(procp);
            if (killed_size >= 0) {
#ifdef LMKD_LOG_STATS
                if (enable_stats_log && !lmk_state_change_start) {
                    lmk_state_change_start = true;
                    stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                                  LMK_STATE_CHANGE_START);
                }
#endif

                pages_freed += killed_size;
                if (pages_freed >= pages_to_free) {

#ifdef LMKD_LOG_STATS
                    if (enable_stats_log && lmk_state_change_start) {
                        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                                      LMK_STATE_CHANGE_STOP);
                    }
#endif
                    return pages_freed;
                }
            }
        }
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log && lmk_state_change_start) {
        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED, LMK_STATE_CHANGE_STOP);
    }
#endif

    return pages_freed;
}

static int64_t get_memory_usage(struct reread_data *file_data) {
    int ret;
    int64_t mem_usage;
    char buf[32];

    if (reread_file(file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    if (!parse_int64(buf, &mem_usage)) {
        ALOGE("%s parse error", file_data->filename);
        return -1;
    }
    if (mem_usage == 0) {
        ALOGE("No memory!");
        return -1;
    }
    return mem_usage;
}

void record_low_pressure_levels(union meminfo *mi) {
    if (low_pressure_mem.min_nr_free_pages == -1 ||
        low_pressure_mem.min_nr_free_pages > mi->field.nr_free_pages) {
        if (debug_process_killing) {
            ALOGI("Low pressure min memory update from %" PRId64 " to %" PRId64,
                  low_pressure_mem.min_nr_free_pages, mi->field.nr_free_pages);
        }
        low_pressure_mem.min_nr_free_pages = mi->field.nr_free_pages;
    }
    /*
     * Free memory at low vmpressure events occasionally gets spikes,
     * possibly a stale low vmpressure event with memory already
     * freed up (no memory pressure should have been reported).
     * Ignore large jumps in max_nr_free_pages that would mess up our stats.
     */
    if (low_pressure_mem.max_nr_free_pages == -1 ||
        (low_pressure_mem.max_nr_free_pages < mi->field.nr_free_pages &&
         mi->field.nr_free_pages - low_pressure_mem.max_nr_free_pages <
         low_pressure_mem.max_nr_free_pages * 0.1)) {
        if (debug_process_killing) {
            ALOGI("Low pressure max memory update from %" PRId64 " to %" PRId64,
                  low_pressure_mem.max_nr_free_pages, mi->field.nr_free_pages);
        }
        low_pressure_mem.max_nr_free_pages = mi->field.nr_free_pages;
    }
}

enum vmpressure_level upgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ?
        level + 1 : level);
}

enum vmpressure_level downgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level > VMPRESS_LEVEL_LOW) ?
        level - 1 : level);
}

static inline unsigned long get_time_diff_ms(struct timeval *from,
                                             struct timeval *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_usec - from->tv_usec) / 1000;
}

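/*
 * The last kill is considered pending while /proc/<last_killed_pid> still
 * exists, i.e. the previous victim has not fully exited yet.
 */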
static bool is_kill_pending(void) {
    char buf[24];
    if (last_killed_pid < 0) {
        return false;
    }

    snprintf(buf, sizeof(buf), "/proc/%d/", last_killed_pid);
    if (access(buf, F_OK) == 0) {
        return true;
    }

    // reset last killed PID because there's nothing pending
    last_killed_pid = -1;
    return false;
}

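/*
 * Common handler for all vmpressure levels: drains the pending eventfd
 * counters, applies either the minfree levels or the vmpressure/swap
 * heuristics to pick min_score_adj and a page target, and then calls
 * find_and_kill_processes().
 */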
static void mp_event_common(int data, uint32_t events __unused) {
    int ret;
    unsigned long long evcount;
    int64_t mem_usage, memsw_usage;
    int64_t mem_pressure;
    enum vmpressure_level lvl;
    union meminfo mi;
    union zoneinfo zi;
    struct timeval curr_tm;
    static struct timeval last_kill_tm;
    static unsigned long kill_skip_count = 0;
    enum vmpressure_level level = (enum vmpressure_level)data;
    long other_free = 0, other_file = 0;
    int min_score_adj;
    int pages_to_free = 0;
    int minfree = 0;
    static struct reread_data mem_usage_file_data = {
        .filename = MEMCG_MEMORY_USAGE,
        .fd = -1,
    };
    static struct reread_data memsw_usage_file_data = {
        .filename = MEMCG_MEMORYSW_USAGE,
        .fd = -1,
    };

    /*
     * Check all event counters from low to critical
     * and upgrade to the highest priority one. By reading
     * eventfd we also reset the event counters.
     */
    for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
        if (mpevfd[lvl] != -1 &&
            TEMP_FAILURE_RETRY(read(mpevfd[lvl],
                               &evcount, sizeof(evcount))) > 0 &&
            evcount > 0 && lvl > level) {
            level = lvl;
        }
    }

    gettimeofday(&curr_tm, NULL);
    if (kill_timeout_ms) {
        // If we're within the timeout, see if there's pending reclaim work
        // from the last killed process. If there is (as evidenced by
        // /proc/<pid> continuing to exist), skip killing for now.
        if ((get_time_diff_ms(&last_kill_tm, &curr_tm) < kill_timeout_ms) &&
            (low_ram_device || is_kill_pending())) {
            kill_skip_count++;
            return;
        }
    }

    if (kill_skip_count > 0) {
        ALOGI("%lu memory pressure events were skipped after a kill!",
              kill_skip_count);
        kill_skip_count = 0;
    }

    if (meminfo_parse(&mi) < 0 || zoneinfo_parse(&zi) < 0) {
        ALOGE("Failed to get free memory!");
        return;
    }

    if (use_minfree_levels) {
        int i;

        other_free = mi.field.nr_free_pages - zi.field.totalreserve_pages;
        if (mi.field.nr_file_pages > (mi.field.shmem + mi.field.unevictable + mi.field.swap_cached)) {
            other_file = (mi.field.nr_file_pages - mi.field.shmem -
                          mi.field.unevictable - mi.field.swap_cached);
        } else {
            other_file = 0;
        }

        min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        for (i = 0; i < lowmem_targets_size; i++) {
            minfree = lowmem_minfree[i];
            if (other_free < minfree && other_file < minfree) {
                min_score_adj = lowmem_adj[i];
                break;
            }
        }

        if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
            if (debug_process_killing) {
                ALOGI("Ignore %s memory pressure event "
                      "(free memory=%ldkB, cache=%ldkB, limit=%ldkB)",
                      level_name[level], other_free * page_k, other_file * page_k,
                      (long)lowmem_minfree[lowmem_targets_size - 1] * page_k);
            }
            return;
        }

        /* Free up enough pages to push over the highest minfree level */
        pages_to_free = lowmem_minfree[lowmem_targets_size - 1] -
            ((other_free < other_file) ? other_free : other_file);
        goto do_kill;
    }

    if (level == VMPRESS_LEVEL_LOW) {
        record_low_pressure_levels(&mi);
    }

    if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) {
        /* Do not monitor this pressure level */
        return;
    }

    if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
        goto do_kill;
    }
    if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
        goto do_kill;
    }

    // Calculate percent for swappiness.
    mem_pressure = (mem_usage * 100) / memsw_usage;

    if (enable_pressure_upgrade && level != VMPRESS_LEVEL_CRITICAL) {
        // We are swapping too much.
        if (mem_pressure < upgrade_pressure) {
            level = upgrade_level(level);
            if (debug_process_killing) {
                ALOGI("Event upgraded to %s", level_name[level]);
            }
        }
    }

    // If the pressure is larger than downgrade_pressure lmk will not
    // kill any process, since enough memory is available.
    if (mem_pressure > downgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Ignore %s memory pressure", level_name[level]);
        }
        return;
    } else if (level == VMPRESS_LEVEL_CRITICAL &&
               mem_pressure > upgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Downgrade critical memory pressure");
        }
        // Downgrade event, since enough memory available.
        level = downgrade_level(level);
    }

do_kill:
    if (low_ram_device) {
        /* For Go devices kill only one task */
        if (find_and_kill_processes(level_oomadj[level], 0) == 0) {
            if (debug_process_killing) {
                ALOGI("Nothing to kill");
            }
        }
    } else {
        int pages_freed;
        static struct timeval last_report_tm;
        static unsigned long report_skip_count = 0;

        if (!use_minfree_levels) {
            /* If pressure level is less than critical and enough free swap then ignore */
            if (level < VMPRESS_LEVEL_CRITICAL &&
                mi.field.free_swap > low_pressure_mem.max_nr_free_pages) {
                if (debug_process_killing) {
                    ALOGI("Ignoring pressure since %" PRId64
                          " swap pages are available ",
                          mi.field.free_swap);
                }
                return;
            }
            /* Free up enough memory to downgrade the memory pressure to low level */
1452 if (mi.field.nr_free_pages < low_pressure_mem.max_nr_free_pages) {
1453 pages_to_free = low_pressure_mem.max_nr_free_pages -
1454 mi.field.nr_free_pages;
1455 } else {
1456 if (debug_process_killing) {
1457 ALOGI("Ignoring pressure since more memory is "
1458 "available (%" PRId64 ") than watermark (%" PRId64 ")",
1459 mi.field.nr_free_pages, low_pressure_mem.max_nr_free_pages);
1460 }
1461 return;
1462 }
1463 min_score_adj = level_oomadj[level];
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001464 }
1465
Tim Murrayafb3a152018-10-25 17:05:41 -07001466 pages_freed = find_and_kill_processes(min_score_adj, 0);
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001467
1468 if (pages_freed == 0) {
1469 /* Rate limit kill reports when nothing was reclaimed */
1470 if (get_time_diff_ms(&last_report_tm, &curr_tm) < FAIL_REPORT_RLIMIT_MS) {
1471 report_skip_count++;
1472 return;
1473 }
Tim Murrayafb3a152018-10-25 17:05:41 -07001474 } else {
1475 /* If we killed anything, update the last killed timestamp. */
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001476 last_kill_tm = curr_tm;
1477 }
Suren Baghdasaryanda88b242018-05-10 16:10:56 -07001478
1479 if (use_minfree_levels) {
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001480 ALOGI("Killing to reclaim %ldkB, reclaimed %ldkB, cache(%ldkB) and "
1481 "free(%" PRId64 "kB)-reserved(%" PRId64 "kB) below min(%ldkB) for oom_adj %d",
1482 pages_to_free * page_k, pages_freed * page_k,
1483 other_file * page_k, mi.field.nr_free_pages * page_k,
1484 zi.field.totalreserve_pages * page_k,
1485 minfree * page_k, min_score_adj);
1486 } else {
1487 ALOGI("Killing to reclaim %ldkB, reclaimed %ldkB at oom_adj %d",
1488 pages_to_free * page_k, pages_freed * page_k, min_score_adj);
Suren Baghdasaryanda88b242018-05-10 16:10:56 -07001489 }
1490
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001491 if (report_skip_count > 0) {
1492 ALOGI("Suppressed %lu failed kill reports", report_skip_count);
1493 report_skip_count = 0;
Robert Beneacaeaa652017-08-11 16:03:20 -07001494 }
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001495
1496 last_report_tm = curr_tm;
Colin Crossf8857cc2014-07-11 17:16:56 -07001497 }
Todd Poynor3948f802013-07-09 19:35:14 -07001498}
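
/*
 * Illustrative sketch only: the kill-report rate limiting above relies on
 * get_time_diff_ms(), which is defined earlier in this file. Assuming it
 * returns the elapsed milliseconds between two struct timeval samples, a
 * minimal equivalent could look like the following. The _sketch name is
 * hypothetical and only avoids clashing with the real helper.
 */
static __unused long get_time_diff_ms_sketch(struct timeval *from,
                                             struct timeval *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_usec - from->tv_usec) / 1000;
}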
1499
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001500static bool init_mp_common(enum vmpressure_level level) {
Todd Poynor3948f802013-07-09 19:35:14 -07001501 int mpfd;
1502 int evfd;
1503 int evctlfd;
1504 char buf[256];
1505 struct epoll_event epev;
1506 int ret;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001507 int level_idx = (int)level;
1508 const char *levelstr = level_name[level_idx];
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001509
Nick Kralevichc68c8862015-12-18 20:52:37 -08001510 mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07001511 if (mpfd < 0) {
1512 ALOGI("No kernel memory.pressure_level support (errno=%d)", errno);
1513 goto err_open_mpfd;
1514 }
1515
Nick Kralevichc68c8862015-12-18 20:52:37 -08001516 evctlfd = open(MEMCG_SYSFS_PATH "cgroup.event_control", O_WRONLY | O_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07001517 if (evctlfd < 0) {
1518 ALOGI("No kernel memory cgroup event control (errno=%d)", errno);
1519 goto err_open_evctlfd;
1520 }
1521
Nick Kralevichc68c8862015-12-18 20:52:37 -08001522 evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07001523 if (evfd < 0) {
1524 ALOGE("eventfd failed for level %s; errno=%d", levelstr, errno);
1525 goto err_eventfd;
1526 }
1527
1528 ret = snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr);
1529 if (ret >= (ssize_t)sizeof(buf)) {
1530 ALOGE("cgroup.event_control line overflow for level %s", levelstr);
1531 goto err;
1532 }
1533
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001534 ret = TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1));
Todd Poynor3948f802013-07-09 19:35:14 -07001535 if (ret == -1) {
1536 ALOGE("cgroup.event_control write failed for level %s; errno=%d",
1537 levelstr, errno);
1538 goto err;
1539 }
1540
1541 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001542 /* use data to store event level */
1543 vmpressure_hinfo[level_idx].data = level_idx;
1544 vmpressure_hinfo[level_idx].handler = mp_event_common;
1545 epev.data.ptr = (void *)&vmpressure_hinfo[level_idx];
Todd Poynor3948f802013-07-09 19:35:14 -07001546 ret = epoll_ctl(epollfd, EPOLL_CTL_ADD, evfd, &epev);
1547 if (ret == -1) {
1548 ALOGE("epoll_ctl for level %s failed; errno=%d", levelstr, errno);
1549 goto err;
1550 }
1551 maxevents++;
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001552 mpevfd[level] = evfd;
Suren Baghdasaryan1bd2fc42018-01-04 08:54:53 -08001553 close(evctlfd);
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001554 return true;
Todd Poynor3948f802013-07-09 19:35:14 -07001555
1556err:
1557 close(evfd);
1558err_eventfd:
1559 close(evctlfd);
1560err_open_evctlfd:
1561 close(mpfd);
1562err_open_mpfd:
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001563 return false;
Robert Benea673e2762017-06-01 16:32:31 -07001564}
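
/*
 * Illustrative sketch only: init_mp_common() above uses the standard
 * cgroup-v1 memory.pressure_level registration. Stripped of lmkd's epoll
 * handler plumbing, the kernel interface boils down to the steps below.
 * The _sketch name is hypothetical and error handling is reduced; the
 * returned eventfd becomes readable whenever the kernel signals the
 * requested pressure level ("low", "medium" or "critical").
 */
static __unused int register_vmpressure_sketch(const char *levelstr) {
    char buf[256];
    int mpfd, evctlfd, evfd;

    /* File through which the kernel reports memory pressure for this cgroup. */
    mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC);
    if (mpfd < 0)
        return -1;

    /* Control file used to tie an eventfd to a pressure level. */
    evctlfd = open(MEMCG_SYSFS_PATH "cgroup.event_control", O_WRONLY | O_CLOEXEC);
    if (evctlfd < 0) {
        close(mpfd);
        return -1;
    }

    /* The eventfd the kernel will signal when the level is crossed. */
    evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (evfd < 0) {
        close(evctlfd);
        close(mpfd);
        return -1;
    }

    /* Registration line format: "<event_fd> <pressure_fd> <level>". */
    if (snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr) >= (int)sizeof(buf) ||
        TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1)) < 0) {
        close(evfd);
        close(mpfd);
        evfd = -1;
    }

    /* Only the control fd is closed here; mpfd stays open, as in the code above. */
    close(evctlfd);
    return evfd;
}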
1565
Todd Poynor3948f802013-07-09 19:35:14 -07001566static int init(void) {
1567 struct epoll_event epev;
1568 int i;
1569 int ret;
1570
1571 page_k = sysconf(_SC_PAGESIZE);
1572 if (page_k == -1)
1573 page_k = PAGE_SIZE;
1574 page_k /= 1024;
1575
1576 epollfd = epoll_create(MAX_EPOLL_EVENTS);
1577 if (epollfd == -1) {
1578 ALOGE("epoll_create failed (errno=%d)", errno);
1579 return -1;
1580 }
1581
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001582 // mark data connections as not connected
1583 for (int i = 0; i < MAX_DATA_CONN; i++) {
1584 data_sock[i].sock = -1;
1585 }
1586
1587 ctrl_sock.sock = android_get_control_socket("lmkd");
1588 if (ctrl_sock.sock < 0) {
Todd Poynor3948f802013-07-09 19:35:14 -07001589 ALOGE("get lmkd control socket failed");
1590 return -1;
1591 }
1592
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001593 ret = listen(ctrl_sock.sock, MAX_DATA_CONN);
Todd Poynor3948f802013-07-09 19:35:14 -07001594 if (ret < 0) {
1595 ALOGE("lmkd control socket listen failed (errno=%d)", errno);
1596 return -1;
1597 }
1598
1599 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001600 ctrl_sock.handler_info.handler = ctrl_connect_handler;
1601 epev.data.ptr = (void *)&(ctrl_sock.handler_info);
1602 if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ctrl_sock.sock, &epev) == -1) {
Todd Poynor3948f802013-07-09 19:35:14 -07001603 ALOGE("epoll_ctl for lmkd control socket failed (errno=%d)", errno);
1604 return -1;
1605 }
1606 maxevents++;
1607
Robert Benea164baeb2017-09-11 16:53:28 -07001608 has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK);
Suren Baghdasaryan979591b2018-01-18 17:27:30 -08001609 use_inkernel_interface = has_inkernel_module;
Todd Poynor3948f802013-07-09 19:35:14 -07001610
1611 if (use_inkernel_interface) {
1612 ALOGI("Using in-kernel low memory killer interface");
1613 } else {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001614 if (!init_mp_common(VMPRESS_LEVEL_LOW) ||
1615 !init_mp_common(VMPRESS_LEVEL_MEDIUM) ||
1616 !init_mp_common(VMPRESS_LEVEL_CRITICAL)) {
Todd Poynor3948f802013-07-09 19:35:14 -07001617 ALOGE("Kernel does not support memory pressure events or in-kernel low memory killer");
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001618 return -1;
1619 }
Todd Poynor3948f802013-07-09 19:35:14 -07001620 }
1621
Chong Zhang0a4acdf2015-10-14 16:19:53 -07001622 for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
Todd Poynor3948f802013-07-09 19:35:14 -07001623 procadjslot_list[i].next = &procadjslot_list[i];
1624 procadjslot_list[i].prev = &procadjslot_list[i];
1625 }
1626
1627 return 0;
1628}
1629
1630static void mainloop(void) {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001631 struct event_handler_info* handler_info;
1632 struct epoll_event *evt;
1633
Todd Poynor3948f802013-07-09 19:35:14 -07001634 while (1) {
1635 struct epoll_event events[maxevents];
1636 int nevents;
1637 int i;
1638
Todd Poynor3948f802013-07-09 19:35:14 -07001639 nevents = epoll_wait(epollfd, events, maxevents, -1);
1640
1641 if (nevents == -1) {
1642 if (errno == EINTR)
1643 continue;
1644 ALOGE("epoll_wait failed (errno=%d)", errno);
1645 continue;
1646 }
1647
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001648 /*
1649 * First pass to see if any data socket connections were dropped.
1650             * Dropped connections should be handled before any other events
1651             * so that the data connection is deallocated and cases where a
1652             * connection is dropped and reestablished in the same epoll cycle
1653             * are handled correctly, with connection closures processed first.
1654 */
1655 for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
1656 if ((evt->events & EPOLLHUP) && evt->data.ptr) {
1657 ALOGI("lmkd data connection dropped");
1658 handler_info = (struct event_handler_info*)evt->data.ptr;
1659 ctrl_data_close(handler_info->data);
1660 }
1661 }
1662
1663 /* Second pass to handle all other events */
1664 for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
1665 if (evt->events & EPOLLERR)
Todd Poynor3948f802013-07-09 19:35:14 -07001666 ALOGD("EPOLLERR on event #%d", i);
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001667 if (evt->events & EPOLLHUP) {
1668 /* This case was handled in the first pass */
1669 continue;
1670 }
1671 if (evt->data.ptr) {
1672 handler_info = (struct event_handler_info*)evt->data.ptr;
1673 handler_info->handler(handler_info->data, evt->events);
1674 }
Todd Poynor3948f802013-07-09 19:35:14 -07001675 }
1676 }
1677}
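
/*
 * Illustrative sketch only: every fd that mainloop() watches is registered
 * the same way - an event_handler_info (defined earlier in this file, with
 * the .data and .handler fields used above) is stored in epev.data.ptr so
 * the loop can dispatch directly to the right callback. The handler and
 * helper names below are hypothetical.
 */
static struct event_handler_info wakeup_hinfo_sketch;

static void wakeup_handler_sketch(int data __unused, uint32_t events __unused) {
    ALOGI("wakeup event received");
}

static __unused bool register_wakeup_fd_sketch(int fd) {
    struct epoll_event epev;

    wakeup_hinfo_sketch.data = 0;                       /* passed back to the handler */
    wakeup_hinfo_sketch.handler = wakeup_handler_sketch;

    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&wakeup_hinfo_sketch;
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &epev) == -1) {
        ALOGE("epoll_ctl for wakeup fd failed (errno=%d)", errno);
        return false;
    }
    maxevents++;  /* mainloop() sizes its events[] array from this counter */
    return true;
}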
1678
Mark Salyzyne6ed68b2014-04-30 13:36:35 -07001679int main(int argc __unused, char **argv __unused) {
Colin Cross1a0d9be2014-07-14 14:31:15 -07001680 struct sched_param param = {
1681 .sched_priority = 1,
1682 };
1683
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001684 /* By default disable low level vmpressure events */
1685 level_oomadj[VMPRESS_LEVEL_LOW] =
1686 property_get_int32("ro.lmk.low", OOM_SCORE_ADJ_MAX + 1);
1687 level_oomadj[VMPRESS_LEVEL_MEDIUM] =
1688 property_get_int32("ro.lmk.medium", 800);
1689 level_oomadj[VMPRESS_LEVEL_CRITICAL] =
1690 property_get_int32("ro.lmk.critical", 0);
Robert Beneacaeaa652017-08-11 16:03:20 -07001691 debug_process_killing = property_get_bool("ro.lmk.debug", false);
Suren Baghdasaryanad2fd912017-12-08 13:08:41 -08001692
1693 /* By default disable upgrade/downgrade logic */
1694 enable_pressure_upgrade =
1695 property_get_bool("ro.lmk.critical_upgrade", false);
1696 upgrade_pressure =
1697 (int64_t)property_get_int32("ro.lmk.upgrade_pressure", 100);
1698 downgrade_pressure =
1699 (int64_t)property_get_int32("ro.lmk.downgrade_pressure", 100);
Suren Baghdasaryan662492a2017-12-08 13:17:06 -08001700 kill_heaviest_task =
Suren Baghdasaryan9ff66ff2018-04-13 11:49:54 -07001701 property_get_bool("ro.lmk.kill_heaviest_task", false);
Suren Baghdasaryan39a22e72018-04-13 11:45:38 -07001702 low_ram_device = property_get_bool("ro.config.low_ram", false);
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -08001703 kill_timeout_ms =
1704 (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001705 use_minfree_levels =
1706 property_get_bool("ro.lmk.use_minfree_levels", false);
Rajeev Kumar244ace62018-10-05 12:34:59 -07001707 per_app_memcg = property_get_bool("ro.config.per_app_memcg", low_ram_device);
Rajeev Kumar70450032018-01-31 17:54:56 -08001708#ifdef LMKD_LOG_STATS
Rajeev Kumar1c669f72018-03-09 15:20:56 -08001709 statslog_init(&log_ctx, &enable_stats_log);
Rajeev Kumar70450032018-01-31 17:54:56 -08001710#endif
1711
Daniel Colascioned39adf22018-01-05 14:59:55 -08001712 // MCL_ONFAULT pins pages as they fault instead of loading
1713 // everything immediately all at once. (Which would be bad,
1714 // because as of this writing, we have a lot of mapped pages we
1715 // never use.) Old kernels will see MCL_ONFAULT and fail with
1716 // EINVAL; we ignore this failure.
1717 //
1718 // N.B. read the man page for mlockall. MCL_CURRENT | MCL_ONFAULT
1719 // pins ⊆ MCL_CURRENT, converging to just MCL_CURRENT as we fault
1720 // in pages.
1721 if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) && errno != EINVAL)
Daniel Colascione4dd5d002018-01-03 12:01:02 -08001722 ALOGW("mlockall failed: errno=%d", errno);
1723
Colin Cross1a0d9be2014-07-14 14:31:15 -07001724 sched_setscheduler(0, SCHED_FIFO, &param);
Todd Poynor3948f802013-07-09 19:35:14 -07001725 if (!init())
1726 mainloop();
1727
Rajeev Kumar70450032018-01-31 17:54:56 -08001728#ifdef LMKD_LOG_STATS
Rajeev Kumar1c669f72018-03-09 15:20:56 -08001729 statslog_destroy(&log_ctx);
Rajeev Kumar70450032018-01-31 17:54:56 -08001730#endif
1731
Todd Poynor3948f802013-07-09 19:35:14 -07001732 ALOGI("exiting");
1733 return 0;
1734}
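
/*
 * Illustrative sketch only: main() above deliberately ignores EINVAL from
 * mlockall(), since old kernels reject MCL_ONFAULT. A variant that instead
 * falls back to eager locking on such kernels (not what lmkd does, shown
 * only for comparison) might look like this; the _sketch name is
 * hypothetical.
 */
static __unused void lock_pages_sketch(void) {
    /* Prefer on-fault locking so unused mapped pages are never touched. */
    if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) == 0)
        return;

    /* EINVAL means the kernel predates MCL_ONFAULT; lock everything eagerly. */
    if (errno == EINVAL && mlockall(MCL_CURRENT | MCL_FUTURE) == 0)
        return;

    ALOGW("mlockall failed: errno=%d", errno);
}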