/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "lowmemorykiller"

#include <errno.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/sysinfo.h>
#include <sys/types.h>
#include <unistd.h>

#include <cutils/properties.h>
#include <cutils/sockets.h>
#include <lmkd.h>
#include <log/log.h>

#ifdef LMKD_LOG_STATS
#include "statslog.h"
#endif

/*
 * Define LMKD_TRACE_KILLS to record lmkd kills in kernel traces
 * to profile and correlate with OOM kills
 */
#ifdef LMKD_TRACE_KILLS

#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <cutils/trace.h>

#define TRACE_KILL_START(pid) ATRACE_INT(__FUNCTION__, pid);
#define TRACE_KILL_END() ATRACE_INT(__FUNCTION__, 0);

#else /* LMKD_TRACE_KILLS */

#define TRACE_KILL_START(pid) ((void)(pid))
#define TRACE_KILL_END() ((void)0)

#endif /* LMKD_TRACE_KILLS */

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

#define MEMCG_SYSFS_PATH "/dev/memcg/"
#define MEMCG_MEMORY_USAGE "/dev/memcg/memory.usage_in_bytes"
#define MEMCG_MEMORYSW_USAGE "/dev/memcg/memory.memsw.usage_in_bytes"
#define ZONEINFO_PATH "/proc/zoneinfo"
#define MEMINFO_PATH "/proc/meminfo"
#define LINE_MAX 128

#define INKERNEL_MINFREE_PATH "/sys/module/lowmemorykiller/parameters/minfree"
#define INKERNEL_ADJ_PATH "/sys/module/lowmemorykiller/parameters/adj"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define EIGHT_MEGA (1 << 23)

#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
#define STRINGIFY_INTERNAL(x) #x

#define FAIL_REPORT_RLIMIT_MS 1000

/* default to old in-kernel interface if no memory pressure events */
static int use_inkernel_interface = 1;
static bool has_inkernel_module;

/* memory pressure levels */
enum vmpressure_level {
    VMPRESS_LEVEL_LOW = 0,
    VMPRESS_LEVEL_MEDIUM,
    VMPRESS_LEVEL_CRITICAL,
    VMPRESS_LEVEL_COUNT
};

static const char *level_name[] = {
    "low",
    "medium",
    "critical"
};

struct {
    int64_t min_nr_free_pages; /* recorded but not used yet */
    int64_t max_nr_free_pages;
} low_pressure_mem = { -1, -1 };

static int level_oomadj[VMPRESS_LEVEL_COUNT];
static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 };
static bool debug_process_killing;
static bool enable_pressure_upgrade;
static int64_t upgrade_pressure;
static int64_t downgrade_pressure;
static bool low_ram_device;
static bool kill_heaviest_task;
static unsigned long kill_timeout_ms;
static bool use_minfree_levels;
static bool per_app_memcg;

/* data required to handle events */
struct event_handler_info {
    int data;
    void (*handler)(int data, uint32_t events);
};

/* data required to handle socket events */
struct sock_event_handler_info {
    int sock;
    struct event_handler_info handler_info;
};

/* max supported number of data connections */
#define MAX_DATA_CONN 2

/* socket event handler data */
static struct sock_event_handler_info ctrl_sock;
static struct sock_event_handler_info data_sock[MAX_DATA_CONN];

/* vmpressure event handler data */
static struct event_handler_info vmpressure_hinfo[VMPRESS_LEVEL_COUNT];

/* 3 memory pressure levels, 1 ctrl listen socket, 2 ctrl data socket */
#define MAX_EPOLL_EVENTS (1 + MAX_DATA_CONN + VMPRESS_LEVEL_COUNT)
static int epollfd;
static int maxevents;

/* OOM score values used by both kernel and framework */
#define OOM_SCORE_ADJ_MIN (-1000)
#define OOM_SCORE_ADJ_MAX 1000

static int lowmem_adj[MAX_TARGETS];
static int lowmem_minfree[MAX_TARGETS];
static int lowmem_targets_size;

/* Fields to parse in /proc/zoneinfo */
enum zoneinfo_field {
    ZI_NR_FREE_PAGES = 0,
    ZI_NR_FILE_PAGES,
    ZI_NR_SHMEM,
    ZI_NR_UNEVICTABLE,
    ZI_WORKINGSET_REFAULT,
    ZI_HIGH,
    ZI_FIELD_COUNT
};

static const char* const zoneinfo_field_names[ZI_FIELD_COUNT] = {
    "nr_free_pages",
    "nr_file_pages",
    "nr_shmem",
    "nr_unevictable",
    "workingset_refault",
    "high",
};

union zoneinfo {
    struct {
        int64_t nr_free_pages;
        int64_t nr_file_pages;
        int64_t nr_shmem;
        int64_t nr_unevictable;
        int64_t workingset_refault;
        int64_t high;
        /* fields below are calculated rather than read from the file */
        int64_t totalreserve_pages;
    } field;
    int64_t arr[ZI_FIELD_COUNT];
};

/* Fields to parse in /proc/meminfo */
enum meminfo_field {
    MI_NR_FREE_PAGES = 0,
    MI_CACHED,
    MI_SWAP_CACHED,
    MI_BUFFERS,
    MI_SHMEM,
    MI_UNEVICTABLE,
    MI_FREE_SWAP,
    MI_DIRTY,
    MI_FIELD_COUNT
};

static const char* const meminfo_field_names[MI_FIELD_COUNT] = {
    "MemFree:",
    "Cached:",
    "SwapCached:",
    "Buffers:",
    "Shmem:",
    "Unevictable:",
    "SwapFree:",
    "Dirty:",
};

union meminfo {
    struct {
        int64_t nr_free_pages;
        int64_t cached;
        int64_t swap_cached;
        int64_t buffers;
        int64_t shmem;
        int64_t unevictable;
        int64_t free_swap;
        int64_t dirty;
        /* fields below are calculated rather than read from the file */
        int64_t nr_file_pages;
    } field;
    int64_t arr[MI_FIELD_COUNT];
};

enum field_match_result {
    NO_MATCH,
    PARSE_FAIL,
    PARSE_SUCCESS
};

struct adjslot_list {
    struct adjslot_list *next;
    struct adjslot_list *prev;
};

struct proc {
    struct adjslot_list asl;
    int pid;
    uid_t uid;
    int oomadj;
    struct proc *pidhash_next;
};

struct reread_data {
    const char* const filename;
    int fd;
};

#ifdef LMKD_LOG_STATS
static bool enable_stats_log;
static android_log_context log_ctx;
#endif

#define PIDHASH_SZ 1024
static struct proc *pidhash[PIDHASH_SZ];
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

#define ADJTOSLOT(adj) ((adj) + -OOM_SCORE_ADJ_MIN)
static struct adjslot_list procadjslot_list[ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1];

/* PAGE_SIZE / 1024 */
static long page_k;

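/*
 * Parsing helpers shared by the /proc/zoneinfo, /proc/meminfo and memcg
 * usage readers below: parse_int64() converts a decimal string and
 * match_field() maps a "name value" pair onto the matching slot of a
 * zoneinfo/meminfo union.
 */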
static bool parse_int64(const char* str, int64_t* ret) {
    char* endptr;
    long long val = strtoll(str, &endptr, 10);
    if (str == endptr || val > INT64_MAX) {
        return false;
    }
    *ret = (int64_t)val;
    return true;
}

static enum field_match_result match_field(const char* cp, const char* ap,
                                           const char* const field_names[],
                                           int field_count, int64_t* field,
                                           int *field_idx) {
    int64_t val;
    int i;

    for (i = 0; i < field_count; i++) {
        if (!strcmp(cp, field_names[i])) {
            *field_idx = i;
            return parse_int64(ap, field) ? PARSE_SUCCESS : PARSE_FAIL;
        }
    }
    return NO_MATCH;
}

/*
 * Read file content from the beginning up to max_len bytes or EOF
 * whichever happens first.
 */
static ssize_t read_all(int fd, char *buf, size_t max_len)
{
    ssize_t ret = 0;
    off_t offset = 0;

    while (max_len > 0) {
        ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
        if (r == 0) {
            break;
        }
        if (r == -1) {
            return -1;
        }
        ret += r;
        buf += r;
        offset += r;
        max_len -= r;
    }

    return ret;
}

/*
 * Read a new or already opened file from the beginning.
 * If the file has not been opened yet data->fd should be set to -1.
 * To be used with files which are read often and possibly during high
 * memory pressure to minimize file opening which by itself requires kernel
 * memory allocation and might result in a stall on memory stressed system.
 */
static int reread_file(struct reread_data *data, char *buf, size_t buf_size) {
    ssize_t size;

    if (data->fd == -1) {
        data->fd = open(data->filename, O_RDONLY | O_CLOEXEC);
        if (data->fd == -1) {
            ALOGE("%s open: %s", data->filename, strerror(errno));
            return -1;
        }
    }

    size = read_all(data->fd, buf, buf_size - 1);
    if (size < 0) {
        ALOGE("%s read: %s", data->filename, strerror(errno));
        close(data->fd);
        data->fd = -1;
        return -1;
    }
    ALOG_ASSERT((size_t)size < buf_size - 1, "%s too large", data->filename);
    buf[size] = 0;

    return 0;
}

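/*
 * Process registry helpers. Registered processes are tracked in two
 * structures at once: the pidhash table for lookup by pid, and the
 * procadjslot_list array of circular doubly-linked lists, one list per
 * oom_score_adj value, used when picking a victim for a given adj level.
 */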
static struct proc *pid_lookup(int pid) {
    struct proc *procp;

    for (procp = pidhash[pid_hashfn(pid)]; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            ;

    return procp;
}

static void adjslot_insert(struct adjslot_list *head, struct adjslot_list *new)
{
    struct adjslot_list *next = head->next;
    new->prev = head;
    new->next = next;
    next->prev = new;
    head->next = new;
}

static void adjslot_remove(struct adjslot_list *old)
{
    struct adjslot_list *prev = old->prev;
    struct adjslot_list *next = old->next;
    next->prev = prev;
    prev->next = next;
}

static struct adjslot_list *adjslot_tail(struct adjslot_list *head) {
    struct adjslot_list *asl = head->prev;

    return asl == head ? NULL : asl;
}

static void proc_slot(struct proc *procp) {
    int adjslot = ADJTOSLOT(procp->oomadj);

    adjslot_insert(&procadjslot_list[adjslot], &procp->asl);
}

static void proc_unslot(struct proc *procp) {
    adjslot_remove(&procp->asl);
}

static void proc_insert(struct proc *procp) {
    int hval = pid_hashfn(procp->pid);

    procp->pidhash_next = pidhash[hval];
    pidhash[hval] = procp;
    proc_slot(procp);
}

static int pid_remove(int pid) {
    int hval = pid_hashfn(pid);
    struct proc *procp;
    struct proc *prevp;

    for (procp = pidhash[hval], prevp = NULL; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            prevp = procp;

    if (!procp)
        return -1;

    if (!prevp)
        pidhash[hval] = procp->pidhash_next;
    else
        prevp->pidhash_next = procp->pidhash_next;

    proc_unslot(procp);
    free(procp);
    return 0;
}

static void writefilestring(const char *path, char *s) {
    int fd = open(path, O_WRONLY | O_CLOEXEC);
    int len = strlen(s);
    int ret;

    if (fd < 0) {
        ALOGE("Error opening %s; errno=%d", path, errno);
        return;
    }

    ret = write(fd, s, len);
    if (ret < 0) {
        ALOGE("Error writing %s; errno=%d", path, errno);
    } else if (ret < len) {
        ALOGE("Short write on %s; length=%d", path, ret);
    }

    close(fd);
}

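/*
 * Handler for the LMK_PROCPRIO command: writes the requested oom_score_adj
 * to /proc/<pid>/oom_score_adj, optionally sets a per-app memcg soft limit
 * on low-RAM devices, and records (or updates) the process in the registry
 * unless the in-kernel lowmemorykiller driver is being used.
 */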
static void cmd_procprio(LMKD_CTRL_PACKET packet) {
    struct proc *procp;
    char path[80];
    char val[20];
    int soft_limit_mult;
    struct lmk_procprio params;

    lmkd_pack_get_procprio(packet, &params);

    if (params.oomadj < OOM_SCORE_ADJ_MIN ||
        params.oomadj > OOM_SCORE_ADJ_MAX) {
        ALOGE("Invalid PROCPRIO oomadj argument %d", params.oomadj);
        return;
    }

    snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", params.pid);
    snprintf(val, sizeof(val), "%d", params.oomadj);
    writefilestring(path, val);

    if (use_inkernel_interface)
        return;

    if (low_ram_device) {
        if (params.oomadj >= 900) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 800) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 700) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 600) {
            // Launcher should be perceptible, don't kill it.
            params.oomadj = 200;
            soft_limit_mult = 1;
        } else if (params.oomadj >= 500) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 400) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 300) {
            soft_limit_mult = 1;
        } else if (params.oomadj >= 200) {
            soft_limit_mult = 2;
        } else if (params.oomadj >= 100) {
            soft_limit_mult = 10;
        } else if (params.oomadj >= 0) {
            soft_limit_mult = 20;
        } else {
            // Persistent processes will have a large
            // soft limit 512MB.
            soft_limit_mult = 64;
        }

        snprintf(path, sizeof(path),
                 "/dev/memcg/apps/uid_%d/pid_%d/memory.soft_limit_in_bytes",
                 params.uid, params.pid);
        snprintf(val, sizeof(val), "%d", soft_limit_mult * EIGHT_MEGA);
        writefilestring(path, val);
    }

    procp = pid_lookup(params.pid);
    if (!procp) {
        procp = malloc(sizeof(struct proc));
        if (!procp) {
            // Oh, the irony. May need to rebuild our state.
            return;
        }

        procp->pid = params.pid;
        procp->uid = params.uid;
        procp->oomadj = params.oomadj;
        proc_insert(procp);
    } else {
        proc_unslot(procp);
        procp->oomadj = params.oomadj;
        proc_slot(procp);
    }
}

static void cmd_procremove(LMKD_CTRL_PACKET packet) {
    struct lmk_procremove params;

    if (use_inkernel_interface)
        return;

    lmkd_pack_get_procremove(packet, &params);
    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(params.pid);
}

static void cmd_procpurge() {
    int i;
    struct proc *procp;
    struct proc *next;

    if (use_inkernel_interface) {
        return;
    }

    for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
        procadjslot_list[i].next = &procadjslot_list[i];
        procadjslot_list[i].prev = &procadjslot_list[i];
    }

    for (i = 0; i < PIDHASH_SZ; i++) {
        procp = pidhash[i];
        while (procp) {
            next = procp->pidhash_next;
            free(procp);
            procp = next;
        }
    }
    memset(&pidhash[0], 0, sizeof(pidhash));
}

static void cmd_target(int ntargets, LMKD_CTRL_PACKET packet) {
    int i;
    struct lmk_target target;

    if (ntargets > (int)ARRAY_SIZE(lowmem_adj))
        return;

    for (i = 0; i < ntargets; i++) {
        lmkd_pack_get_target(packet, i, &target);
        lowmem_minfree[i] = target.minfree;
        lowmem_adj[i] = target.oom_adj_score;
    }

    lowmem_targets_size = ntargets;

    if (has_inkernel_module) {
        char minfreestr[128];
        char killpriostr[128];

        minfreestr[0] = '\0';
        killpriostr[0] = '\0';

        for (i = 0; i < lowmem_targets_size; i++) {
            char val[40];

            if (i) {
                strlcat(minfreestr, ",", sizeof(minfreestr));
                strlcat(killpriostr, ",", sizeof(killpriostr));
            }

            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_minfree[i] : 0);
            strlcat(minfreestr, val, sizeof(minfreestr));
            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_adj[i] : 0);
            strlcat(killpriostr, val, sizeof(killpriostr));
        }

        writefilestring(INKERNEL_MINFREE_PATH, minfreestr);
        writefilestring(INKERNEL_ADJ_PATH, killpriostr);
    }
}

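/*
 * Control socket handling. lmkd listens on the "lmkd" control socket and
 * accepts up to MAX_DATA_CONN data connections (ActivityManager being the
 * primary client); each connection is registered with epoll and commands
 * are dispatched from ctrl_command_handler().
 */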
static void ctrl_data_close(int dsock_idx) {
    struct epoll_event epev;

    ALOGI("closing lmkd data connection");
    if (epoll_ctl(epollfd, EPOLL_CTL_DEL, data_sock[dsock_idx].sock, &epev) == -1) {
        // Log a warning and keep going
        ALOGW("epoll_ctl for data connection socket failed; errno=%d", errno);
    }
    maxevents--;

    close(data_sock[dsock_idx].sock);
    data_sock[dsock_idx].sock = -1;
}

static int ctrl_data_read(int dsock_idx, char *buf, size_t bufsz) {
    int ret = 0;

    ret = TEMP_FAILURE_RETRY(read(data_sock[dsock_idx].sock, buf, bufsz));

    if (ret == -1) {
        ALOGE("control data socket read failed; errno=%d", errno);
    } else if (ret == 0) {
        ALOGE("Got EOF on control data socket");
        ret = -1;
    }

    return ret;
}

static void ctrl_command_handler(int dsock_idx) {
    LMKD_CTRL_PACKET packet;
    int len;
    enum lmk_cmd cmd;
    int nargs;
    int targets;

    len = ctrl_data_read(dsock_idx, (char *)packet, CTRL_PACKET_MAX_SIZE);
    if (len <= 0)
        return;

    if (len < (int)sizeof(int)) {
        ALOGE("Wrong control socket read length len=%d", len);
        return;
    }

    cmd = lmkd_pack_get_cmd(packet);
    nargs = len / sizeof(int) - 1;
    if (nargs < 0)
        goto wronglen;

    switch(cmd) {
    case LMK_TARGET:
        targets = nargs / 2;
        if (nargs & 0x1 || targets > (int)ARRAY_SIZE(lowmem_adj))
            goto wronglen;
        cmd_target(targets, packet);
        break;
    case LMK_PROCPRIO:
        if (nargs != 3)
            goto wronglen;
        cmd_procprio(packet);
        break;
    case LMK_PROCREMOVE:
        if (nargs != 1)
            goto wronglen;
        cmd_procremove(packet);
        break;
    case LMK_PROCPURGE:
        if (nargs != 0)
            goto wronglen;
        cmd_procpurge();
        break;
    default:
        ALOGE("Received unknown command code %d", cmd);
        return;
    }

    return;

wronglen:
    ALOGE("Wrong control socket read length cmd=%d len=%d", cmd, len);
}

static void ctrl_data_handler(int data, uint32_t events) {
    if (events & EPOLLIN) {
        ctrl_command_handler(data);
    }
}

static int get_free_dsock() {
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock < 0) {
            return i;
        }
    }
    return -1;
}

static void ctrl_connect_handler(int data __unused, uint32_t events __unused) {
    struct epoll_event epev;
    int free_dscock_idx = get_free_dsock();

    if (free_dscock_idx < 0) {
        /*
         * Number of data connections exceeded max supported. This should not
         * happen but if it does we drop all existing connections and accept
         * the new one. This prevents inactive connections from monopolizing
         * data socket and if we drop ActivityManager connection it will
         * immediately reconnect.
         */
        for (int i = 0; i < MAX_DATA_CONN; i++) {
            ctrl_data_close(i);
        }
        free_dscock_idx = 0;
    }

    data_sock[free_dscock_idx].sock = accept(ctrl_sock.sock, NULL, NULL);
    if (data_sock[free_dscock_idx].sock < 0) {
        ALOGE("lmkd control socket accept failed; errno=%d", errno);
        return;
    }

    ALOGI("lmkd data connection established");
    /* use data to store data connection idx */
    data_sock[free_dscock_idx].handler_info.data = free_dscock_idx;
    data_sock[free_dscock_idx].handler_info.handler = ctrl_data_handler;
    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&(data_sock[free_dscock_idx].handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, data_sock[free_dscock_idx].sock, &epev) == -1) {
        ALOGE("epoll_ctl for data connection socket failed; errno=%d", errno);
        ctrl_data_close(free_dscock_idx);
        return;
    }
    maxevents++;
}

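/*
 * Statistics collection for statsd (compiled in when LMKD_LOG_STATS is set).
 * Memory stats for a killed process are read either from its per-app memory
 * cgroup or, when per-app memcgs are not in use, from its /proc stat file.
 */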
#ifdef LMKD_LOG_STATS
static void memory_stat_parse_line(char* line, struct memory_stat* mem_st) {
    char key[LINE_MAX + 1];
    int64_t value;

    sscanf(line, "%" STRINGIFY(LINE_MAX) "s %" SCNd64 "", key, &value);

    if (strcmp(key, "total_") < 0) {
        return;
    }

    if (!strcmp(key, "total_pgfault"))
        mem_st->pgfault = value;
    else if (!strcmp(key, "total_pgmajfault"))
        mem_st->pgmajfault = value;
    else if (!strcmp(key, "total_rss"))
        mem_st->rss_in_bytes = value;
    else if (!strcmp(key, "total_cache"))
        mem_st->cache_in_bytes = value;
    else if (!strcmp(key, "total_swap"))
        mem_st->swap_in_bytes = value;
}

static int memory_stat_from_cgroup(struct memory_stat* mem_st, int pid, uid_t uid) {
    FILE* fp;
    char buf[PATH_MAX];

    snprintf(buf, sizeof(buf), MEMCG_PROCESS_MEMORY_STAT_PATH, uid, pid);

    fp = fopen(buf, "r");

    if (fp == NULL) {
        ALOGE("%s open failed: %s", buf, strerror(errno));
        return -1;
    }

    while (fgets(buf, PAGE_SIZE, fp) != NULL) {
        memory_stat_parse_line(buf, mem_st);
    }
    fclose(fp);

    return 0;
}

static int memory_stat_from_procfs(struct memory_stat* mem_st, int pid) {
    char path[PATH_MAX];
    char buffer[PROC_STAT_BUFFER_SIZE];
    int fd, ret;

    snprintf(path, sizeof(path), PROC_STAT_FILE_PATH, pid);
    if ((fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
        ALOGE("%s open failed: %s", path, strerror(errno));
        return -1;
    }

    ret = read(fd, buffer, sizeof(buffer));
    if (ret < 0) {
        ALOGE("%s read failed: %s", path, strerror(errno));
        close(fd);
        return -1;
    }
    close(fd);

    // field 10 is pgfault
    // field 12 is pgmajfault
    // field 24 is rss_in_pages
    int64_t pgfault = 0, pgmajfault = 0, rss_in_pages = 0;
    if (sscanf(buffer,
               "%*u %*s %*s %*d %*d %*d %*d %*d %*d %" SCNd64 " %*d "
               "%" SCNd64 " %*d %*u %*u %*d %*d %*d %*d %*d %*d "
               "%*d %*d %" SCNd64 "",
               &pgfault, &pgmajfault, &rss_in_pages) != 3) {
        return -1;
    }
    mem_st->pgfault = pgfault;
    mem_st->pgmajfault = pgmajfault;
    mem_st->rss_in_bytes = (rss_in_pages * PAGE_SIZE);

    return 0;
}
#endif

/* /proc/zoneinfo parsing routines */
static int64_t zoneinfo_parse_protection(char *cp) {
    int64_t max = 0;
    long long zoneval;
    char *save_ptr;

    for (cp = strtok_r(cp, "(), ", &save_ptr); cp;
         cp = strtok_r(NULL, "), ", &save_ptr)) {
        zoneval = strtoll(cp, &cp, 0);
        if (zoneval > max) {
            max = (zoneval > INT64_MAX) ? INT64_MAX : zoneval;
        }
    }

    return max;
}

static bool zoneinfo_parse_line(char *line, union zoneinfo *zi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return true;
    }

    if (!strcmp(cp, "protection:")) {
        ap = strtok_r(NULL, ")", &save_ptr);
    } else {
        ap = strtok_r(NULL, " ", &save_ptr);
    }

    if (!ap) {
        return true;
    }

    switch (match_field(cp, ap, zoneinfo_field_names,
                        ZI_FIELD_COUNT, &val, &field_idx)) {
    case (PARSE_SUCCESS):
        zi->arr[field_idx] += val;
        break;
    case (NO_MATCH):
        if (!strcmp(cp, "protection:")) {
            zi->field.totalreserve_pages +=
                zoneinfo_parse_protection(ap);
        }
        break;
    case (PARSE_FAIL):
    default:
        return false;
    }
    return true;
}

static int zoneinfo_parse(union zoneinfo *zi) {
    static struct reread_data file_data = {
        .filename = ZONEINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(zi, 0, sizeof(union zoneinfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!zoneinfo_parse_line(line, zi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    zi->field.totalreserve_pages += zi->field.high;

    return 0;
}

/* /proc/meminfo parsing routines */
static bool meminfo_parse_line(char *line, union meminfo *mi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;
    enum field_match_result match_res;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return false;
    }

    ap = strtok_r(NULL, " ", &save_ptr);
    if (!ap) {
        return false;
    }

    match_res = match_field(cp, ap, meminfo_field_names, MI_FIELD_COUNT,
                            &val, &field_idx);
    if (match_res == PARSE_SUCCESS) {
        mi->arr[field_idx] = val / page_k;
    }
    return (match_res != PARSE_FAIL);
}

static int meminfo_parse(union meminfo *mi) {
    static struct reread_data file_data = {
        .filename = MEMINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(mi, 0, sizeof(union meminfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!meminfo_parse_line(line, mi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    mi->field.nr_file_pages = mi->field.cached + mi->field.swap_cached +
        mi->field.buffers;

    return 0;
}

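/*
 * Per-process helpers: proc_get_size() reads the resident set size (in
 * pages) from /proc/<pid>/statm and proc_get_name() reads the command line
 * from /proc/<pid>/cmdline into a static buffer, truncating it at the
 * first space.
 */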
static int proc_get_size(int pid) {
    char path[PATH_MAX];
    char line[LINE_MAX];
    int fd;
    int rss = 0;
    int total;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/statm", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return -1;

    ret = read_all(fd, line, sizeof(line) - 1);
    if (ret < 0) {
        close(fd);
        return -1;
    }

    sscanf(line, "%d %d ", &total, &rss);
    close(fd);
    return rss;
}

static char *proc_get_name(int pid) {
    char path[PATH_MAX];
    static char line[LINE_MAX];
    int fd;
    char *cp;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/cmdline", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return NULL;
    ret = read_all(fd, line, sizeof(line) - 1);
    close(fd);
    if (ret < 0) {
        return NULL;
    }

    cp = strchr(line, ' ');
    if (cp)
        *cp = '\0';

    return line;
}

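/*
 * Victim selection within one oom_score_adj bucket: proc_adj_lru() returns
 * the least recently added or updated process (tail of the adj slot list),
 * while proc_get_heaviest() walks the whole bucket and returns the process
 * with the largest RSS, dropping entries whose /proc/<pid>/statm can no
 * longer be read.
 */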
static struct proc *proc_adj_lru(int oomadj) {
    return (struct proc *)adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
}

static struct proc *proc_get_heaviest(int oomadj) {
    struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
    struct adjslot_list *curr = head->next;
    struct proc *maxprocp = NULL;
    int maxsize = 0;
    while (curr != head) {
        int pid = ((struct proc *)curr)->pid;
        int tasksize = proc_get_size(pid);
        if (tasksize <= 0) {
            struct adjslot_list *next = curr->next;
            pid_remove(pid);
            curr = next;
        } else {
            if (tasksize > maxsize) {
                maxsize = tasksize;
                maxprocp = (struct proc *)curr;
            }
            curr = curr->next;
        }
    }
    return maxprocp;
}

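/*
 * PID of the most recent kill. is_kill_pending() uses it to check whether
 * that process still exists (i.e. its memory has not been fully reclaimed
 * yet) before another kill is attempted within the kill timeout window.
 */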
static int last_killed_pid = -1;

/* Kill one process specified by procp. Returns the size of the process killed */
static int kill_one_process(struct proc* procp) {
    int pid = procp->pid;
    uid_t uid = procp->uid;
    char *taskname;
    int tasksize;
    int r;
    int result = -1;

#ifdef LMKD_LOG_STATS
    struct memory_stat mem_st = {};
    int memory_stat_parse_result = -1;
#endif

    taskname = proc_get_name(pid);
    if (!taskname) {
        goto out;
    }

    tasksize = proc_get_size(pid);
    if (tasksize <= 0) {
        goto out;
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log) {
        if (per_app_memcg) {
            memory_stat_parse_result = memory_stat_from_cgroup(&mem_st, pid, uid);
        } else {
            memory_stat_parse_result = memory_stat_from_procfs(&mem_st, pid);
        }
    }
#endif

    TRACE_KILL_START(pid);

    /* CAP_KILL required */
    r = kill(pid, SIGKILL);
    ALOGI("Kill '%s' (%d), uid %d, oom_adj %d to free %ldkB",
          taskname, pid, uid, procp->oomadj, tasksize * page_k);

    TRACE_KILL_END();

    last_killed_pid = pid;

    if (r) {
        ALOGE("kill(%d): errno=%d", pid, errno);
        goto out;
    } else {
#ifdef LMKD_LOG_STATS
        if (memory_stat_parse_result == 0) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname,
                    procp->oomadj, mem_st.pgfault, mem_st.pgmajfault, mem_st.rss_in_bytes,
                    mem_st.cache_in_bytes, mem_st.swap_in_bytes);
        } else if (enable_stats_log) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname, procp->oomadj,
                                          -1, -1, tasksize * BYTES_IN_KILOBYTE, -1, -1);
        }
#endif
        result = tasksize;
    }

out:
    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(pid);
    return result;
}

/*
 * Find processes to kill to free required number of pages.
 * If pages_to_free is set to 0 only one process will be killed.
 * Returns the size of the killed processes.
 */
static int find_and_kill_processes(int min_score_adj, int pages_to_free) {
    int i;
    int killed_size;
    int pages_freed = 0;

#ifdef LMKD_LOG_STATS
    bool lmk_state_change_start = false;
#endif

    for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) {
        struct proc *procp;

        while (true) {
            procp = kill_heaviest_task ?
                proc_get_heaviest(i) : proc_adj_lru(i);

            if (!procp)
                break;

            killed_size = kill_one_process(procp);
            if (killed_size >= 0) {
#ifdef LMKD_LOG_STATS
                if (enable_stats_log && !lmk_state_change_start) {
                    lmk_state_change_start = true;
                    stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                                  LMK_STATE_CHANGE_START);
                }
#endif

                pages_freed += killed_size;
                if (pages_freed >= pages_to_free) {

#ifdef LMKD_LOG_STATS
                    if (enable_stats_log && lmk_state_change_start) {
                        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                                      LMK_STATE_CHANGE_STOP);
                    }
#endif
                    return pages_freed;
                }
            }
        }
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log && lmk_state_change_start) {
        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED, LMK_STATE_CHANGE_STOP);
    }
#endif

    return pages_freed;
}

static int64_t get_memory_usage(struct reread_data *file_data) {
    int ret;
    int64_t mem_usage;
    char buf[32];

    if (reread_file(file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    if (!parse_int64(buf, &mem_usage)) {
        ALOGE("%s parse error", file_data->filename);
        return -1;
    }
    if (mem_usage == 0) {
        ALOGE("No memory!");
        return -1;
    }
    return mem_usage;
}

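/*
 * Track the lowest and highest nr_free_pages observed during low vmpressure
 * events; max_nr_free_pages later serves as the watermark that non-minfree
 * kill decisions try to restore free memory to.
 */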
void record_low_pressure_levels(union meminfo *mi) {
    if (low_pressure_mem.min_nr_free_pages == -1 ||
        low_pressure_mem.min_nr_free_pages > mi->field.nr_free_pages) {
        if (debug_process_killing) {
            ALOGI("Low pressure min memory update from %" PRId64 " to %" PRId64,
                  low_pressure_mem.min_nr_free_pages, mi->field.nr_free_pages);
        }
        low_pressure_mem.min_nr_free_pages = mi->field.nr_free_pages;
    }
    /*
     * Free memory at low vmpressure events occasionally gets spikes,
     * possibly a stale low vmpressure event with memory already
     * freed up (no memory pressure should have been reported).
     * Ignore large jumps in max_nr_free_pages that would mess up our stats.
     */
    if (low_pressure_mem.max_nr_free_pages == -1 ||
        (low_pressure_mem.max_nr_free_pages < mi->field.nr_free_pages &&
         mi->field.nr_free_pages - low_pressure_mem.max_nr_free_pages <
         low_pressure_mem.max_nr_free_pages * 0.1)) {
        if (debug_process_killing) {
            ALOGI("Low pressure max memory update from %" PRId64 " to %" PRId64,
                  low_pressure_mem.max_nr_free_pages, mi->field.nr_free_pages);
        }
        low_pressure_mem.max_nr_free_pages = mi->field.nr_free_pages;
    }
}

enum vmpressure_level upgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ?
        level + 1 : level);
}

enum vmpressure_level downgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level > VMPRESS_LEVEL_LOW) ?
        level - 1 : level);
}

static inline unsigned long get_time_diff_ms(struct timeval *from,
                                             struct timeval *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_usec - from->tv_usec) / 1000;
}

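/*
 * Returns true if the previously killed process is still present in /proc,
 * meaning the kernel has not finished reclaiming its memory yet.
 */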
static bool is_kill_pending(void) {
    char buf[24];
    if (last_killed_pid < 0) {
        return false;
    }

    snprintf(buf, sizeof(buf), "/proc/%d/", last_killed_pid);
    if (access(buf, F_OK) == 0) {
        return true;
    }

    // reset last killed PID because there's nothing pending
    last_killed_pid = -1;
    return false;
}

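/*
 * Main memory pressure event handler, shared by all vmpressure levels.
 * It coalesces pending events into the highest signalled level, optionally
 * skips kills while a previous kill is still being reclaimed, then decides
 * how many pages to free either from the minfree/adj tables (when
 * use_minfree_levels is set) or from memcg usage ratios and the recorded
 * low-pressure watermark, and finally calls find_and_kill_processes().
 */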
static void mp_event_common(int data, uint32_t events __unused) {
    int ret;
    unsigned long long evcount;
    int64_t mem_usage, memsw_usage;
    int64_t mem_pressure;
    enum vmpressure_level lvl;
    union meminfo mi;
    union zoneinfo zi;
    struct timeval curr_tm;
    static struct timeval last_kill_tm;
    static unsigned long kill_skip_count = 0;
    enum vmpressure_level level = (enum vmpressure_level)data;
    long other_free = 0, other_file = 0;
    int min_score_adj;
    int pages_to_free = 0;
    int minfree = 0;
    static struct reread_data mem_usage_file_data = {
        .filename = MEMCG_MEMORY_USAGE,
        .fd = -1,
    };
    static struct reread_data memsw_usage_file_data = {
        .filename = MEMCG_MEMORYSW_USAGE,
        .fd = -1,
    };

    /*
     * Check all event counters from low to critical
     * and upgrade to the highest priority one. By reading
     * eventfd we also reset the event counters.
     */
    for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
        if (mpevfd[lvl] != -1 &&
            TEMP_FAILURE_RETRY(read(mpevfd[lvl],
                               &evcount, sizeof(evcount))) > 0 &&
            evcount > 0 && lvl > level) {
            level = lvl;
        }
    }

    gettimeofday(&curr_tm, NULL);
    if (kill_timeout_ms) {
        // If we're within the timeout, see if there's pending reclaim work
        // from the last killed process. If there is (as evidenced by
        // /proc/<pid> continuing to exist), skip killing for now.
        if ((get_time_diff_ms(&last_kill_tm, &curr_tm) < kill_timeout_ms) &&
            (low_ram_device || is_kill_pending())) {
            kill_skip_count++;
            return;
        }
    }

    if (kill_skip_count > 0) {
        ALOGI("%lu memory pressure events were skipped after a kill!",
              kill_skip_count);
        kill_skip_count = 0;
    }

    if (meminfo_parse(&mi) < 0 || zoneinfo_parse(&zi) < 0) {
        ALOGE("Failed to get free memory!");
        return;
    }

    if (use_minfree_levels) {
        int i;

        other_free = mi.field.nr_free_pages - zi.field.totalreserve_pages;
        if (mi.field.nr_file_pages > (mi.field.shmem + mi.field.unevictable + mi.field.swap_cached)) {
            other_file = (mi.field.nr_file_pages - mi.field.shmem -
                          mi.field.unevictable - mi.field.swap_cached);
        } else {
            other_file = 0;
        }

        min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        for (i = 0; i < lowmem_targets_size; i++) {
            minfree = lowmem_minfree[i];
            if (other_free < minfree && other_file < minfree) {
                min_score_adj = lowmem_adj[i];
                break;
            }
        }

        if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
            if (debug_process_killing) {
                ALOGI("Ignore %s memory pressure event "
                      "(free memory=%ldkB, cache=%ldkB, limit=%ldkB)",
                      level_name[level], other_free * page_k, other_file * page_k,
                      (long)lowmem_minfree[lowmem_targets_size - 1] * page_k);
            }
            return;
        }

        /* Free up enough pages to push over the highest minfree level */
        pages_to_free = lowmem_minfree[lowmem_targets_size - 1] -
            ((other_free < other_file) ? other_free : other_file);
        goto do_kill;
    }

    if (level == VMPRESS_LEVEL_LOW) {
        record_low_pressure_levels(&mi);
    }

    if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) {
        /* Do not monitor this pressure level */
        return;
    }

    if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
        goto do_kill;
    }
    if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
        goto do_kill;
    }

    // Calculate percent for swappiness.
    mem_pressure = (mem_usage * 100) / memsw_usage;

    if (enable_pressure_upgrade && level != VMPRESS_LEVEL_CRITICAL) {
        // We are swapping too much.
        if (mem_pressure < upgrade_pressure) {
            level = upgrade_level(level);
            if (debug_process_killing) {
                ALOGI("Event upgraded to %s", level_name[level]);
            }
        }
    }

    // If the pressure is larger than downgrade_pressure lmk will not
    // kill any process, since enough memory is available.
    if (mem_pressure > downgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Ignore %s memory pressure", level_name[level]);
        }
        return;
    } else if (level == VMPRESS_LEVEL_CRITICAL &&
               mem_pressure > upgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Downgrade critical memory pressure");
        }
        // Downgrade event, since enough memory available.
        level = downgrade_level(level);
    }

do_kill:
    if (low_ram_device) {
        /* For Go devices kill only one task */
        if (find_and_kill_processes(level_oomadj[level], 0) == 0) {
            if (debug_process_killing) {
                ALOGI("Nothing to kill");
            }
        }
    } else {
        int pages_freed;
        static struct timeval last_report_tm;
        static unsigned long report_skip_count = 0;

        if (!use_minfree_levels) {
            /* If pressure level is less than critical and enough free swap then ignore */
            if (level < VMPRESS_LEVEL_CRITICAL &&
                mi.field.free_swap > low_pressure_mem.max_nr_free_pages) {
                if (debug_process_killing) {
                    ALOGI("Ignoring pressure since %" PRId64
                          " swap pages are available ",
                          mi.field.free_swap);
                }
                return;
            }
            /* Free up enough memory to downgrade the memory pressure to low level */
            if (mi.field.nr_free_pages < low_pressure_mem.max_nr_free_pages) {
                pages_to_free = low_pressure_mem.max_nr_free_pages -
                    mi.field.nr_free_pages;
            } else {
                if (debug_process_killing) {
                    ALOGI("Ignoring pressure since more memory is "
                          "available (%" PRId64 ") than watermark (%" PRId64 ")",
                          mi.field.nr_free_pages, low_pressure_mem.max_nr_free_pages);
                }
                return;
            }
            min_score_adj = level_oomadj[level];
        }

        pages_freed = find_and_kill_processes(min_score_adj, 0);

        if (pages_freed == 0) {
            /* Rate limit kill reports when nothing was reclaimed */
            if (get_time_diff_ms(&last_report_tm, &curr_tm) < FAIL_REPORT_RLIMIT_MS) {
                report_skip_count++;
                return;
            }
        } else {
            /* If we killed anything, update the last killed timestamp. */
            last_kill_tm = curr_tm;
        }

        if (use_minfree_levels) {
            ALOGI("Killing to reclaim %ldkB, reclaimed %ldkB, cache(%ldkB) and "
                  "free(%" PRId64 "kB)-reserved(%" PRId64 "kB) below min(%ldkB) for oom_adj %d",
                  pages_to_free * page_k, pages_freed * page_k,
                  other_file * page_k, mi.field.nr_free_pages * page_k,
                  zi.field.totalreserve_pages * page_k,
                  minfree * page_k, min_score_adj);
        } else {
            ALOGI("Killing to reclaim %ldkB, reclaimed %ldkB at oom_adj %d",
                  pages_to_free * page_k, pages_freed * page_k, min_score_adj);
        }

        if (report_skip_count > 0) {
            ALOGI("Suppressed %lu failed kill reports", report_skip_count);
            report_skip_count = 0;
        }

        last_report_tm = curr_tm;
    }
}

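/*
 * Register a vmpressure listener for one level: an eventfd is armed through
 * the root memcg's cgroup.event_control file together with
 * memory.pressure_level, and the eventfd is then added to the epoll loop
 * with mp_event_common() as its handler.
 */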
static bool init_mp_common(enum vmpressure_level level) {
    int mpfd;
    int evfd;
    int evctlfd;
    char buf[256];
    struct epoll_event epev;
    int ret;
    int level_idx = (int)level;
    const char *levelstr = level_name[level_idx];

    mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC);
    if (mpfd < 0) {
        ALOGI("No kernel memory.pressure_level support (errno=%d)", errno);
        goto err_open_mpfd;
    }

    evctlfd = open(MEMCG_SYSFS_PATH "cgroup.event_control", O_WRONLY | O_CLOEXEC);
    if (evctlfd < 0) {
        ALOGI("No kernel memory cgroup event control (errno=%d)", errno);
        goto err_open_evctlfd;
    }

    evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (evfd < 0) {
        ALOGE("eventfd failed for level %s; errno=%d", levelstr, errno);
        goto err_eventfd;
    }

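    /* The cgroup.event_control registration line has the form "<event_fd> <pressure_fd> <level>" */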
    ret = snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr);
    if (ret >= (ssize_t)sizeof(buf)) {
        ALOGE("cgroup.event_control line overflow for level %s", levelstr);
        goto err;
    }

    ret = TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1));
    if (ret == -1) {
        ALOGE("cgroup.event_control write failed for level %s; errno=%d",
            levelstr, errno);
        goto err;
    }

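    /* Add the armed eventfd to the shared epoll set serviced by mainloop() */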
    epev.events = EPOLLIN;
    /* use data to store event level */
    vmpressure_hinfo[level_idx].data = level_idx;
    vmpressure_hinfo[level_idx].handler = mp_event_common;
    epev.data.ptr = (void *)&vmpressure_hinfo[level_idx];
    ret = epoll_ctl(epollfd, EPOLL_CTL_ADD, evfd, &epev);
    if (ret == -1) {
        ALOGE("epoll_ctl for level %s failed; errno=%d", levelstr, errno);
        goto err;
    }
    maxevents++;
    mpevfd[level] = evfd;
    close(evctlfd);
    return true;

err:
    close(evfd);
err_eventfd:
    close(evctlfd);
err_open_evctlfd:
    close(mpfd);
err_open_mpfd:
    return false;
}

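/*
 * One-time setup: create the epoll instance, publish the lmkd control socket,
 * and hook up either the in-kernel lowmemorykiller or vmpressure listeners.
 */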
static int init(void) {
    struct epoll_event epev;
    int i;
    int ret;

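    /* Page size in kB for reclaim accounting; fall back to PAGE_SIZE if sysconf fails */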
    page_k = sysconf(_SC_PAGESIZE);
    if (page_k == -1)
        page_k = PAGE_SIZE;
    page_k /= 1024;

    epollfd = epoll_create(MAX_EPOLL_EVENTS);
    if (epollfd == -1) {
        ALOGE("epoll_create failed (errno=%d)", errno);
        return -1;
    }

    // mark data connections as not connected
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        data_sock[i].sock = -1;
    }

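    /* Retrieve the "lmkd" control socket (typically created by init and passed in via the environment) */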
    ctrl_sock.sock = android_get_control_socket("lmkd");
    if (ctrl_sock.sock < 0) {
        ALOGE("get lmkd control socket failed");
        return -1;
    }

    ret = listen(ctrl_sock.sock, MAX_DATA_CONN);
    if (ret < 0) {
        ALOGE("lmkd control socket listen failed (errno=%d)", errno);
        return -1;
    }

    epev.events = EPOLLIN;
    ctrl_sock.handler_info.handler = ctrl_connect_handler;
    epev.data.ptr = (void *)&(ctrl_sock.handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ctrl_sock.sock, &epev) == -1) {
        ALOGE("epoll_ctl for lmkd control socket failed (errno=%d)", errno);
        return -1;
    }
    maxevents++;

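    /* Treat writable minfree parameters as evidence that the in-kernel lowmemorykiller is present */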
    has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK);
    use_inkernel_interface = has_inkernel_module;

    if (use_inkernel_interface) {
        ALOGI("Using in-kernel low memory killer interface");
    } else {
        if (!init_mp_common(VMPRESS_LEVEL_LOW) ||
            !init_mp_common(VMPRESS_LEVEL_MEDIUM) ||
            !init_mp_common(VMPRESS_LEVEL_CRITICAL)) {
            ALOGE("Kernel does not support memory pressure events or in-kernel low memory killer");
            return -1;
        }
    }

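    /* Start each oom_score_adj bucket as an empty (self-linked) circular list */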
    for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
        procadjslot_list[i].next = &procadjslot_list[i];
        procadjslot_list[i].prev = &procadjslot_list[i];
    }

    return 0;
}

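/* Main event loop: wait on the epoll set and dispatch events to their registered handlers */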
static void mainloop(void) {
    struct event_handler_info* handler_info;
    struct epoll_event *evt;

    while (1) {
        struct epoll_event events[maxevents];
        int nevents;
        int i;

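        /* Block until at least one registered event fires (no timeout) */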
        nevents = epoll_wait(epollfd, events, maxevents, -1);

        if (nevents == -1) {
            if (errno == EINTR)
                continue;
            ALOGE("epoll_wait failed (errno=%d)", errno);
            continue;
        }

        /*
         * First pass to see if any data socket connections were dropped.
         * Dropped connections should be handled before any other events
         * to deallocate the data connection and to correctly handle cases
         * when a connection is dropped and reestablished in the same epoll
         * cycle. In such cases it is essential to handle connection closures
         * first.
         */
        for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
            if ((evt->events & EPOLLHUP) && evt->data.ptr) {
                ALOGI("lmkd data connection dropped");
                handler_info = (struct event_handler_info*)evt->data.ptr;
                ctrl_data_close(handler_info->data);
            }
        }

        /* Second pass to handle all other events */
        for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
            if (evt->events & EPOLLERR)
                ALOGD("EPOLLERR on event #%d", i);
            if (evt->events & EPOLLHUP) {
                /* This case was handled in the first pass */
                continue;
            }
            if (evt->data.ptr) {
                handler_info = (struct event_handler_info*)evt->data.ptr;
                handler_info->handler(handler_info->data, evt->events);
            }
        }
    }
}

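/*
 * Entry point: read the ro.lmk.* tunables, lock lmkd's pages in memory,
 * raise its scheduling priority, then initialize and run the event loop.
 */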
int main(int argc __unused, char **argv __unused) {
    struct sched_param param = {
        .sched_priority = 1,
    };

    /* By default disable low level vmpressure events */
    level_oomadj[VMPRESS_LEVEL_LOW] =
        property_get_int32("ro.lmk.low", OOM_SCORE_ADJ_MAX + 1);
    level_oomadj[VMPRESS_LEVEL_MEDIUM] =
        property_get_int32("ro.lmk.medium", 800);
    level_oomadj[VMPRESS_LEVEL_CRITICAL] =
        property_get_int32("ro.lmk.critical", 0);
    debug_process_killing = property_get_bool("ro.lmk.debug", false);

    /* By default disable upgrade/downgrade logic */
    enable_pressure_upgrade =
        property_get_bool("ro.lmk.critical_upgrade", false);
    upgrade_pressure =
        (int64_t)property_get_int32("ro.lmk.upgrade_pressure", 100);
    downgrade_pressure =
        (int64_t)property_get_int32("ro.lmk.downgrade_pressure", 100);
    kill_heaviest_task =
        property_get_bool("ro.lmk.kill_heaviest_task", false);
    low_ram_device = property_get_bool("ro.config.low_ram", false);
    kill_timeout_ms =
        (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
    use_minfree_levels =
        property_get_bool("ro.lmk.use_minfree_levels", false);
    per_app_memcg = property_get_bool("ro.config.per_app_memcg", low_ram_device);
#ifdef LMKD_LOG_STATS
    statslog_init(&log_ctx, &enable_stats_log);
#endif

    // MCL_ONFAULT pins pages as they fault instead of loading
    // everything immediately all at once. (Which would be bad,
    // because as of this writing, we have a lot of mapped pages we
    // never use.) Old kernels will see MCL_ONFAULT and fail with
    // EINVAL; we ignore this failure.
    //
    // N.B. read the man page for mlockall. MCL_CURRENT | MCL_ONFAULT
    // pins ⊆ MCL_CURRENT, converging to just MCL_CURRENT as we fault
    // in pages.
    if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) && errno != EINVAL)
        ALOGW("mlockall failed: errno=%d", errno);

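    /* Run with real-time SCHED_FIFO priority so lmkd gets scheduled ahead of normal tasks */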
    sched_setscheduler(0, SCHED_FIFO, &param);
    if (!init())
        mainloop();

#ifdef LMKD_LOG_STATS
    statslog_destroy(&log_ctx);
#endif

    ALOGI("exiting");
    return 0;
}