/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "lowmemorykiller"

#include <errno.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#include <unistd.h>

#include <cutils/properties.h>
#include <cutils/sockets.h>
#include <lmkd.h>
#include <log/log.h>

#ifdef LMKD_LOG_STATS
#include "statslog.h"
#endif

/*
 * Define LMKD_TRACE_KILLS to record lmkd kills in kernel traces
 * to profile and correlate with OOM kills
 */
#ifdef LMKD_TRACE_KILLS

#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <cutils/trace.h>

#define TRACE_KILL_START(pid) ATRACE_INT(__FUNCTION__, pid);
#define TRACE_KILL_END() ATRACE_INT(__FUNCTION__, 0);

#else /* LMKD_TRACE_KILLS */

#define TRACE_KILL_START(pid) ((void)(pid))
#define TRACE_KILL_END() ((void)0)

#endif /* LMKD_TRACE_KILLS */

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

#define MEMCG_SYSFS_PATH "/dev/memcg/"
#define MEMCG_MEMORY_USAGE "/dev/memcg/memory.usage_in_bytes"
#define MEMCG_MEMORYSW_USAGE "/dev/memcg/memory.memsw.usage_in_bytes"
#define ZONEINFO_PATH "/proc/zoneinfo"
#define MEMINFO_PATH "/proc/meminfo"
#define LINE_MAX 128

#define INKERNEL_MINFREE_PATH "/sys/module/lowmemorykiller/parameters/minfree"
#define INKERNEL_ADJ_PATH "/sys/module/lowmemorykiller/parameters/adj"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define EIGHT_MEGA (1 << 23)

#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
#define STRINGIFY_INTERNAL(x) #x

#define FAIL_REPORT_RLIMIT_MS 1000

/* default to old in-kernel interface if no memory pressure events */
static int use_inkernel_interface = 1;
static bool has_inkernel_module;

/* memory pressure levels */
enum vmpressure_level {
    VMPRESS_LEVEL_LOW = 0,
    VMPRESS_LEVEL_MEDIUM,
    VMPRESS_LEVEL_CRITICAL,
    VMPRESS_LEVEL_COUNT
};

static const char *level_name[] = {
    "low",
    "medium",
    "critical"
};

struct {
    int64_t min_nr_free_pages; /* recorded but not used yet */
    int64_t max_nr_free_pages;
} low_pressure_mem = { -1, -1 };

static int level_oomadj[VMPRESS_LEVEL_COUNT];
static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 };
static bool debug_process_killing;
static bool enable_pressure_upgrade;
static int64_t upgrade_pressure;
static int64_t downgrade_pressure;
static bool low_ram_device;
static bool kill_heaviest_task;
static unsigned long kill_timeout_ms;
static bool use_minfree_levels;
static bool per_app_memcg;

/* data required to handle events */
struct event_handler_info {
    int data;
    void (*handler)(int data, uint32_t events);
};

/* data required to handle socket events */
struct sock_event_handler_info {
    int sock;
    struct event_handler_info handler_info;
};

/* max supported number of data connections */
#define MAX_DATA_CONN 2

/* socket event handler data */
static struct sock_event_handler_info ctrl_sock;
static struct sock_event_handler_info data_sock[MAX_DATA_CONN];

/* vmpressure event handler data */
static struct event_handler_info vmpressure_hinfo[VMPRESS_LEVEL_COUNT];

/* 3 memory pressure levels, 1 ctrl listen socket, 2 ctrl data sockets */
#define MAX_EPOLL_EVENTS (1 + MAX_DATA_CONN + VMPRESS_LEVEL_COUNT)
static int epollfd;
static int maxevents;

/* OOM score values used by both kernel and framework */
#define OOM_SCORE_ADJ_MIN (-1000)
#define OOM_SCORE_ADJ_MAX 1000

static int lowmem_adj[MAX_TARGETS];
static int lowmem_minfree[MAX_TARGETS];
static int lowmem_targets_size;

/* Fields to parse in /proc/zoneinfo */
enum zoneinfo_field {
    ZI_NR_FREE_PAGES = 0,
    ZI_NR_FILE_PAGES,
    ZI_NR_SHMEM,
    ZI_NR_UNEVICTABLE,
    ZI_WORKINGSET_REFAULT,
    ZI_HIGH,
    ZI_FIELD_COUNT
};

static const char* const zoneinfo_field_names[ZI_FIELD_COUNT] = {
    "nr_free_pages",
    "nr_file_pages",
    "nr_shmem",
    "nr_unevictable",
    "workingset_refault",
    "high",
};

union zoneinfo {
    struct {
        int64_t nr_free_pages;
        int64_t nr_file_pages;
        int64_t nr_shmem;
        int64_t nr_unevictable;
        int64_t workingset_refault;
        int64_t high;
        /* fields below are calculated rather than read from the file */
        int64_t totalreserve_pages;
    } field;
    int64_t arr[ZI_FIELD_COUNT];
};

/* Fields to parse in /proc/meminfo */
enum meminfo_field {
    MI_NR_FREE_PAGES = 0,
    MI_CACHED,
    MI_SWAP_CACHED,
    MI_BUFFERS,
    MI_SHMEM,
    MI_UNEVICTABLE,
    MI_FREE_SWAP,
    MI_DIRTY,
    MI_FIELD_COUNT
};

static const char* const meminfo_field_names[MI_FIELD_COUNT] = {
    "MemFree:",
    "Cached:",
    "SwapCached:",
    "Buffers:",
    "Shmem:",
    "Unevictable:",
    "SwapFree:",
    "Dirty:",
};

union meminfo {
    struct {
        int64_t nr_free_pages;
        int64_t cached;
        int64_t swap_cached;
        int64_t buffers;
        int64_t shmem;
        int64_t unevictable;
        int64_t free_swap;
        int64_t dirty;
        /* fields below are calculated rather than read from the file */
        int64_t nr_file_pages;
    } field;
    int64_t arr[MI_FIELD_COUNT];
};
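/*
 * Illustrative sketch (not part of the original source, assumes only the
 * definitions above): the unions overlay a named struct on an int64_t array
 * so the generic parser can store fields by index while callers read them
 * by name. Both views share the same storage:
 *
 *   union meminfo mi;
 *   mi.arr[MI_CACHED] = 1024;           // written by the field-index parser
 *   int64_t cached = mi.field.cached;   // read by name; same int64_t slot
 */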

enum field_match_result {
    NO_MATCH,
    PARSE_FAIL,
    PARSE_SUCCESS
};

struct adjslot_list {
    struct adjslot_list *next;
    struct adjslot_list *prev;
};

struct proc {
    struct adjslot_list asl;
    int pid;
    uid_t uid;
    int oomadj;
    struct proc *pidhash_next;
};

struct reread_data {
    const char* const filename;
    int fd;
};

#ifdef LMKD_LOG_STATS
static bool enable_stats_log;
static android_log_context log_ctx;
#endif

#define PIDHASH_SZ 1024
static struct proc *pidhash[PIDHASH_SZ];
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

#define ADJTOSLOT(adj) ((adj) + -OOM_SCORE_ADJ_MIN)
static struct adjslot_list procadjslot_list[ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1];

/* PAGE_SIZE / 1024 */
static long page_k;

static bool parse_int64(const char* str, int64_t* ret) {
    char* endptr;
    long long val = strtoll(str, &endptr, 10);
    if (str == endptr || val > INT64_MAX) {
        return false;
    }
    *ret = (int64_t)val;
    return true;
}

static enum field_match_result match_field(const char* cp, const char* ap,
                                           const char* const field_names[],
                                           int field_count, int64_t* field,
                                           int *field_idx) {
    int64_t val;
    int i;

    for (i = 0; i < field_count; i++) {
        if (!strcmp(cp, field_names[i])) {
            *field_idx = i;
            return parse_int64(ap, field) ? PARSE_SUCCESS : PARSE_FAIL;
        }
    }
    return NO_MATCH;
}

/*
 * Read file content from the beginning up to max_len bytes or EOF
 * whichever happens first.
 */
static ssize_t read_all(int fd, char *buf, size_t max_len)
{
    ssize_t ret = 0;
    off_t offset = 0;

    while (max_len > 0) {
        ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
        if (r == 0) {
            break;
        }
        if (r == -1) {
            return -1;
        }
        ret += r;
        buf += r;
        offset += r;
        max_len -= r;
    }

    return ret;
}

/*
 * Read a new or already opened file from the beginning.
 * If the file has not been opened yet data->fd should be set to -1.
 * To be used with files which are read often and possibly during high
 * memory pressure to minimize file opening which by itself requires kernel
 * memory allocation and might result in a stall on memory stressed system.
 */
static int reread_file(struct reread_data *data, char *buf, size_t buf_size) {
    ssize_t size;

    if (data->fd == -1) {
        data->fd = open(data->filename, O_RDONLY | O_CLOEXEC);
        if (data->fd == -1) {
            ALOGE("%s open: %s", data->filename, strerror(errno));
            return -1;
        }
    }

    size = read_all(data->fd, buf, buf_size - 1);
    if (size < 0) {
        ALOGE("%s read: %s", data->filename, strerror(errno));
        close(data->fd);
        data->fd = -1;
        return -1;
    }
    ALOG_ASSERT((size_t)size < buf_size - 1, "%s too large", data->filename);
    buf[size] = 0;

    return 0;
}
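/*
 * Typical usage of reread_file(), shown as a minimal sketch that mirrors how
 * zoneinfo_parse() and meminfo_parse() below use it: keep the descriptor
 * static so the open file descriptor is cached across calls.
 *
 *   static struct reread_data file_data = {
 *       .filename = MEMINFO_PATH,
 *       .fd = -1,
 *   };
 *   char buf[PAGE_SIZE];
 *
 *   if (reread_file(&file_data, buf, sizeof(buf)) == 0) {
 *       // buf now holds the NUL-terminated file contents
 *   }
 */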

static struct proc *pid_lookup(int pid) {
    struct proc *procp;

    for (procp = pidhash[pid_hashfn(pid)]; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            ;

    return procp;
}

static void adjslot_insert(struct adjslot_list *head, struct adjslot_list *new)
{
    struct adjslot_list *next = head->next;
    new->prev = head;
    new->next = next;
    next->prev = new;
    head->next = new;
}

static void adjslot_remove(struct adjslot_list *old)
{
    struct adjslot_list *prev = old->prev;
    struct adjslot_list *next = old->next;
    next->prev = prev;
    prev->next = next;
}

static struct adjslot_list *adjslot_tail(struct adjslot_list *head) {
    struct adjslot_list *asl = head->prev;

    return asl == head ? NULL : asl;
}

static void proc_slot(struct proc *procp) {
    int adjslot = ADJTOSLOT(procp->oomadj);

    adjslot_insert(&procadjslot_list[adjslot], &procp->asl);
}

static void proc_unslot(struct proc *procp) {
    adjslot_remove(&procp->asl);
}

static void proc_insert(struct proc *procp) {
    int hval = pid_hashfn(procp->pid);

    procp->pidhash_next = pidhash[hval];
    pidhash[hval] = procp;
    proc_slot(procp);
}

static int pid_remove(int pid) {
    int hval = pid_hashfn(pid);
    struct proc *procp;
    struct proc *prevp;

    for (procp = pidhash[hval], prevp = NULL; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            prevp = procp;

    if (!procp)
        return -1;

    if (!prevp)
        pidhash[hval] = procp->pidhash_next;
    else
        prevp->pidhash_next = procp->pidhash_next;

    proc_unslot(procp);
    free(procp);
    return 0;
}

static void writefilestring(const char *path, char *s) {
    int fd = open(path, O_WRONLY | O_CLOEXEC);
    int len = strlen(s);
    int ret;

    if (fd < 0) {
        ALOGE("Error opening %s; errno=%d", path, errno);
        return;
    }

    ret = write(fd, s, len);
    if (ret < 0) {
        ALOGE("Error writing %s; errno=%d", path, errno);
    } else if (ret < len) {
        ALOGE("Short write on %s; length=%d", path, ret);
    }

    close(fd);
}

static void cmd_procprio(LMKD_CTRL_PACKET packet) {
    struct proc *procp;
    char path[80];
    char val[20];
    int soft_limit_mult;
    struct lmk_procprio params;

    lmkd_pack_get_procprio(packet, &params);

    if (params.oomadj < OOM_SCORE_ADJ_MIN ||
        params.oomadj > OOM_SCORE_ADJ_MAX) {
        ALOGE("Invalid PROCPRIO oomadj argument %d", params.oomadj);
        return;
    }

    snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", params.pid);
    snprintf(val, sizeof(val), "%d", params.oomadj);
    writefilestring(path, val);

    if (use_inkernel_interface)
        return;

    if (low_ram_device) {
        if (params.oomadj >= 900) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 800) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 700) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 600) {
            // Launcher should be perceptible, don't kill it.
            params.oomadj = 200;
            soft_limit_mult = 1;
        } else if (params.oomadj >= 500) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 400) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 300) {
            soft_limit_mult = 1;
        } else if (params.oomadj >= 200) {
            soft_limit_mult = 2;
        } else if (params.oomadj >= 100) {
            soft_limit_mult = 10;
        } else if (params.oomadj >= 0) {
            soft_limit_mult = 20;
        } else {
            // Persistent processes will have a large
            // soft limit 512MB.
            soft_limit_mult = 64;
        }

        snprintf(path, sizeof(path),
                 "/dev/memcg/apps/uid_%d/pid_%d/memory.soft_limit_in_bytes",
                 params.uid, params.pid);
        snprintf(val, sizeof(val), "%d", soft_limit_mult * EIGHT_MEGA);
        writefilestring(path, val);
    }

    procp = pid_lookup(params.pid);
    if (!procp) {
        procp = malloc(sizeof(struct proc));
        if (!procp) {
            // Oh, the irony. May need to rebuild our state.
            return;
        }

        procp->pid = params.pid;
        procp->uid = params.uid;
        procp->oomadj = params.oomadj;
        proc_insert(procp);
    } else {
        proc_unslot(procp);
        procp->oomadj = params.oomadj;
        proc_slot(procp);
    }
}
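/*
 * Worked example of the soft limit written above, using hypothetical values:
 * for a process with uid 10001, pid 1234 and oomadj 100 on a low-RAM device,
 * soft_limit_mult is 10, so "83886080" (10 * EIGHT_MEGA = 80 MiB) is written
 * to /dev/memcg/apps/uid_10001/pid_1234/memory.soft_limit_in_bytes.
 */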

static void cmd_procremove(LMKD_CTRL_PACKET packet) {
    struct lmk_procremove params;

    if (use_inkernel_interface)
        return;

    lmkd_pack_get_procremove(packet, &params);
    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(params.pid);
}

static void cmd_target(int ntargets, LMKD_CTRL_PACKET packet) {
    int i;
    struct lmk_target target;

    if (ntargets > (int)ARRAY_SIZE(lowmem_adj))
        return;

    for (i = 0; i < ntargets; i++) {
        lmkd_pack_get_target(packet, i, &target);
        lowmem_minfree[i] = target.minfree;
        lowmem_adj[i] = target.oom_adj_score;
    }

    lowmem_targets_size = ntargets;

    if (has_inkernel_module) {
        char minfreestr[128];
        char killpriostr[128];

        minfreestr[0] = '\0';
        killpriostr[0] = '\0';

        for (i = 0; i < lowmem_targets_size; i++) {
            char val[40];

            if (i) {
                strlcat(minfreestr, ",", sizeof(minfreestr));
                strlcat(killpriostr, ",", sizeof(killpriostr));
            }

            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_minfree[i] : 0);
            strlcat(minfreestr, val, sizeof(minfreestr));
            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_adj[i] : 0);
            strlcat(killpriostr, val, sizeof(killpriostr));
        }

        writefilestring(INKERNEL_MINFREE_PATH, minfreestr);
        writefilestring(INKERNEL_ADJ_PATH, killpriostr);
    }
}
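/*
 * Sketch of what cmd_target() writes when the in-kernel driver is present,
 * with hypothetical framework-supplied targets (minfree in pages,
 * oom_adj_score): {18432, 0}, {23040, 100}, {27648, 200} produce
 * "18432,23040,27648" in INKERNEL_MINFREE_PATH and "0,100,200" in
 * INKERNEL_ADJ_PATH.
 */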
574
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800575static void ctrl_data_close(int dsock_idx) {
576 struct epoll_event epev;
577
578 ALOGI("closing lmkd data connection");
579 if (epoll_ctl(epollfd, EPOLL_CTL_DEL, data_sock[dsock_idx].sock, &epev) == -1) {
580 // Log a warning and keep going
581 ALOGW("epoll_ctl for data connection socket failed; errno=%d", errno);
582 }
Todd Poynor3948f802013-07-09 19:35:14 -0700583 maxevents--;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800584
585 close(data_sock[dsock_idx].sock);
586 data_sock[dsock_idx].sock = -1;
Todd Poynor3948f802013-07-09 19:35:14 -0700587}
588
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800589static int ctrl_data_read(int dsock_idx, char *buf, size_t bufsz) {
Todd Poynor3948f802013-07-09 19:35:14 -0700590 int ret = 0;
591
Suren Baghdasaryanb2d59ee2018-04-13 12:43:41 -0700592 ret = TEMP_FAILURE_RETRY(read(data_sock[dsock_idx].sock, buf, bufsz));
Todd Poynor3948f802013-07-09 19:35:14 -0700593
594 if (ret == -1) {
595 ALOGE("control data socket read failed; errno=%d", errno);
596 } else if (ret == 0) {
597 ALOGE("Got EOF on control data socket");
598 ret = -1;
599 }
600
601 return ret;
602}
603
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800604static void ctrl_command_handler(int dsock_idx) {
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800605 LMKD_CTRL_PACKET packet;
Todd Poynor3948f802013-07-09 19:35:14 -0700606 int len;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800607 enum lmk_cmd cmd;
Todd Poynor3948f802013-07-09 19:35:14 -0700608 int nargs;
609 int targets;
610
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800611 len = ctrl_data_read(dsock_idx, (char *)packet, CTRL_PACKET_MAX_SIZE);
Todd Poynor3948f802013-07-09 19:35:14 -0700612 if (len <= 0)
613 return;
614
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800615 if (len < (int)sizeof(int)) {
616 ALOGE("Wrong control socket read length len=%d", len);
617 return;
618 }
619
620 cmd = lmkd_pack_get_cmd(packet);
Todd Poynor3948f802013-07-09 19:35:14 -0700621 nargs = len / sizeof(int) - 1;
622 if (nargs < 0)
623 goto wronglen;
624
Todd Poynor3948f802013-07-09 19:35:14 -0700625 switch(cmd) {
626 case LMK_TARGET:
627 targets = nargs / 2;
628 if (nargs & 0x1 || targets > (int)ARRAY_SIZE(lowmem_adj))
629 goto wronglen;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800630 cmd_target(targets, packet);
Todd Poynor3948f802013-07-09 19:35:14 -0700631 break;
632 case LMK_PROCPRIO:
Colin Crossfbb78c62014-06-13 14:52:43 -0700633 if (nargs != 3)
Todd Poynor3948f802013-07-09 19:35:14 -0700634 goto wronglen;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800635 cmd_procprio(packet);
Todd Poynor3948f802013-07-09 19:35:14 -0700636 break;
637 case LMK_PROCREMOVE:
638 if (nargs != 1)
639 goto wronglen;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800640 cmd_procremove(packet);
Todd Poynor3948f802013-07-09 19:35:14 -0700641 break;
642 default:
643 ALOGE("Received unknown command code %d", cmd);
644 return;
645 }
646
647 return;
648
649wronglen:
650 ALOGE("Wrong control socket read length cmd=%d len=%d", cmd, len);
651}
652
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800653static void ctrl_data_handler(int data, uint32_t events) {
654 if (events & EPOLLIN) {
655 ctrl_command_handler(data);
Todd Poynor3948f802013-07-09 19:35:14 -0700656 }
657}
658
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800659static int get_free_dsock() {
660 for (int i = 0; i < MAX_DATA_CONN; i++) {
661 if (data_sock[i].sock < 0) {
662 return i;
663 }
664 }
665 return -1;
666}
Todd Poynor3948f802013-07-09 19:35:14 -0700667
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800668static void ctrl_connect_handler(int data __unused, uint32_t events __unused) {
669 struct epoll_event epev;
670 int free_dscock_idx = get_free_dsock();
671
672 if (free_dscock_idx < 0) {
673 /*
674 * Number of data connections exceeded max supported. This should not
675 * happen but if it does we drop all existing connections and accept
676 * the new one. This prevents inactive connections from monopolizing
677 * data socket and if we drop ActivityManager connection it will
678 * immediately reconnect.
679 */
680 for (int i = 0; i < MAX_DATA_CONN; i++) {
681 ctrl_data_close(i);
682 }
683 free_dscock_idx = 0;
Todd Poynor3948f802013-07-09 19:35:14 -0700684 }
685
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800686 data_sock[free_dscock_idx].sock = accept(ctrl_sock.sock, NULL, NULL);
687 if (data_sock[free_dscock_idx].sock < 0) {
Todd Poynor3948f802013-07-09 19:35:14 -0700688 ALOGE("lmkd control socket accept failed; errno=%d", errno);
689 return;
690 }
691
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800692 ALOGI("lmkd data connection established");
693 /* use data to store data connection idx */
694 data_sock[free_dscock_idx].handler_info.data = free_dscock_idx;
695 data_sock[free_dscock_idx].handler_info.handler = ctrl_data_handler;
Todd Poynor3948f802013-07-09 19:35:14 -0700696 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800697 epev.data.ptr = (void *)&(data_sock[free_dscock_idx].handler_info);
698 if (epoll_ctl(epollfd, EPOLL_CTL_ADD, data_sock[free_dscock_idx].sock, &epev) == -1) {
Todd Poynor3948f802013-07-09 19:35:14 -0700699 ALOGE("epoll_ctl for data connection socket failed; errno=%d", errno);
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800700 ctrl_data_close(free_dscock_idx);
Todd Poynor3948f802013-07-09 19:35:14 -0700701 return;
702 }
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800703 maxevents++;
Todd Poynor3948f802013-07-09 19:35:14 -0700704}
705
Rajeev Kumar70450032018-01-31 17:54:56 -0800706#ifdef LMKD_LOG_STATS
Rajeev Kumar244ace62018-10-05 12:34:59 -0700707static void memory_stat_parse_line(char* line, struct memory_stat* mem_st) {
Greg Kaiserd6d84712018-03-23 14:16:12 -0700708 char key[LINE_MAX + 1];
Rajeev Kumar70450032018-01-31 17:54:56 -0800709 int64_t value;
710
Greg Kaiserd6d84712018-03-23 14:16:12 -0700711 sscanf(line, "%" STRINGIFY(LINE_MAX) "s %" SCNd64 "", key, &value);
Rajeev Kumar70450032018-01-31 17:54:56 -0800712
713 if (strcmp(key, "total_") < 0) {
714 return;
715 }
716
717 if (!strcmp(key, "total_pgfault"))
718 mem_st->pgfault = value;
719 else if (!strcmp(key, "total_pgmajfault"))
720 mem_st->pgmajfault = value;
721 else if (!strcmp(key, "total_rss"))
722 mem_st->rss_in_bytes = value;
723 else if (!strcmp(key, "total_cache"))
724 mem_st->cache_in_bytes = value;
725 else if (!strcmp(key, "total_swap"))
726 mem_st->swap_in_bytes = value;
727}
728
Rajeev Kumar244ace62018-10-05 12:34:59 -0700729static int memory_stat_from_cgroup(struct memory_stat* mem_st, int pid, uid_t uid) {
730 FILE* fp;
731 char buf[PATH_MAX];
Rajeev Kumar70450032018-01-31 17:54:56 -0800732
Rajeev Kumar244ace62018-10-05 12:34:59 -0700733 snprintf(buf, sizeof(buf), MEMCG_PROCESS_MEMORY_STAT_PATH, uid, pid);
Rajeev Kumar70450032018-01-31 17:54:56 -0800734
Rajeev Kumar244ace62018-10-05 12:34:59 -0700735 fp = fopen(buf, "r");
Rajeev Kumar70450032018-01-31 17:54:56 -0800736
Rajeev Kumar244ace62018-10-05 12:34:59 -0700737 if (fp == NULL) {
738 ALOGE("%s open failed: %s", buf, strerror(errno));
739 return -1;
740 }
Rajeev Kumar70450032018-01-31 17:54:56 -0800741
Rajeev Kumar244ace62018-10-05 12:34:59 -0700742 while (fgets(buf, PAGE_SIZE, fp) != NULL) {
743 memory_stat_parse_line(buf, mem_st);
744 }
745 fclose(fp);
Rajeev Kumar70450032018-01-31 17:54:56 -0800746
Rajeev Kumar244ace62018-10-05 12:34:59 -0700747 return 0;
748}
749
750static int memory_stat_from_procfs(struct memory_stat* mem_st, int pid) {
751 char path[PATH_MAX];
752 char buffer[PROC_STAT_BUFFER_SIZE];
753 int fd, ret;
754
755 snprintf(path, sizeof(path), PROC_STAT_FILE_PATH, pid);
756 if ((fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
757 ALOGE("%s open failed: %s", path, strerror(errno));
758 return -1;
759 }
760
761 ret = read(fd, buffer, sizeof(buffer));
762 if (ret < 0) {
763 ALOGE("%s read failed: %s", path, strerror(errno));
764 close(fd);
765 return -1;
766 }
767 close(fd);
768
769 // field 10 is pgfault
770 // field 12 is pgmajfault
771 // field 24 is rss_in_pages
772 int64_t pgfault = 0, pgmajfault = 0, rss_in_pages = 0;
773 if (sscanf(buffer,
774 "%*u %*s %*s %*d %*d %*d %*d %*d %*d %" SCNd64 " %*d "
775 "%" SCNd64 " %*d %*u %*u %*d %*d %*d %*d %*d %*d "
776 "%*d %*d %" SCNd64 "",
777 &pgfault, &pgmajfault, &rss_in_pages) != 3) {
778 return -1;
779 }
780 mem_st->pgfault = pgfault;
781 mem_st->pgmajfault = pgmajfault;
782 mem_st->rss_in_bytes = (rss_in_pages * PAGE_SIZE);
783
784 return 0;
Rajeev Kumar70450032018-01-31 17:54:56 -0800785}
786#endif
787
/* /proc/zoneinfo parsing routines */
static int64_t zoneinfo_parse_protection(char *cp) {
    int64_t max = 0;
    long long zoneval;
    char *save_ptr;

    for (cp = strtok_r(cp, "(), ", &save_ptr); cp;
         cp = strtok_r(NULL, "), ", &save_ptr)) {
        zoneval = strtoll(cp, &cp, 0);
        if (zoneval > max) {
            max = (zoneval > INT64_MAX) ? INT64_MAX : zoneval;
        }
    }

    return max;
}
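/*
 * Worked example (input format assumed from typical /proc/zoneinfo output):
 * for a line such as "        protection: (0, 0, 1824, 1824, 1824)" the
 * function above receives the part after "protection:" and returns 1824,
 * the largest of the listed protection values.
 */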

static bool zoneinfo_parse_line(char *line, union zoneinfo *zi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return true;
    }

    if (!strcmp(cp, "protection:")) {
        ap = strtok_r(NULL, ")", &save_ptr);
    } else {
        ap = strtok_r(NULL, " ", &save_ptr);
    }

    if (!ap) {
        return true;
    }

    switch (match_field(cp, ap, zoneinfo_field_names,
                        ZI_FIELD_COUNT, &val, &field_idx)) {
    case (PARSE_SUCCESS):
        zi->arr[field_idx] += val;
        break;
    case (NO_MATCH):
        if (!strcmp(cp, "protection:")) {
            zi->field.totalreserve_pages +=
                zoneinfo_parse_protection(ap);
        }
        break;
    case (PARSE_FAIL):
    default:
        return false;
    }
    return true;
}

static int zoneinfo_parse(union zoneinfo *zi) {
    static struct reread_data file_data = {
        .filename = ZONEINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(zi, 0, sizeof(union zoneinfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!zoneinfo_parse_line(line, zi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    zi->field.totalreserve_pages += zi->field.high;

    return 0;
}

/* /proc/meminfo parsing routines */
static bool meminfo_parse_line(char *line, union meminfo *mi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;
    enum field_match_result match_res;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return false;
    }

    ap = strtok_r(NULL, " ", &save_ptr);
    if (!ap) {
        return false;
    }

    match_res = match_field(cp, ap, meminfo_field_names, MI_FIELD_COUNT,
                            &val, &field_idx);
    if (match_res == PARSE_SUCCESS) {
        mi->arr[field_idx] = val / page_k;
    }
    return (match_res != PARSE_FAIL);
}

static int meminfo_parse(union meminfo *mi) {
    static struct reread_data file_data = {
        .filename = MEMINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(mi, 0, sizeof(union meminfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!meminfo_parse_line(line, mi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    mi->field.nr_file_pages = mi->field.cached + mi->field.swap_cached +
                              mi->field.buffers;

    return 0;
}
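/*
 * Worked example (line format assumed from typical /proc/meminfo output):
 * a line like "MemFree:          123456 kB" is split into "MemFree:" and
 * "123456"; match_field() then stores 123456 / page_k into
 * mi->field.nr_free_pages, i.e. the kB value converted to pages.
 */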

static int proc_get_size(int pid) {
    char path[PATH_MAX];
    char line[LINE_MAX];
    int fd;
    int rss = 0;
    int total;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/statm", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return -1;

    ret = read_all(fd, line, sizeof(line) - 1);
    if (ret < 0) {
        close(fd);
        return -1;
    }

    sscanf(line, "%d %d ", &total, &rss);
    close(fd);
    return rss;
}

static char *proc_get_name(int pid) {
    char path[PATH_MAX];
    static char line[LINE_MAX];
    int fd;
    char *cp;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/cmdline", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return NULL;
    ret = read_all(fd, line, sizeof(line) - 1);
    close(fd);
    if (ret < 0) {
        return NULL;
    }

    cp = strchr(line, ' ');
    if (cp)
        *cp = '\0';

    return line;
}

static struct proc *proc_adj_lru(int oomadj) {
    return (struct proc *)adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
}

static struct proc *proc_get_heaviest(int oomadj) {
    struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
    struct adjslot_list *curr = head->next;
    struct proc *maxprocp = NULL;
    int maxsize = 0;
    while (curr != head) {
        int pid = ((struct proc *)curr)->pid;
        int tasksize = proc_get_size(pid);
        if (tasksize <= 0) {
            struct adjslot_list *next = curr->next;
            pid_remove(pid);
            curr = next;
        } else {
            if (tasksize > maxsize) {
                maxsize = tasksize;
                maxprocp = (struct proc *)curr;
            }
            curr = curr->next;
        }
    }
    return maxprocp;
}

/* Kill one process specified by procp. Returns the size of the process killed */
static int kill_one_process(struct proc* procp) {
    int pid = procp->pid;
    uid_t uid = procp->uid;
    char *taskname;
    int tasksize;
    int r;
    int result = -1;

#ifdef LMKD_LOG_STATS
    struct memory_stat mem_st = {};
    int memory_stat_parse_result = -1;
#endif

    taskname = proc_get_name(pid);
    if (!taskname) {
        goto out;
    }

    tasksize = proc_get_size(pid);
    if (tasksize <= 0) {
        goto out;
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log) {
        if (per_app_memcg) {
            memory_stat_parse_result = memory_stat_from_cgroup(&mem_st, pid, uid);
        } else {
            memory_stat_parse_result = memory_stat_from_procfs(&mem_st, pid);
        }
    }
#endif

    TRACE_KILL_START(pid);

    /* CAP_KILL required */
    r = kill(pid, SIGKILL);
    ALOGI("Kill '%s' (%d), uid %d, oom_adj %d to free %ldkB",
          taskname, pid, uid, procp->oomadj, tasksize * page_k);

    TRACE_KILL_END();

    if (r) {
        ALOGE("kill(%d): errno=%d", pid, errno);
        goto out;
    } else {
#ifdef LMKD_LOG_STATS
        if (memory_stat_parse_result == 0) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname,
                    procp->oomadj, mem_st.pgfault, mem_st.pgmajfault, mem_st.rss_in_bytes,
                    mem_st.cache_in_bytes, mem_st.swap_in_bytes);
        } else if (enable_stats_log) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname, procp->oomadj,
                                          -1, -1, tasksize * BYTES_IN_KILOBYTE, -1, -1);
        }
#endif
        result = tasksize;
    }

out:
    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(pid);
    return result;
}

/*
 * Find processes to kill to free required number of pages.
 * If pages_to_free is set to 0 only one process will be killed.
 * Returns the size of the killed processes.
 */
static int find_and_kill_processes(int min_score_adj, int pages_to_free) {
    int i;
    int killed_size;
    int pages_freed = 0;

#ifdef LMKD_LOG_STATS
    bool lmk_state_change_start = false;
#endif

    for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) {
        struct proc *procp;

        while (true) {
            procp = kill_heaviest_task ?
                proc_get_heaviest(i) : proc_adj_lru(i);

            if (!procp)
                break;

            killed_size = kill_one_process(procp);
            if (killed_size >= 0) {
#ifdef LMKD_LOG_STATS
                if (enable_stats_log && !lmk_state_change_start) {
                    lmk_state_change_start = true;
                    stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                                  LMK_STATE_CHANGE_START);
                }
#endif

                pages_freed += killed_size;
                if (pages_freed >= pages_to_free) {

#ifdef LMKD_LOG_STATS
                    if (enable_stats_log && lmk_state_change_start) {
                        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                                      LMK_STATE_CHANGE_STOP);
                    }
#endif
                    return pages_freed;
                }
            }
        }
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log && lmk_state_change_start) {
        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED, LMK_STATE_CHANGE_STOP);
    }
#endif

    return pages_freed;
}
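/*
 * Note on the pages_to_free == 0 case used for low-RAM devices (see
 * mp_event_common() below): after the first successful kill,
 * pages_freed >= pages_to_free is immediately true, so exactly one process
 * is killed and its size is returned.
 */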

static int64_t get_memory_usage(struct reread_data *file_data) {
    int ret;
    int64_t mem_usage;
    char buf[32];

    if (reread_file(file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    if (!parse_int64(buf, &mem_usage)) {
        ALOGE("%s parse error", file_data->filename);
        return -1;
    }
    if (mem_usage == 0) {
        ALOGE("No memory!");
        return -1;
    }
    return mem_usage;
}

void record_low_pressure_levels(union meminfo *mi) {
    if (low_pressure_mem.min_nr_free_pages == -1 ||
        low_pressure_mem.min_nr_free_pages > mi->field.nr_free_pages) {
        if (debug_process_killing) {
            ALOGI("Low pressure min memory update from %" PRId64 " to %" PRId64,
                  low_pressure_mem.min_nr_free_pages, mi->field.nr_free_pages);
        }
        low_pressure_mem.min_nr_free_pages = mi->field.nr_free_pages;
    }
    /*
     * Free memory at low vmpressure events occasionally gets spikes,
     * possibly a stale low vmpressure event with memory already
     * freed up (no memory pressure should have been reported).
     * Ignore large jumps in max_nr_free_pages that would mess up our stats.
     */
    if (low_pressure_mem.max_nr_free_pages == -1 ||
        (low_pressure_mem.max_nr_free_pages < mi->field.nr_free_pages &&
         mi->field.nr_free_pages - low_pressure_mem.max_nr_free_pages <
         low_pressure_mem.max_nr_free_pages * 0.1)) {
        if (debug_process_killing) {
            ALOGI("Low pressure max memory update from %" PRId64 " to %" PRId64,
                  low_pressure_mem.max_nr_free_pages, mi->field.nr_free_pages);
        }
        low_pressure_mem.max_nr_free_pages = mi->field.nr_free_pages;
    }
}

enum vmpressure_level upgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ?
        level + 1 : level);
}

enum vmpressure_level downgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level > VMPRESS_LEVEL_LOW) ?
        level - 1 : level);
}

static inline unsigned long get_time_diff_ms(struct timeval *from,
                                             struct timeval *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_usec - from->tv_usec) / 1000;
}

static void mp_event_common(int data, uint32_t events __unused) {
    int ret;
    unsigned long long evcount;
    int64_t mem_usage, memsw_usage;
    int64_t mem_pressure;
    enum vmpressure_level lvl;
    union meminfo mi;
    union zoneinfo zi;
    struct timeval curr_tm;
    static struct timeval last_kill_tm;
    static unsigned long kill_skip_count = 0;
    enum vmpressure_level level = (enum vmpressure_level)data;
    long other_free = 0, other_file = 0;
    int min_score_adj;
    int pages_to_free = 0;
    int minfree = 0;
    static struct reread_data mem_usage_file_data = {
        .filename = MEMCG_MEMORY_USAGE,
        .fd = -1,
    };
    static struct reread_data memsw_usage_file_data = {
        .filename = MEMCG_MEMORYSW_USAGE,
        .fd = -1,
    };

    /*
     * Check all event counters from low to critical
     * and upgrade to the highest priority one. By reading
     * eventfd we also reset the event counters.
     */
    for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
        if (mpevfd[lvl] != -1 &&
            TEMP_FAILURE_RETRY(read(mpevfd[lvl],
                               &evcount, sizeof(evcount))) > 0 &&
            evcount > 0 && lvl > level) {
            level = lvl;
        }
    }

    gettimeofday(&curr_tm, NULL);
    if (kill_timeout_ms) {
        if (get_time_diff_ms(&last_kill_tm, &curr_tm) < kill_timeout_ms) {
            kill_skip_count++;
            return;
        }
    }

    if (kill_skip_count > 0) {
        ALOGI("%lu memory pressure events were skipped after a kill!",
              kill_skip_count);
        kill_skip_count = 0;
    }

    if (meminfo_parse(&mi) < 0 || zoneinfo_parse(&zi) < 0) {
        ALOGE("Failed to get free memory!");
        return;
    }

    if (use_minfree_levels) {
        int i;

        other_free = mi.field.nr_free_pages - zi.field.totalreserve_pages;
        if (mi.field.nr_file_pages > (mi.field.shmem + mi.field.unevictable + mi.field.swap_cached)) {
            other_file = (mi.field.nr_file_pages - mi.field.shmem -
                          mi.field.unevictable - mi.field.swap_cached);
        } else {
            other_file = 0;
        }

        min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        for (i = 0; i < lowmem_targets_size; i++) {
            minfree = lowmem_minfree[i];
            if (other_free < minfree && other_file < minfree) {
                min_score_adj = lowmem_adj[i];
                break;
            }
        }

        if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
            if (debug_process_killing) {
                ALOGI("Ignore %s memory pressure event "
                      "(free memory=%ldkB, cache=%ldkB, limit=%ldkB)",
                      level_name[level], other_free * page_k, other_file * page_k,
                      (long)lowmem_minfree[lowmem_targets_size - 1] * page_k);
            }
            return;
        }

        /* Free up enough pages to push over the highest minfree level */
        pages_to_free = lowmem_minfree[lowmem_targets_size - 1] -
            ((other_free < other_file) ? other_free : other_file);
        goto do_kill;
    }

    if (level == VMPRESS_LEVEL_LOW) {
        record_low_pressure_levels(&mi);
    }

    if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) {
        /* Do not monitor this pressure level */
        return;
    }

    if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
        goto do_kill;
    }
    if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
        goto do_kill;
    }

    // Calculate percent for swappiness.
    mem_pressure = (mem_usage * 100) / memsw_usage;

    if (enable_pressure_upgrade && level != VMPRESS_LEVEL_CRITICAL) {
        // We are swapping too much.
        if (mem_pressure < upgrade_pressure) {
            level = upgrade_level(level);
            if (debug_process_killing) {
                ALOGI("Event upgraded to %s", level_name[level]);
            }
        }
    }

    // If the pressure is larger than downgrade_pressure lmk will not
    // kill any process, since enough memory is available.
    if (mem_pressure > downgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Ignore %s memory pressure", level_name[level]);
        }
        return;
    } else if (level == VMPRESS_LEVEL_CRITICAL &&
               mem_pressure > upgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Downgrade critical memory pressure");
        }
        // Downgrade event, since enough memory available.
        level = downgrade_level(level);
    }

do_kill:
    if (low_ram_device) {
        /* For Go devices kill only one task */
        if (find_and_kill_processes(level_oomadj[level], 0) == 0) {
            if (debug_process_killing) {
                ALOGI("Nothing to kill");
            }
        }
    } else {
        int pages_freed;
        static struct timeval last_report_tm;
        static unsigned long report_skip_count = 0;

        if (!use_minfree_levels) {
            /* If pressure level is less than critical and enough free swap then ignore */
            if (level < VMPRESS_LEVEL_CRITICAL &&
                mi.field.free_swap > low_pressure_mem.max_nr_free_pages) {
                if (debug_process_killing) {
                    ALOGI("Ignoring pressure since %" PRId64
                          " swap pages are available ",
                          mi.field.free_swap);
                }
                return;
            }
            /* Free up enough memory to downgrade the memory pressure to low level */
1356 if (mi.field.nr_free_pages < low_pressure_mem.max_nr_free_pages) {
1357 pages_to_free = low_pressure_mem.max_nr_free_pages -
1358 mi.field.nr_free_pages;
1359 } else {
1360 if (debug_process_killing) {
1361 ALOGI("Ignoring pressure since more memory is "
1362 "available (%" PRId64 ") than watermark (%" PRId64 ")",
1363 mi.field.nr_free_pages, low_pressure_mem.max_nr_free_pages);
1364 }
1365 return;
1366 }
1367 min_score_adj = level_oomadj[level];
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001368 }
1369
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001370 pages_freed = find_and_kill_processes(min_score_adj, pages_to_free);
1371
1372 if (pages_freed == 0) {
1373 /* Rate limit kill reports when nothing was reclaimed */
1374 if (get_time_diff_ms(&last_report_tm, &curr_tm) < FAIL_REPORT_RLIMIT_MS) {
1375 report_skip_count++;
1376 return;
1377 }
1378 }
1379
1380 if (pages_freed >= pages_to_free) {
1381 /* Reset kill time only if reclaimed enough memory */
1382 last_kill_tm = curr_tm;
1383 }
Suren Baghdasaryanda88b242018-05-10 16:10:56 -07001384
1385 if (use_minfree_levels) {
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001386 ALOGI("Killing to reclaim %ldkB, reclaimed %ldkB, cache(%ldkB) and "
1387 "free(%" PRId64 "kB)-reserved(%" PRId64 "kB) below min(%ldkB) for oom_adj %d",
1388 pages_to_free * page_k, pages_freed * page_k,
1389 other_file * page_k, mi.field.nr_free_pages * page_k,
1390 zi.field.totalreserve_pages * page_k,
1391 minfree * page_k, min_score_adj);
1392 } else {
1393 ALOGI("Killing to reclaim %ldkB, reclaimed %ldkB at oom_adj %d",
1394 pages_to_free * page_k, pages_freed * page_k, min_score_adj);
Suren Baghdasaryanda88b242018-05-10 16:10:56 -07001395 }
1396
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001397 if (report_skip_count > 0) {
1398 ALOGI("Suppressed %lu failed kill reports", report_skip_count);
1399 report_skip_count = 0;
Robert Beneacaeaa652017-08-11 16:03:20 -07001400 }
Suren Baghdasaryand6cbf3f2018-09-05 15:46:32 -07001401
1402 last_report_tm = curr_tm;
Colin Crossf8857cc2014-07-11 17:16:56 -07001403 }
Todd Poynor3948f802013-07-09 19:35:14 -07001404}
1405
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001406static bool init_mp_common(enum vmpressure_level level) {
Todd Poynor3948f802013-07-09 19:35:14 -07001407 int mpfd;
1408 int evfd;
1409 int evctlfd;
1410 char buf[256];
1411 struct epoll_event epev;
1412 int ret;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001413 int level_idx = (int)level;
1414 const char *levelstr = level_name[level_idx];
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001415
Nick Kralevichc68c8862015-12-18 20:52:37 -08001416 mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07001417 if (mpfd < 0) {
1418 ALOGI("No kernel memory.pressure_level support (errno=%d)", errno);
1419 goto err_open_mpfd;
1420 }
1421
Nick Kralevichc68c8862015-12-18 20:52:37 -08001422 evctlfd = open(MEMCG_SYSFS_PATH "cgroup.event_control", O_WRONLY | O_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07001423 if (evctlfd < 0) {
1424 ALOGI("No kernel memory cgroup event control (errno=%d)", errno);
1425 goto err_open_evctlfd;
1426 }
1427
Nick Kralevichc68c8862015-12-18 20:52:37 -08001428 evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07001429 if (evfd < 0) {
1430 ALOGE("eventfd failed for level %s; errno=%d", levelstr, errno);
1431 goto err_eventfd;
1432 }
1433
1434 ret = snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr);
1435 if (ret >= (ssize_t)sizeof(buf)) {
1436 ALOGE("cgroup.event_control line overflow for level %s", levelstr);
1437 goto err;
1438 }
1439
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001440 ret = TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1));
Todd Poynor3948f802013-07-09 19:35:14 -07001441 if (ret == -1) {
1442 ALOGE("cgroup.event_control write failed for level %s; errno=%d",
1443 levelstr, errno);
1444 goto err;
1445 }
1446
1447 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001448 /* use data to store event level */
1449 vmpressure_hinfo[level_idx].data = level_idx;
1450 vmpressure_hinfo[level_idx].handler = mp_event_common;
1451 epev.data.ptr = (void *)&vmpressure_hinfo[level_idx];
Todd Poynor3948f802013-07-09 19:35:14 -07001452 ret = epoll_ctl(epollfd, EPOLL_CTL_ADD, evfd, &epev);
1453 if (ret == -1) {
1454 ALOGE("epoll_ctl for level %s failed; errno=%d", levelstr, errno);
1455 goto err;
1456 }
1457 maxevents++;
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001458 mpevfd[level] = evfd;
Suren Baghdasaryan1bd2fc42018-01-04 08:54:53 -08001459 close(evctlfd);
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001460 return true;
Todd Poynor3948f802013-07-09 19:35:14 -07001461
1462err:
1463 close(evfd);
1464err_eventfd:
1465 close(evctlfd);
1466err_open_evctlfd:
1467 close(mpfd);
1468err_open_mpfd:
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001469 return false;
Robert Benea673e2762017-06-01 16:32:31 -07001470}
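
/*
 * Usage sketch (example only): once init_mp_common() has registered the
 * eventfd via cgroup.event_control, each vmpressure notification increments
 * an 8-byte counter on that eventfd; a consumer drains it roughly like this
 * (read(2), TEMP_FAILURE_RETRY and uint64_t are already available through
 * the headers included at the top of this file):
 */
#if 0   /* example only */
static void drain_vmpressure_event(int evfd) {
    uint64_t count;

    /* the eventfd is non-blocking, so read() fails with EAGAIN when nothing is pending */
    if (TEMP_FAILURE_RETRY(read(evfd, &count, sizeof(count))) == sizeof(count)) {
        /* 'count' notifications were coalesced since the last read */
    }
}
#endif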
1471
Todd Poynor3948f802013-07-09 19:35:14 -07001472static int init(void) {
1473 struct epoll_event epev;
1474 int i;
1475 int ret;
1476
1477 page_k = sysconf(_SC_PAGESIZE);
1478 if (page_k == -1)
1479 page_k = PAGE_SIZE;
1480 page_k /= 1024;
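    /* page_k is now kilobytes per page (e.g. 4 for 4096-byte pages), so page counts * page_k yield kB */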
1481
1482 epollfd = epoll_create(MAX_EPOLL_EVENTS);
1483 if (epollfd == -1) {
1484 ALOGE("epoll_create failed (errno=%d)", errno);
1485 return -1;
1486 }
1487
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001488 // mark data connections as not connected
1489 for (int i = 0; i < MAX_DATA_CONN; i++) {
1490 data_sock[i].sock = -1;
1491 }
1492
1493 ctrl_sock.sock = android_get_control_socket("lmkd");
1494 if (ctrl_sock.sock < 0) {
Todd Poynor3948f802013-07-09 19:35:14 -07001495 ALOGE("get lmkd control socket failed");
1496 return -1;
1497 }
1498
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001499 ret = listen(ctrl_sock.sock, MAX_DATA_CONN);
Todd Poynor3948f802013-07-09 19:35:14 -07001500 if (ret < 0) {
1501 ALOGE("lmkd control socket listen failed (errno=%d)", errno);
1502 return -1;
1503 }
1504
1505 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001506 ctrl_sock.handler_info.handler = ctrl_connect_handler;
1507 epev.data.ptr = (void *)&(ctrl_sock.handler_info);
1508 if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ctrl_sock.sock, &epev) == -1) {
Todd Poynor3948f802013-07-09 19:35:14 -07001509 ALOGE("epoll_ctl for lmkd control socket failed (errno=%d)", errno);
1510 return -1;
1511 }
1512 maxevents++;
1513
Robert Benea164baeb2017-09-11 16:53:28 -07001514 has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK);
Suren Baghdasaryan979591b2018-01-18 17:27:30 -08001515 use_inkernel_interface = has_inkernel_module;
Todd Poynor3948f802013-07-09 19:35:14 -07001516
1517 if (use_inkernel_interface) {
1518 ALOGI("Using in-kernel low memory killer interface");
1519 } else {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001520 if (!init_mp_common(VMPRESS_LEVEL_LOW) ||
1521 !init_mp_common(VMPRESS_LEVEL_MEDIUM) ||
1522 !init_mp_common(VMPRESS_LEVEL_CRITICAL)) {
Todd Poynor3948f802013-07-09 19:35:14 -07001523 ALOGE("Kernel does not support memory pressure events or in-kernel low memory killer");
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001524 return -1;
1525 }
Todd Poynor3948f802013-07-09 19:35:14 -07001526 }
1527
Chong Zhang0a4acdf2015-10-14 16:19:53 -07001528 for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
Todd Poynor3948f802013-07-09 19:35:14 -07001529 procadjslot_list[i].next = &procadjslot_list[i];
1530 procadjslot_list[i].prev = &procadjslot_list[i];
1531 }
1532
1533 return 0;
1534}
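
/*
 * Note (example only): the loop above makes each procadjslot_list head an
 * empty circular doubly-linked list pointing at itself.  Linking a node into
 * such a list, assuming the node shares the same next/prev layout, looks
 * roughly like this (the names below are hypothetical):
 */
#if 0   /* example only */
struct slot_node {
    struct slot_node *next;
    struct slot_node *prev;
};

static void slot_insert_head(struct slot_node *head, struct slot_node *node) {
    struct slot_node *next = head->next;

    node->next = next;
    node->prev = head;
    next->prev = node;
    head->next = node;
}
#endif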
1535
1536static void mainloop(void) {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001537 struct event_handler_info* handler_info;
1538 struct epoll_event *evt;
1539
Todd Poynor3948f802013-07-09 19:35:14 -07001540 while (1) {
1541 struct epoll_event events[maxevents];
1542 int nevents;
1543 int i;
1544
Todd Poynor3948f802013-07-09 19:35:14 -07001545 nevents = epoll_wait(epollfd, events, maxevents, -1);
1546
1547 if (nevents == -1) {
1548 if (errno == EINTR)
1549 continue;
1550 ALOGE("epoll_wait failed (errno=%d)", errno);
1551 continue;
1552 }
1553
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001554 /*
1555 * First pass to see if any data socket connections were dropped.
1556         * Dropped connections must be handled before any other events so
1557         * that the data connection is deallocated and the case where a
1558         * connection is dropped and reestablished within the same epoll
1559         * cycle is handled correctly: the closure has to be processed first.
1560 */
1561 for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
1562 if ((evt->events & EPOLLHUP) && evt->data.ptr) {
1563 ALOGI("lmkd data connection dropped");
1564 handler_info = (struct event_handler_info*)evt->data.ptr;
1565 ctrl_data_close(handler_info->data);
1566 }
1567 }
1568
1569 /* Second pass to handle all other events */
1570 for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
1571 if (evt->events & EPOLLERR)
Todd Poynor3948f802013-07-09 19:35:14 -07001572 ALOGD("EPOLLERR on event #%d", i);
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001573 if (evt->events & EPOLLHUP) {
1574 /* This case was handled in the first pass */
1575 continue;
1576 }
1577 if (evt->data.ptr) {
1578 handler_info = (struct event_handler_info*)evt->data.ptr;
1579 handler_info->handler(handler_info->data, evt->events);
1580 }
Todd Poynor3948f802013-07-09 19:35:14 -07001581 }
1582 }
1583}
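
/*
 * Sketch (example only): every event source in this file uses the pattern
 * that mainloop() dispatches on - a handler and its data are stored in a
 * struct event_handler_info and epoll_event.data.ptr points at it.  The
 * names below are hypothetical; the handler signature mirrors the way
 * mp_event_common and ctrl_connect_handler are registered above.
 */
#if 0   /* example only */
static void example_handler(int data, uint32_t events);

static bool register_example_source(int fd) {
    static struct event_handler_info example_hinfo = {
        .data = 0,
        .handler = example_handler,
    };
    struct epoll_event epev;

    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&example_hinfo;
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &epev) == -1) {
        ALOGE("epoll_ctl for example source failed (errno=%d)", errno);
        return false;
    }
    maxevents++;
    return true;
}
#endif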
1584
Mark Salyzyne6ed68b2014-04-30 13:36:35 -07001585int main(int argc __unused, char **argv __unused) {
Colin Cross1a0d9be2014-07-14 14:31:15 -07001586 struct sched_param param = {
1587 .sched_priority = 1,
1588 };
1589
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001590    /* By default disable "low" vmpressure level events by mapping them above OOM_SCORE_ADJ_MAX */
1591 level_oomadj[VMPRESS_LEVEL_LOW] =
1592 property_get_int32("ro.lmk.low", OOM_SCORE_ADJ_MAX + 1);
1593 level_oomadj[VMPRESS_LEVEL_MEDIUM] =
1594 property_get_int32("ro.lmk.medium", 800);
1595 level_oomadj[VMPRESS_LEVEL_CRITICAL] =
1596 property_get_int32("ro.lmk.critical", 0);
Robert Beneacaeaa652017-08-11 16:03:20 -07001597 debug_process_killing = property_get_bool("ro.lmk.debug", false);
Suren Baghdasaryanad2fd912017-12-08 13:08:41 -08001598
1599 /* By default disable upgrade/downgrade logic */
1600 enable_pressure_upgrade =
1601 property_get_bool("ro.lmk.critical_upgrade", false);
1602 upgrade_pressure =
1603 (int64_t)property_get_int32("ro.lmk.upgrade_pressure", 100);
1604 downgrade_pressure =
1605 (int64_t)property_get_int32("ro.lmk.downgrade_pressure", 100);
Suren Baghdasaryan662492a2017-12-08 13:17:06 -08001606 kill_heaviest_task =
Suren Baghdasaryan9ff66ff2018-04-13 11:49:54 -07001607 property_get_bool("ro.lmk.kill_heaviest_task", false);
Suren Baghdasaryan39a22e72018-04-13 11:45:38 -07001608 low_ram_device = property_get_bool("ro.config.low_ram", false);
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -08001609 kill_timeout_ms =
1610 (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001611 use_minfree_levels =
1612 property_get_bool("ro.lmk.use_minfree_levels", false);
Rajeev Kumar244ace62018-10-05 12:34:59 -07001613 per_app_memcg = property_get_bool("ro.config.per_app_memcg", low_ram_device);
Rajeev Kumar70450032018-01-31 17:54:56 -08001614#ifdef LMKD_LOG_STATS
Rajeev Kumar1c669f72018-03-09 15:20:56 -08001615 statslog_init(&log_ctx, &enable_stats_log);
Rajeev Kumar70450032018-01-31 17:54:56 -08001616#endif
1617
Daniel Colascioned39adf22018-01-05 14:59:55 -08001618 // MCL_ONFAULT pins pages as they fault instead of loading
1619 // everything immediately all at once. (Which would be bad,
1620 // because as of this writing, we have a lot of mapped pages we
1621 // never use.) Old kernels will see MCL_ONFAULT and fail with
1622 // EINVAL; we ignore this failure.
1623 //
1624 // N.B. read the man page for mlockall. MCL_CURRENT | MCL_ONFAULT
1625 // pins ⊆ MCL_CURRENT, converging to just MCL_CURRENT as we fault
1626 // in pages.
1627 if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) && errno != EINVAL)
Daniel Colascione4dd5d002018-01-03 12:01:02 -08001628 ALOGW("mlockall failed: errno=%d", errno);
1629
Colin Cross1a0d9be2014-07-14 14:31:15 -07001630 sched_setscheduler(0, SCHED_FIFO, &param);
Todd Poynor3948f802013-07-09 19:35:14 -07001631 if (!init())
1632 mainloop();
1633
Rajeev Kumar70450032018-01-31 17:54:56 -08001634#ifdef LMKD_LOG_STATS
Rajeev Kumar1c669f72018-03-09 15:20:56 -08001635 statslog_destroy(&log_ctx);
Rajeev Kumar70450032018-01-31 17:54:56 -08001636#endif
1637
Todd Poynor3948f802013-07-09 19:35:14 -07001638 ALOGI("exiting");
1639 return 0;
1640}