Todd Poynor3948f802013-07-09 19:35:14 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "lowmemorykiller"
18
Wei Wang2d95c102018-11-21 00:11:44 -080019#include <dirent.h>
Todd Poynor3948f802013-07-09 19:35:14 -070020#include <errno.h>
Robert Beneac47f2992017-08-21 15:18:31 -070021#include <inttypes.h>
Suren Baghdasaryan4311d1e2018-03-20 16:03:29 -070022#include <pwd.h>
Mark Salyzyncfd5b082016-10-17 14:28:00 -070023#include <sched.h>
Todd Poynor3948f802013-07-09 19:35:14 -070024#include <signal.h>
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -070025#include <stdbool.h>
Todd Poynor3948f802013-07-09 19:35:14 -070026#include <stdlib.h>
27#include <string.h>
Mark Salyzyne6ed68b2014-04-30 13:36:35 -070028#include <sys/cdefs.h>
Todd Poynor3948f802013-07-09 19:35:14 -070029#include <sys/epoll.h>
30#include <sys/eventfd.h>
Colin Crossb28ff912014-07-11 17:15:44 -070031#include <sys/mman.h>
Wei Wang2d95c102018-11-21 00:11:44 -080032#include <sys/resource.h>
Todd Poynor3948f802013-07-09 19:35:14 -070033#include <sys/socket.h>
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -080034#include <sys/sysinfo.h>
Wei Wang2d95c102018-11-21 00:11:44 -080035#include <sys/time.h>
Mark Salyzyn721d7c72018-03-21 12:24:58 -070036#include <sys/types.h>
Suren Baghdasaryan314a5052018-07-24 17:13:06 -070037#include <time.h>
Mark Salyzyne6ed68b2014-04-30 13:36:35 -070038#include <unistd.h>
39
Robert Benea58891d52017-07-31 17:15:20 -070040#include <cutils/properties.h>
Wei Wang2d95c102018-11-21 00:11:44 -080041#include <cutils/sched_policy.h>
Todd Poynor3948f802013-07-09 19:35:14 -070042#include <cutils/sockets.h>
Suren Baghdasaryan0f100512018-01-24 16:51:41 -080043#include <lmkd.h>
Mark Salyzyn30f991f2017-01-10 13:19:54 -080044#include <log/log.h>
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -070045#include <log/log_event_list.h>
Suren Baghdasaryan314a5052018-07-24 17:13:06 -070046#include <log/log_time.h>
Suren Baghdasaryan77122e52019-01-08 12:54:48 -080047#include <psi/psi.h>
Wei Wang2d95c102018-11-21 00:11:44 -080048#include <system/thread_defs.h>
Mark Salyzyne6ed68b2014-04-30 13:36:35 -070049
Rajeev Kumar70450032018-01-31 17:54:56 -080050#ifdef LMKD_LOG_STATS
Yao Chen389aee12018-05-02 11:19:27 -070051#include "statslog.h"
Rajeev Kumar70450032018-01-31 17:54:56 -080052#endif
53
Suren Baghdasaryanc7135592018-01-04 10:43:58 -080054/*
55 * Define LMKD_TRACE_KILLS to record lmkd kills in kernel traces
56 * to profile and correlate with OOM kills
57 */
58#ifdef LMKD_TRACE_KILLS
59
60#define ATRACE_TAG ATRACE_TAG_ALWAYS
61#include <cutils/trace.h>
62
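/*
 * TRACE_KILL_START/END write the pid to an atrace counter named after the
 * calling function when a kill starts, and reset it to 0 when the kill ends.
 */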
63#define TRACE_KILL_START(pid) ATRACE_INT(__FUNCTION__, pid);
64#define TRACE_KILL_END() ATRACE_INT(__FUNCTION__, 0);
65
66#else /* LMKD_TRACE_KILLS */
67
Daniel Colascione347f6b42018-02-12 11:24:47 -080068#define TRACE_KILL_START(pid) ((void)(pid))
69#define TRACE_KILL_END() ((void)0)
Suren Baghdasaryanc7135592018-01-04 10:43:58 -080070
71#endif /* LMKD_TRACE_KILLS */
72
Mark Salyzyne6ed68b2014-04-30 13:36:35 -070073#ifndef __unused
74#define __unused __attribute__((__unused__))
75#endif
Todd Poynor3948f802013-07-09 19:35:14 -070076
77#define MEMCG_SYSFS_PATH "/dev/memcg/"
Robert Beneac47f2992017-08-21 15:18:31 -070078#define MEMCG_MEMORY_USAGE "/dev/memcg/memory.usage_in_bytes"
79#define MEMCG_MEMORYSW_USAGE "/dev/memcg/memory.memsw.usage_in_bytes"
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -070080#define ZONEINFO_PATH "/proc/zoneinfo"
81#define MEMINFO_PATH "/proc/meminfo"
Suren Baghdasaryan0082ef12019-07-02 15:52:07 -070082#define PROC_STATUS_TGID_FIELD "Tgid:"
Todd Poynor3948f802013-07-09 19:35:14 -070083#define LINE_MAX 128
84
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -070085/* Android Logger event logtags (see event.logtags) */
86#define MEMINFO_LOG_TAG 10195355
87
Mark Salyzyn64d97d82018-04-09 09:50:32 -070088/* gid containing AID_SYSTEM required */
Todd Poynor3948f802013-07-09 19:35:14 -070089#define INKERNEL_MINFREE_PATH "/sys/module/lowmemorykiller/parameters/minfree"
90#define INKERNEL_ADJ_PATH "/sys/module/lowmemorykiller/parameters/adj"
91
92#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
Robert Benea673e2762017-06-01 16:32:31 -070093#define EIGHT_MEGA (1 << 23)
Todd Poynor3948f802013-07-09 19:35:14 -070094
Suren Baghdasaryan314a5052018-07-24 17:13:06 -070095#define TARGET_UPDATE_MIN_INTERVAL_MS 1000
96
97#define NS_PER_MS (NS_PER_SEC / MS_PER_SEC)
Suren Baghdasaryan77122e52019-01-08 12:54:48 -080098#define US_PER_MS (US_PER_SEC / MS_PER_SEC)
Suren Baghdasaryan314a5052018-07-24 17:13:06 -070099
Suren Baghdasaryan4311d1e2018-03-20 16:03:29 -0700100/* Defined as ProcessList.SYSTEM_ADJ in ProcessList.java */
101#define SYSTEM_ADJ (-900)
102
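/*
 * Two-level stringification so that macro arguments are expanded before being
 * stringified, e.g. STRINGIFY(LINE_MAX) yields "128" rather than "LINE_MAX".
 */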
Greg Kaiserf0da9b02018-03-23 14:16:12 -0700103#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
104#define STRINGIFY_INTERNAL(x) #x
105
Suren Baghdasaryan77122e52019-01-08 12:54:48 -0800106/*
107 * PSI monitor tracking window size.
108 * PSI monitor generates events at most once per window,
109 * therefore we poll memory state for the duration of
110 * PSI_WINDOW_SIZE_MS after the event happens.
111 */
112#define PSI_WINDOW_SIZE_MS 1000
113/* Polling period after initial PSI signal */
Suren Baghdasaryan5db6a842019-03-26 13:21:45 -0700114#define PSI_POLL_PERIOD_MS 10
Suren Baghdasaryan77122e52019-01-08 12:54:48 -0800115
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -0700116#define min(a, b) (((a) < (b)) ? (a) : (b))
117
Suren Baghdasaryan36934412018-09-05 15:46:32 -0700118#define FAIL_REPORT_RLIMIT_MS 1000
119
Todd Poynor3948f802013-07-09 19:35:14 -0700120/* default to old in-kernel interface if no memory pressure events */
Mark Salyzyn721d7c72018-03-21 12:24:58 -0700121static bool use_inkernel_interface = true;
Robert Benea164baeb2017-09-11 16:53:28 -0700122static bool has_inkernel_module;
Todd Poynor3948f802013-07-09 19:35:14 -0700123
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -0800124/* memory pressure levels */
125enum vmpressure_level {
126 VMPRESS_LEVEL_LOW = 0,
127 VMPRESS_LEVEL_MEDIUM,
128 VMPRESS_LEVEL_CRITICAL,
129 VMPRESS_LEVEL_COUNT
130};
Todd Poynor3948f802013-07-09 19:35:14 -0700131
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -0800132static const char *level_name[] = {
133 "low",
134 "medium",
135 "critical"
136};
137
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -0800138struct {
Suren Baghdasaryan9926e572018-04-13 13:41:12 -0700139 int64_t min_nr_free_pages; /* recorded but not used yet */
140 int64_t max_nr_free_pages;
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -0800141} low_pressure_mem = { -1, -1 };
142
Suren Baghdasaryan77122e52019-01-08 12:54:48 -0800143struct psi_threshold {
144 enum psi_stall_type stall_type;
145 int threshold_ms;
146};
147
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -0800148static int level_oomadj[VMPRESS_LEVEL_COUNT];
Suren Baghdasaryane82e15c2018-01-04 09:16:21 -0800149static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 };
Robert Beneac47f2992017-08-21 15:18:31 -0700150static bool debug_process_killing;
151static bool enable_pressure_upgrade;
152static int64_t upgrade_pressure;
Robert Benea6e8e7102017-09-13 15:20:30 -0700153static int64_t downgrade_pressure;
Suren Baghdasaryanff61afb2018-04-13 11:45:38 -0700154static bool low_ram_device;
Suren Baghdasaryan662492a2017-12-08 13:17:06 -0800155static bool kill_heaviest_task;
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -0800156static unsigned long kill_timeout_ms;
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -0700157static bool use_minfree_levels;
Suren Baghdasaryance13cb52018-06-19 18:38:12 -0700158static bool per_app_memcg;
Vic Yang360a1132018-08-07 10:18:22 -0700159static int swap_free_low_percentage;
Suren Baghdasaryan77122e52019-01-08 12:54:48 -0800160static bool use_psi_monitors = false;
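/* Default PSI stall thresholds, indexed by vmpressure_level: low, medium, critical */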
161static struct psi_threshold psi_thresholds[VMPRESS_LEVEL_COUNT] = {
162 { PSI_SOME, 70 }, /* 70ms out of 1sec for partial stall */
163 { PSI_SOME, 100 }, /* 100ms out of 1sec for partial stall */
164 { PSI_FULL, 70 }, /* 70ms out of 1sec for complete stall */
165};
Robert Benea58891d52017-07-31 17:15:20 -0700166
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -0700167static android_log_context ctx;
168
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -0700169enum polling_update {
170 POLLING_DO_NOT_CHANGE,
171 POLLING_START,
172 POLLING_STOP,
173};
174
175/*
176 * Data used for periodic polling for the memory state of the device.
177 * Note that when system is not polling poll_handler is set to NULL,
178 * when polling starts poll_handler gets set and is reset back to
179 * NULL when polling stops.
180 */
181struct polling_params {
182 struct event_handler_info* poll_handler;
183 struct timespec poll_start_tm;
184 struct timespec last_poll_tm;
185 int polling_interval_ms;
186 enum polling_update update;
187};
188
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800189/* data required to handle events */
190struct event_handler_info {
191 int data;
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -0700192 void (*handler)(int data, uint32_t events, struct polling_params *poll_params);
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800193};
Todd Poynor3948f802013-07-09 19:35:14 -0700194
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -0800195/* data required to handle socket events */
196struct sock_event_handler_info {
197 int sock;
198 struct event_handler_info handler_info;
199};
200
201/* max supported number of data connections */
202#define MAX_DATA_CONN 2
203
204/* socket event handler data */
205static struct sock_event_handler_info ctrl_sock;
206static struct sock_event_handler_info data_sock[MAX_DATA_CONN];
207
208/* vmpressure event handler data */
209static struct event_handler_info vmpressure_hinfo[VMPRESS_LEVEL_COUNT];
210
Jim Blackler3947c932019-04-26 11:18:29 +0100211/* 3 memory pressure levels, 1 ctrl listen socket, 2 ctrl data sockets, 1 lmk event fd */
212#define MAX_EPOLL_EVENTS (2 + MAX_DATA_CONN + VMPRESS_LEVEL_COUNT)
Todd Poynor3948f802013-07-09 19:35:14 -0700213static int epollfd;
214static int maxevents;
215
Chong Zhang0a4acdf2015-10-14 16:19:53 -0700216/* OOM score values used by both kernel and framework */
Todd Poynor16b60992013-09-16 19:26:47 -0700217#define OOM_SCORE_ADJ_MIN (-1000)
218#define OOM_SCORE_ADJ_MAX 1000
219
Todd Poynor3948f802013-07-09 19:35:14 -0700220static int lowmem_adj[MAX_TARGETS];
221static int lowmem_minfree[MAX_TARGETS];
222static int lowmem_targets_size;
223
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700224/* Fields to parse in /proc/zoneinfo */
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700225/* zoneinfo per-zone fields */
226enum zoneinfo_zone_field {
227 ZI_ZONE_NR_FREE_PAGES = 0,
228 ZI_ZONE_MIN,
229 ZI_ZONE_LOW,
230 ZI_ZONE_HIGH,
231 ZI_ZONE_PRESENT,
232 ZI_ZONE_NR_FREE_CMA,
233 ZI_ZONE_FIELD_COUNT
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700234};
235
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700236static const char* const zoneinfo_zone_field_names[ZI_ZONE_FIELD_COUNT] = {
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700237 "nr_free_pages",
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700238 "min",
239 "low",
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700240 "high",
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700241 "present",
242 "nr_free_cma",
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700243};
244
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700245/* zoneinfo per-zone special fields */
246enum zoneinfo_zone_spec_field {
247 ZI_ZONE_SPEC_PROTECTION = 0,
248 ZI_ZONE_SPEC_PAGESETS,
249 ZI_ZONE_SPEC_FIELD_COUNT,
250};
251
252static const char* const zoneinfo_zone_spec_field_names[ZI_ZONE_SPEC_FIELD_COUNT] = {
253 "protection:",
254 "pagesets",
255};
256
257/* see __MAX_NR_ZONES definition in kernel mmzone.h */
258#define MAX_NR_ZONES 6
259
260union zoneinfo_zone_fields {
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700261 struct {
262 int64_t nr_free_pages;
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700263 int64_t min;
264 int64_t low;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700265 int64_t high;
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700266 int64_t present;
267 int64_t nr_free_cma;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700268 } field;
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700269 int64_t arr[ZI_ZONE_FIELD_COUNT];
270};
271
272struct zoneinfo_zone {
273 union zoneinfo_zone_fields fields;
274 int64_t protection[MAX_NR_ZONES];
275 int64_t max_protection;
276};
277
278/* zoneinfo per-node fields */
279enum zoneinfo_node_field {
280 ZI_NODE_NR_INACTIVE_FILE = 0,
281 ZI_NODE_NR_ACTIVE_FILE,
282 ZI_NODE_WORKINGSET_REFAULT,
283 ZI_NODE_FIELD_COUNT
284};
285
286static const char* const zoneinfo_node_field_names[ZI_NODE_FIELD_COUNT] = {
287 "nr_inactive_file",
288 "nr_active_file",
289 "workingset_refault",
290};
291
292union zoneinfo_node_fields {
293 struct {
294 int64_t nr_inactive_file;
295 int64_t nr_active_file;
296 int64_t workingset_refault;
297 } field;
298 int64_t arr[ZI_NODE_FIELD_COUNT];
299};
300
301struct zoneinfo_node {
302 int id;
303 int zone_count;
304 struct zoneinfo_zone zones[MAX_NR_ZONES];
305 union zoneinfo_node_fields fields;
306};
307
308/* for now two memory nodes is more than enough */
309#define MAX_NR_NODES 2
310
311struct zoneinfo {
312 int node_count;
313 struct zoneinfo_node nodes[MAX_NR_NODES];
314 int64_t totalreserve_pages;
315 int64_t total_inactive_file;
316 int64_t total_active_file;
317 int64_t total_workingset_refault;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700318};
319
320/* Fields to parse in /proc/meminfo */
321enum meminfo_field {
322 MI_NR_FREE_PAGES = 0,
323 MI_CACHED,
324 MI_SWAP_CACHED,
325 MI_BUFFERS,
326 MI_SHMEM,
327 MI_UNEVICTABLE,
Vic Yang360a1132018-08-07 10:18:22 -0700328 MI_TOTAL_SWAP,
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700329 MI_FREE_SWAP,
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -0700330 MI_ACTIVE_ANON,
331 MI_INACTIVE_ANON,
332 MI_ACTIVE_FILE,
333 MI_INACTIVE_FILE,
334 MI_SRECLAIMABLE,
335 MI_SUNRECLAIM,
336 MI_KERNEL_STACK,
337 MI_PAGE_TABLES,
338 MI_ION_HELP,
339 MI_ION_HELP_POOL,
340 MI_CMA_FREE,
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700341 MI_FIELD_COUNT
342};
343
344static const char* const meminfo_field_names[MI_FIELD_COUNT] = {
345 "MemFree:",
346 "Cached:",
347 "SwapCached:",
348 "Buffers:",
349 "Shmem:",
350 "Unevictable:",
Vic Yang360a1132018-08-07 10:18:22 -0700351 "SwapTotal:",
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700352 "SwapFree:",
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -0700353 "Active(anon):",
354 "Inactive(anon):",
355 "Active(file):",
356 "Inactive(file):",
357 "SReclaimable:",
358 "SUnreclaim:",
359 "KernelStack:",
360 "PageTables:",
361 "ION_heap:",
362 "ION_heap_pool:",
363 "CmaFree:",
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700364};
365
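/*
 * The union overlays the named fields with an int64_t array so that parsed
 * values can be stored by index (mi->arr[field_idx], see match_field()) while
 * the rest of the code reads them by name (mi->field.nr_free_pages etc).
 */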
366union meminfo {
367 struct {
368 int64_t nr_free_pages;
369 int64_t cached;
370 int64_t swap_cached;
371 int64_t buffers;
372 int64_t shmem;
373 int64_t unevictable;
Vic Yang360a1132018-08-07 10:18:22 -0700374 int64_t total_swap;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700375 int64_t free_swap;
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -0700376 int64_t active_anon;
377 int64_t inactive_anon;
378 int64_t active_file;
379 int64_t inactive_file;
380 int64_t sreclaimable;
381 int64_t sunreclaimable;
382 int64_t kernel_stack;
383 int64_t page_tables;
384 int64_t ion_heap;
385 int64_t ion_heap_pool;
386 int64_t cma_free;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700387 /* fields below are calculated rather than read from the file */
388 int64_t nr_file_pages;
389 } field;
390 int64_t arr[MI_FIELD_COUNT];
391};
392
393enum field_match_result {
394 NO_MATCH,
395 PARSE_FAIL,
396 PARSE_SUCCESS
397};
398
Todd Poynor3948f802013-07-09 19:35:14 -0700399struct adjslot_list {
400 struct adjslot_list *next;
401 struct adjslot_list *prev;
402};
403
404struct proc {
405 struct adjslot_list asl;
406 int pid;
Colin Crossfbb78c62014-06-13 14:52:43 -0700407 uid_t uid;
Todd Poynor3948f802013-07-09 19:35:14 -0700408 int oomadj;
409 struct proc *pidhash_next;
410};
411
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700412struct reread_data {
413 const char* const filename;
414 int fd;
415};
416
Rajeev Kumar70450032018-01-31 17:54:56 -0800417#ifdef LMKD_LOG_STATS
Rajeev Kumar70450032018-01-31 17:54:56 -0800418static bool enable_stats_log;
419static android_log_context log_ctx;
420#endif
421
Todd Poynor3948f802013-07-09 19:35:14 -0700422#define PIDHASH_SZ 1024
423static struct proc *pidhash[PIDHASH_SZ];
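/* Simple hash for the pid table: fold the high byte into the low bits and mask to the power-of-two table size */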
424#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
425
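/*
 * Map an oom_score_adj value in [OOM_SCORE_ADJ_MIN..OOM_SCORE_ADJ_MAX] to a
 * slot index in [0..ADJTOSLOT_COUNT-1], e.g. -1000 -> 0, 0 -> 1000, 1000 -> 2000.
 */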
Chih-Hung Hsiehdaa13ea2016-05-19 16:02:22 -0700426#define ADJTOSLOT(adj) ((adj) + -OOM_SCORE_ADJ_MIN)
Suren Baghdasaryand4a29902018-10-12 11:07:40 -0700427#define ADJTOSLOT_COUNT (ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1)
428static struct adjslot_list procadjslot_list[ADJTOSLOT_COUNT];
429
430#define MAX_DISTINCT_OOM_ADJ 32
431#define KILLCNT_INVALID_IDX 0xFF
432/*
433 * Because killcnt array is sparse a two-level indirection is used
434 * to keep the size small. killcnt_idx stores index of the element in
435 * killcnt array. Index KILLCNT_INVALID_IDX indicates an unused slot.
436 */
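/*
 * Illustrative example: the first kill at oomadj 900 takes the first free
 * index, so killcnt_idx[ADJTOSLOT(900)] == 0, killcnt[0] == 1 and
 * killcnt_total == 1; a later kill at a different oomadj takes the next free
 * index, while repeated kills at an already-seen oomadj only bump its counter.
 */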
437static uint8_t killcnt_idx[ADJTOSLOT_COUNT];
438static uint16_t killcnt[MAX_DISTINCT_OOM_ADJ];
439static int killcnt_free_idx = 0;
440static uint32_t killcnt_total = 0;
Todd Poynor3948f802013-07-09 19:35:14 -0700441
Todd Poynor3948f802013-07-09 19:35:14 -0700442/* PAGE_SIZE / 1024 */
443static long page_k;
444
Jim Blacklerd2da8142019-09-10 15:30:05 +0100445static char* proc_get_name(int pid);
446static void poll_kernel();
447
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700448static bool parse_int64(const char* str, int64_t* ret) {
449 char* endptr;
450 long long val = strtoll(str, &endptr, 10);
451 if (str == endptr || val > INT64_MAX) {
452 return false;
453 }
454 *ret = (int64_t)val;
455 return true;
456}
457
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700458static int find_field(const char* name, const char* const field_names[], int field_count) {
459 for (int i = 0; i < field_count; i++) {
460 if (!strcmp(name, field_names[i])) {
461 return i;
462 }
463 }
464 return -1;
465}
466
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700467static enum field_match_result match_field(const char* cp, const char* ap,
468 const char* const field_names[],
469 int field_count, int64_t* field,
470 int *field_idx) {
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700471 int i = find_field(cp, field_names, field_count);
472 if (i < 0) {
473 return NO_MATCH;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700474 }
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -0700475 *field_idx = i;
476 return parse_int64(ap, field) ? PARSE_SUCCESS : PARSE_FAIL;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -0700477}
478
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700479/*
480 * Read file content from the beginning up to max_len bytes or EOF
481 * whichever happens first.
482 */
Colin Crossce85d952014-07-11 17:53:27 -0700483static ssize_t read_all(int fd, char *buf, size_t max_len)
484{
485 ssize_t ret = 0;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700486 off_t offset = 0;
Colin Crossce85d952014-07-11 17:53:27 -0700487
488 while (max_len > 0) {
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700489 ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
Colin Crossce85d952014-07-11 17:53:27 -0700490 if (r == 0) {
491 break;
492 }
493 if (r == -1) {
494 return -1;
495 }
496 ret += r;
497 buf += r;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700498 offset += r;
Colin Crossce85d952014-07-11 17:53:27 -0700499 max_len -= r;
500 }
501
502 return ret;
503}
504
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700505/*
506 * Read a new or already opened file from the beginning.
507 * If the file has not been opened yet data->fd should be set to -1.
 508 * To be used with files which are read often, possibly during high
 509 * memory pressure, to minimize file opening, which by itself requires kernel
 510 * memory allocation and might result in a stall on a memory-stressed system.
511 */
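/*
 * Typical usage (see meminfo_parse() below):
 *     static struct reread_data file_data = { .filename = MEMINFO_PATH, .fd = -1 };
 *     char *buf = reread_file(&file_data);
 * The file descriptor is kept open between calls so repeated reads avoid open().
 */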
Suren Baghdasaryana77b3272019-07-15 13:35:04 -0700512static char *reread_file(struct reread_data *data) {
513 /* start with page-size buffer and increase if needed */
514 static ssize_t buf_size = PAGE_SIZE;
515 static char *new_buf, *buf = NULL;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700516 ssize_t size;
517
518 if (data->fd == -1) {
Suren Baghdasaryana77b3272019-07-15 13:35:04 -0700519 /* First-time buffer initialization */
520 if (!buf && (buf = malloc(buf_size)) == NULL) {
521 return NULL;
522 }
523
524 data->fd = TEMP_FAILURE_RETRY(open(data->filename, O_RDONLY | O_CLOEXEC));
525 if (data->fd < 0) {
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700526 ALOGE("%s open: %s", data->filename, strerror(errno));
Suren Baghdasaryana77b3272019-07-15 13:35:04 -0700527 return NULL;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700528 }
529 }
530
Suren Baghdasaryana77b3272019-07-15 13:35:04 -0700531 while (true) {
532 size = read_all(data->fd, buf, buf_size - 1);
533 if (size < 0) {
534 ALOGE("%s read: %s", data->filename, strerror(errno));
535 close(data->fd);
536 data->fd = -1;
537 return NULL;
538 }
539 if (size < buf_size - 1) {
540 break;
541 }
542 /*
543 * Since we are reading /proc files we can't use fstat to find out
544 * the real size of the file. Double the buffer size and keep retrying.
545 */
546 if ((new_buf = realloc(buf, buf_size * 2)) == NULL) {
547 errno = ENOMEM;
548 return NULL;
549 }
550 buf = new_buf;
551 buf_size *= 2;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700552 }
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700553 buf[size] = 0;
554
Suren Baghdasaryana77b3272019-07-15 13:35:04 -0700555 return buf;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -0700556}
557
Todd Poynor3948f802013-07-09 19:35:14 -0700558static struct proc *pid_lookup(int pid) {
559 struct proc *procp;
560
561 for (procp = pidhash[pid_hashfn(pid)]; procp && procp->pid != pid;
562 procp = procp->pidhash_next)
563 ;
564
565 return procp;
566}
567
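/*
 * Circular doubly-linked list helpers used for procadjslot_list. Each list
 * head is a self-linked sentinel, so adjslot_tail() returns NULL for an
 * empty list.
 */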
568static void adjslot_insert(struct adjslot_list *head, struct adjslot_list *new)
569{
570 struct adjslot_list *next = head->next;
571 new->prev = head;
572 new->next = next;
573 next->prev = new;
574 head->next = new;
575}
576
577static void adjslot_remove(struct adjslot_list *old)
578{
579 struct adjslot_list *prev = old->prev;
580 struct adjslot_list *next = old->next;
581 next->prev = prev;
582 prev->next = next;
583}
584
585static struct adjslot_list *adjslot_tail(struct adjslot_list *head) {
586 struct adjslot_list *asl = head->prev;
587
588 return asl == head ? NULL : asl;
589}
590
591static void proc_slot(struct proc *procp) {
592 int adjslot = ADJTOSLOT(procp->oomadj);
593
594 adjslot_insert(&procadjslot_list[adjslot], &procp->asl);
595}
596
597static void proc_unslot(struct proc *procp) {
598 adjslot_remove(&procp->asl);
599}
600
601static void proc_insert(struct proc *procp) {
602 int hval = pid_hashfn(procp->pid);
603
604 procp->pidhash_next = pidhash[hval];
605 pidhash[hval] = procp;
606 proc_slot(procp);
607}
608
609static int pid_remove(int pid) {
610 int hval = pid_hashfn(pid);
611 struct proc *procp;
612 struct proc *prevp;
613
614 for (procp = pidhash[hval], prevp = NULL; procp && procp->pid != pid;
615 procp = procp->pidhash_next)
616 prevp = procp;
617
618 if (!procp)
619 return -1;
620
621 if (!prevp)
622 pidhash[hval] = procp->pidhash_next;
623 else
624 prevp->pidhash_next = procp->pidhash_next;
625
626 proc_unslot(procp);
627 free(procp);
628 return 0;
629}
630
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700631/*
632 * Write a string to a file.
633 * Returns false if the file does not exist.
634 */
635static bool writefilestring(const char *path, const char *s,
636 bool err_if_missing) {
Nick Kralevichc68c8862015-12-18 20:52:37 -0800637 int fd = open(path, O_WRONLY | O_CLOEXEC);
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700638 ssize_t len = strlen(s);
639 ssize_t ret;
Todd Poynor3948f802013-07-09 19:35:14 -0700640
641 if (fd < 0) {
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700642 if (err_if_missing) {
643 ALOGE("Error opening %s; errno=%d", path, errno);
644 }
645 return false;
Todd Poynor3948f802013-07-09 19:35:14 -0700646 }
647
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700648 ret = TEMP_FAILURE_RETRY(write(fd, s, len));
Todd Poynor3948f802013-07-09 19:35:14 -0700649 if (ret < 0) {
650 ALOGE("Error writing %s; errno=%d", path, errno);
651 } else if (ret < len) {
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700652 ALOGE("Short write on %s; length=%zd", path, ret);
Todd Poynor3948f802013-07-09 19:35:14 -0700653 }
654
655 close(fd);
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700656 return true;
Todd Poynor3948f802013-07-09 19:35:14 -0700657}
658
Suren Baghdasaryan314a5052018-07-24 17:13:06 -0700659static inline long get_time_diff_ms(struct timespec *from,
660 struct timespec *to) {
661 return (to->tv_sec - from->tv_sec) * (long)MS_PER_SEC +
662 (to->tv_nsec - from->tv_nsec) / (long)NS_PER_MS;
663}
664
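/*
 * Read the thread group id from the "Tgid:" field of /proc/<pid>/status.
 * Returns -1 if the file cannot be read or the field is not found.
 */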
Suren Baghdasaryan0082ef12019-07-02 15:52:07 -0700665static int proc_get_tgid(int pid) {
666 char path[PATH_MAX];
667 char buf[PAGE_SIZE];
668 int fd;
669 ssize_t size;
670 char *pos;
671 int64_t tgid = -1;
672
673 snprintf(path, PATH_MAX, "/proc/%d/status", pid);
674 fd = open(path, O_RDONLY | O_CLOEXEC);
675 if (fd < 0) {
676 return -1;
677 }
678
679 size = read_all(fd, buf, sizeof(buf) - 1);
680 if (size < 0) {
681 goto out;
682 }
683 buf[size] = 0;
684
685 pos = buf;
686 while (true) {
687 pos = strstr(pos, PROC_STATUS_TGID_FIELD);
688 /* Stop if TGID tag not found or found at the line beginning */
689 if (pos == NULL || pos == buf || pos[-1] == '\n') {
690 break;
691 }
692 pos++;
693 }
694
695 if (pos == NULL) {
696 goto out;
697 }
698
699 pos += strlen(PROC_STATUS_TGID_FIELD);
700 while (*pos == ' ') pos++;
701 parse_int64(pos, &tgid);
702
703out:
704 close(fd);
705 return (int)tgid;
706}
707
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800708static void cmd_procprio(LMKD_CTRL_PACKET packet) {
Todd Poynor3948f802013-07-09 19:35:14 -0700709 struct proc *procp;
710 char path[80];
711 char val[20];
Robert Benea673e2762017-06-01 16:32:31 -0700712 int soft_limit_mult;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800713 struct lmk_procprio params;
Suren Baghdasaryan4311d1e2018-03-20 16:03:29 -0700714 bool is_system_server;
715 struct passwd *pwdrec;
Suren Baghdasaryan0082ef12019-07-02 15:52:07 -0700716 int tgid;
Todd Poynor3948f802013-07-09 19:35:14 -0700717
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800718 lmkd_pack_get_procprio(packet, &params);
719
720 if (params.oomadj < OOM_SCORE_ADJ_MIN ||
721 params.oomadj > OOM_SCORE_ADJ_MAX) {
722 ALOGE("Invalid PROCPRIO oomadj argument %d", params.oomadj);
Todd Poynor3948f802013-07-09 19:35:14 -0700723 return;
724 }
725
Suren Baghdasaryan0082ef12019-07-02 15:52:07 -0700726 /* Check if registered process is a thread group leader */
727 tgid = proc_get_tgid(params.pid);
728 if (tgid >= 0 && tgid != params.pid) {
729 ALOGE("Attempt to register a task that is not a thread group leader (tid %d, tgid %d)",
730 params.pid, tgid);
731 return;
732 }
733
Mark Salyzyn64d97d82018-04-09 09:50:32 -0700734 /* gid containing AID_READPROC required */
735 /* CAP_SYS_RESOURCE required */
736 /* CAP_DAC_OVERRIDE required */
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800737 snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", params.pid);
738 snprintf(val, sizeof(val), "%d", params.oomadj);
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -0700739 if (!writefilestring(path, val, false)) {
740 ALOGW("Failed to open %s; errno=%d: process %d might have been killed",
741 path, errno, params.pid);
742 /* If this file does not exist the process is dead. */
743 return;
744 }
Todd Poynor3948f802013-07-09 19:35:14 -0700745
Mark Salyzyn721d7c72018-03-21 12:24:58 -0700746 if (use_inkernel_interface) {
Jim Blacklerd2da8142019-09-10 15:30:05 +0100747#ifdef LMKD_LOG_STATS
748 stats_store_taskname(params.pid, proc_get_name(params.pid));
749#endif
Todd Poynor3948f802013-07-09 19:35:14 -0700750 return;
Mark Salyzyn721d7c72018-03-21 12:24:58 -0700751 }
Todd Poynor3948f802013-07-09 19:35:14 -0700752
Suren Baghdasaryance13cb52018-06-19 18:38:12 -0700753 if (per_app_memcg) {
Suren Baghdasaryan20686f02018-05-18 14:42:00 -0700754 if (params.oomadj >= 900) {
755 soft_limit_mult = 0;
756 } else if (params.oomadj >= 800) {
757 soft_limit_mult = 0;
758 } else if (params.oomadj >= 700) {
759 soft_limit_mult = 0;
760 } else if (params.oomadj >= 600) {
761 // Launcher should be perceptible, don't kill it.
762 params.oomadj = 200;
763 soft_limit_mult = 1;
764 } else if (params.oomadj >= 500) {
765 soft_limit_mult = 0;
766 } else if (params.oomadj >= 400) {
767 soft_limit_mult = 0;
768 } else if (params.oomadj >= 300) {
769 soft_limit_mult = 1;
770 } else if (params.oomadj >= 200) {
Srinivas Paladugu3eb20bc2018-10-09 14:21:10 -0700771 soft_limit_mult = 8;
Suren Baghdasaryan20686f02018-05-18 14:42:00 -0700772 } else if (params.oomadj >= 100) {
773 soft_limit_mult = 10;
774 } else if (params.oomadj >= 0) {
775 soft_limit_mult = 20;
776 } else {
777 // Persistent processes will have a large
778 // soft limit 512MB.
779 soft_limit_mult = 64;
780 }
Robert Benea673e2762017-06-01 16:32:31 -0700781
Suren Baghdasaryan3862dd32018-05-21 19:48:47 -0700782 snprintf(path, sizeof(path), MEMCG_SYSFS_PATH
783 "apps/uid_%d/pid_%d/memory.soft_limit_in_bytes",
784 params.uid, params.pid);
Suren Baghdasaryan20686f02018-05-18 14:42:00 -0700785 snprintf(val, sizeof(val), "%d", soft_limit_mult * EIGHT_MEGA);
Suren Baghdasaryan3862dd32018-05-21 19:48:47 -0700786
787 /*
788 * system_server process has no memcg under /dev/memcg/apps but should be
789 * registered with lmkd. This is the best way so far to identify it.
790 */
791 is_system_server = (params.oomadj == SYSTEM_ADJ &&
792 (pwdrec = getpwnam("system")) != NULL &&
793 params.uid == pwdrec->pw_uid);
794 writefilestring(path, val, !is_system_server);
Robert Benea673e2762017-06-01 16:32:31 -0700795 }
796
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800797 procp = pid_lookup(params.pid);
Todd Poynor3948f802013-07-09 19:35:14 -0700798 if (!procp) {
799 procp = malloc(sizeof(struct proc));
800 if (!procp) {
801 // Oh, the irony. May need to rebuild our state.
802 return;
803 }
804
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800805 procp->pid = params.pid;
806 procp->uid = params.uid;
807 procp->oomadj = params.oomadj;
Todd Poynor3948f802013-07-09 19:35:14 -0700808 proc_insert(procp);
809 } else {
810 proc_unslot(procp);
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800811 procp->oomadj = params.oomadj;
Todd Poynor3948f802013-07-09 19:35:14 -0700812 proc_slot(procp);
813 }
814}
815
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800816static void cmd_procremove(LMKD_CTRL_PACKET packet) {
817 struct lmk_procremove params;
818
    lmkd_pack_get_procremove(packet, &params);

Mark Salyzyn721d7c72018-03-21 12:24:58 -0700819 if (use_inkernel_interface) {
Jim Blacklerd2da8142019-09-10 15:30:05 +0100820#ifdef LMKD_LOG_STATS
821 /* Perform an extra check before the pid is removed, after which it
822 * will be impossible for poll_kernel() to get the taskname. poll_kernel()
823 * is potentially a long-running blocking function; it handles AMS
824 * requests without blocking AMS. */
825 if (enable_stats_log) {
826 poll_kernel();
827 }
828 stats_remove_taskname(params.pid);
829#endif
Todd Poynor3948f802013-07-09 19:35:14 -0700830 return;
Mark Salyzyn721d7c72018-03-21 12:24:58 -0700831 }
Todd Poynor3948f802013-07-09 19:35:14 -0700832
Suren Baghdasaryan01063272018-10-12 11:28:33 -0700834 /*
835 * WARNING: After pid_remove() procp is freed and can't be used!
836 * Therefore placed at the end of the function.
837 */
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800838 pid_remove(params.pid);
Todd Poynor3948f802013-07-09 19:35:14 -0700839}
840
Suren Baghdasaryane3b60472018-10-10 14:17:17 -0700841static void cmd_procpurge() {
842 int i;
843 struct proc *procp;
844 struct proc *next;
845
846 if (use_inkernel_interface) {
Jim Blacklerd2da8142019-09-10 15:30:05 +0100847#ifdef LMKD_LOG_STATS
848 stats_purge_tasknames();
849#endif
Suren Baghdasaryane3b60472018-10-10 14:17:17 -0700850 return;
851 }
852
853 for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
854 procadjslot_list[i].next = &procadjslot_list[i];
855 procadjslot_list[i].prev = &procadjslot_list[i];
856 }
857
858 for (i = 0; i < PIDHASH_SZ; i++) {
859 procp = pidhash[i];
860 while (procp) {
861 next = procp->pidhash_next;
862 free(procp);
863 procp = next;
864 }
865 }
866 memset(&pidhash[0], 0, sizeof(pidhash));
867}
868
Suren Baghdasaryand4a29902018-10-12 11:07:40 -0700869static void inc_killcnt(int oomadj) {
870 int slot = ADJTOSLOT(oomadj);
871 uint8_t idx = killcnt_idx[slot];
872
873 if (idx == KILLCNT_INVALID_IDX) {
874 /* index is not assigned for this oomadj */
875 if (killcnt_free_idx < MAX_DISTINCT_OOM_ADJ) {
876 killcnt_idx[slot] = killcnt_free_idx;
877 killcnt[killcnt_free_idx] = 1;
878 killcnt_free_idx++;
879 } else {
880 ALOGW("Number of distinct oomadj levels exceeds %d",
881 MAX_DISTINCT_OOM_ADJ);
882 }
883 } else {
884 /*
885 * wraparound is highly unlikely and is detectable using total
886 * counter because it has to be equal to the sum of all counters
887 */
888 killcnt[idx]++;
889 }
890 /* increment total kill counter */
891 killcnt_total++;
892}
893
894static int get_killcnt(int min_oomadj, int max_oomadj) {
895 int slot;
896 int count = 0;
897
898 if (min_oomadj > max_oomadj)
899 return 0;
900
901 /* special case to get total kill count */
902 if (min_oomadj > OOM_SCORE_ADJ_MAX)
903 return killcnt_total;
904
905 while (min_oomadj <= max_oomadj &&
906 (slot = ADJTOSLOT(min_oomadj)) < ADJTOSLOT_COUNT) {
907 uint8_t idx = killcnt_idx[slot];
908 if (idx != KILLCNT_INVALID_IDX) {
909 count += killcnt[idx];
910 }
911 min_oomadj++;
912 }
913
914 return count;
915}
916
917static int cmd_getkillcnt(LMKD_CTRL_PACKET packet) {
918 struct lmk_getkillcnt params;
919
920 if (use_inkernel_interface) {
921 /* kernel driver does not expose this information */
922 return 0;
923 }
924
925 lmkd_pack_get_getkillcnt(packet, &params);
926
927 return get_killcnt(params.min_oomadj, params.max_oomadj);
928}
929
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800930static void cmd_target(int ntargets, LMKD_CTRL_PACKET packet) {
Todd Poynor3948f802013-07-09 19:35:14 -0700931 int i;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800932 struct lmk_target target;
Suren Baghdasaryan314a5052018-07-24 17:13:06 -0700933 char minfree_str[PROPERTY_VALUE_MAX];
934 char *pstr = minfree_str;
935 char *pend = minfree_str + sizeof(minfree_str);
936 static struct timespec last_req_tm;
937 struct timespec curr_tm;
Todd Poynor3948f802013-07-09 19:35:14 -0700938
Suren Baghdasaryan314a5052018-07-24 17:13:06 -0700939 if (ntargets < 1 || ntargets > (int)ARRAY_SIZE(lowmem_adj))
Todd Poynor3948f802013-07-09 19:35:14 -0700940 return;
941
Suren Baghdasaryan314a5052018-07-24 17:13:06 -0700942 /*
943 * Ratelimit minfree updates to once per TARGET_UPDATE_MIN_INTERVAL_MS
944 * to prevent DoS attacks
945 */
946 if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
947 ALOGE("Failed to get current time");
948 return;
949 }
950
951 if (get_time_diff_ms(&last_req_tm, &curr_tm) <
952 TARGET_UPDATE_MIN_INTERVAL_MS) {
953 ALOGE("Ignoring frequent updated to lmkd limits");
954 return;
955 }
956
957 last_req_tm = curr_tm;
958
Todd Poynor3948f802013-07-09 19:35:14 -0700959 for (i = 0; i < ntargets; i++) {
Suren Baghdasaryan0f100512018-01-24 16:51:41 -0800960 lmkd_pack_get_target(packet, i, &target);
961 lowmem_minfree[i] = target.minfree;
962 lowmem_adj[i] = target.oom_adj_score;
Suren Baghdasaryan314a5052018-07-24 17:13:06 -0700963
964 pstr += snprintf(pstr, pend - pstr, "%d:%d,", target.minfree,
965 target.oom_adj_score);
966 if (pstr >= pend) {
967 /* if no more space in the buffer then terminate the loop */
968 pstr = pend;
969 break;
970 }
Todd Poynor3948f802013-07-09 19:35:14 -0700971 }
972
973 lowmem_targets_size = ntargets;
974
Suren Baghdasaryan314a5052018-07-24 17:13:06 -0700975 /* Override the last extra comma */
976 pstr[-1] = '\0';
977 property_set("sys.lmk.minfree_levels", minfree_str);
978
Robert Benea164baeb2017-09-11 16:53:28 -0700979 if (has_inkernel_module) {
Todd Poynor3948f802013-07-09 19:35:14 -0700980 char minfreestr[128];
981 char killpriostr[128];
982
983 minfreestr[0] = '\0';
984 killpriostr[0] = '\0';
985
986 for (i = 0; i < lowmem_targets_size; i++) {
987 char val[40];
988
989 if (i) {
990 strlcat(minfreestr, ",", sizeof(minfreestr));
991 strlcat(killpriostr, ",", sizeof(killpriostr));
992 }
993
Robert Benea164baeb2017-09-11 16:53:28 -0700994 snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_minfree[i] : 0);
Todd Poynor3948f802013-07-09 19:35:14 -0700995 strlcat(minfreestr, val, sizeof(minfreestr));
Robert Benea164baeb2017-09-11 16:53:28 -0700996 snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_adj[i] : 0);
Todd Poynor3948f802013-07-09 19:35:14 -0700997 strlcat(killpriostr, val, sizeof(killpriostr));
998 }
999
Suren Baghdasaryan1ffa2462018-03-20 13:53:17 -07001000 writefilestring(INKERNEL_MINFREE_PATH, minfreestr, true);
1001 writefilestring(INKERNEL_ADJ_PATH, killpriostr, true);
Todd Poynor3948f802013-07-09 19:35:14 -07001002 }
1003}
1004
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001005static void ctrl_data_close(int dsock_idx) {
1006 struct epoll_event epev;
1007
1008 ALOGI("closing lmkd data connection");
1009 if (epoll_ctl(epollfd, EPOLL_CTL_DEL, data_sock[dsock_idx].sock, &epev) == -1) {
1010 // Log a warning and keep going
1011 ALOGW("epoll_ctl for data connection socket failed; errno=%d", errno);
1012 }
Todd Poynor3948f802013-07-09 19:35:14 -07001013 maxevents--;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001014
1015 close(data_sock[dsock_idx].sock);
1016 data_sock[dsock_idx].sock = -1;
Todd Poynor3948f802013-07-09 19:35:14 -07001017}
1018
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001019static int ctrl_data_read(int dsock_idx, char *buf, size_t bufsz) {
Todd Poynor3948f802013-07-09 19:35:14 -07001020 int ret = 0;
1021
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -07001022 ret = TEMP_FAILURE_RETRY(read(data_sock[dsock_idx].sock, buf, bufsz));
Todd Poynor3948f802013-07-09 19:35:14 -07001023
1024 if (ret == -1) {
1025 ALOGE("control data socket read failed; errno=%d", errno);
1026 } else if (ret == 0) {
1027 ALOGE("Got EOF on control data socket");
1028 ret = -1;
1029 }
1030
1031 return ret;
1032}
1033
Suren Baghdasaryand4a29902018-10-12 11:07:40 -07001034static int ctrl_data_write(int dsock_idx, char *buf, size_t bufsz) {
1035 int ret = 0;
1036
1037 ret = TEMP_FAILURE_RETRY(write(data_sock[dsock_idx].sock, buf, bufsz));
1038
1039 if (ret == -1) {
1040 ALOGE("control data socket write failed; errno=%d", errno);
1041 } else if (ret == 0) {
1042 ALOGE("Got EOF on control data socket");
1043 ret = -1;
1044 }
1045
1046 return ret;
1047}
1048
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001049static void ctrl_command_handler(int dsock_idx) {
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001050 LMKD_CTRL_PACKET packet;
Todd Poynor3948f802013-07-09 19:35:14 -07001051 int len;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001052 enum lmk_cmd cmd;
Todd Poynor3948f802013-07-09 19:35:14 -07001053 int nargs;
1054 int targets;
Suren Baghdasaryand4a29902018-10-12 11:07:40 -07001055 int kill_cnt;
Todd Poynor3948f802013-07-09 19:35:14 -07001056
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001057 len = ctrl_data_read(dsock_idx, (char *)packet, CTRL_PACKET_MAX_SIZE);
Todd Poynor3948f802013-07-09 19:35:14 -07001058 if (len <= 0)
1059 return;
1060
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001061 if (len < (int)sizeof(int)) {
1062 ALOGE("Wrong control socket read length len=%d", len);
1063 return;
1064 }
1065
1066 cmd = lmkd_pack_get_cmd(packet);
Todd Poynor3948f802013-07-09 19:35:14 -07001067 nargs = len / sizeof(int) - 1;
1068 if (nargs < 0)
1069 goto wronglen;
1070
Todd Poynor3948f802013-07-09 19:35:14 -07001071 switch(cmd) {
1072 case LMK_TARGET:
1073 targets = nargs / 2;
1074 if (nargs & 0x1 || targets > (int)ARRAY_SIZE(lowmem_adj))
1075 goto wronglen;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001076 cmd_target(targets, packet);
Todd Poynor3948f802013-07-09 19:35:14 -07001077 break;
1078 case LMK_PROCPRIO:
Colin Crossfbb78c62014-06-13 14:52:43 -07001079 if (nargs != 3)
Todd Poynor3948f802013-07-09 19:35:14 -07001080 goto wronglen;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001081 cmd_procprio(packet);
Todd Poynor3948f802013-07-09 19:35:14 -07001082 break;
1083 case LMK_PROCREMOVE:
1084 if (nargs != 1)
1085 goto wronglen;
Suren Baghdasaryan0f100512018-01-24 16:51:41 -08001086 cmd_procremove(packet);
Todd Poynor3948f802013-07-09 19:35:14 -07001087 break;
Suren Baghdasaryane3b60472018-10-10 14:17:17 -07001088 case LMK_PROCPURGE:
1089 if (nargs != 0)
1090 goto wronglen;
1091 cmd_procpurge();
1092 break;
Suren Baghdasaryand4a29902018-10-12 11:07:40 -07001093 case LMK_GETKILLCNT:
1094 if (nargs != 2)
1095 goto wronglen;
1096 kill_cnt = cmd_getkillcnt(packet);
1097 len = lmkd_pack_set_getkillcnt_repl(packet, kill_cnt);
1098 if (ctrl_data_write(dsock_idx, (char *)packet, len) != len)
1099 return;
1100 break;
Todd Poynor3948f802013-07-09 19:35:14 -07001101 default:
1102 ALOGE("Received unknown command code %d", cmd);
1103 return;
1104 }
1105
1106 return;
1107
1108wronglen:
1109 ALOGE("Wrong control socket read length cmd=%d len=%d", cmd, len);
1110}
1111
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07001112static void ctrl_data_handler(int data, uint32_t events,
1113 struct polling_params *poll_params __unused) {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001114 if (events & EPOLLIN) {
1115 ctrl_command_handler(data);
Todd Poynor3948f802013-07-09 19:35:14 -07001116 }
1117}
1118
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001119static int get_free_dsock() {
1120 for (int i = 0; i < MAX_DATA_CONN; i++) {
1121 if (data_sock[i].sock < 0) {
1122 return i;
1123 }
1124 }
1125 return -1;
1126}
Todd Poynor3948f802013-07-09 19:35:14 -07001127
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07001128static void ctrl_connect_handler(int data __unused, uint32_t events __unused,
1129 struct polling_params *poll_params __unused) {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001130 struct epoll_event epev;
1131 int free_dscock_idx = get_free_dsock();
1132
1133 if (free_dscock_idx < 0) {
1134 /*
 1135 * Number of data connections exceeded the max supported. This should not
 1136 * happen, but if it does we drop all existing connections and accept
 1137 * the new one. This prevents inactive connections from monopolizing the
 1138 * data sockets, and if we drop the ActivityManager connection it will
 1139 * immediately reconnect.
1140 */
1141 for (int i = 0; i < MAX_DATA_CONN; i++) {
1142 ctrl_data_close(i);
1143 }
1144 free_dscock_idx = 0;
Todd Poynor3948f802013-07-09 19:35:14 -07001145 }
1146
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001147 data_sock[free_dscock_idx].sock = accept(ctrl_sock.sock, NULL, NULL);
1148 if (data_sock[free_dscock_idx].sock < 0) {
Todd Poynor3948f802013-07-09 19:35:14 -07001149 ALOGE("lmkd control socket accept failed; errno=%d", errno);
1150 return;
1151 }
1152
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001153 ALOGI("lmkd data connection established");
1154 /* use data to store data connection idx */
1155 data_sock[free_dscock_idx].handler_info.data = free_dscock_idx;
1156 data_sock[free_dscock_idx].handler_info.handler = ctrl_data_handler;
Todd Poynor3948f802013-07-09 19:35:14 -07001157 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001158 epev.data.ptr = (void *)&(data_sock[free_dscock_idx].handler_info);
1159 if (epoll_ctl(epollfd, EPOLL_CTL_ADD, data_sock[free_dscock_idx].sock, &epev) == -1) {
Todd Poynor3948f802013-07-09 19:35:14 -07001160 ALOGE("epoll_ctl for data connection socket failed; errno=%d", errno);
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001161 ctrl_data_close(free_dscock_idx);
Todd Poynor3948f802013-07-09 19:35:14 -07001162 return;
1163 }
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001164 maxevents++;
Todd Poynor3948f802013-07-09 19:35:14 -07001165}
1166
Rajeev Kumar70450032018-01-31 17:54:56 -08001167#ifdef LMKD_LOG_STATS
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001168static void memory_stat_parse_line(char* line, struct memory_stat* mem_st) {
Greg Kaiserf0da9b02018-03-23 14:16:12 -07001169 char key[LINE_MAX + 1];
Rajeev Kumar70450032018-01-31 17:54:56 -08001170 int64_t value;
1171
Greg Kaiserf0da9b02018-03-23 14:16:12 -07001172 sscanf(line, "%" STRINGIFY(LINE_MAX) "s %" SCNd64 "", key, &value);
Rajeev Kumar70450032018-01-31 17:54:56 -08001173
1174 if (strcmp(key, "total_") < 0) {
1175 return;
1176 }
1177
1178 if (!strcmp(key, "total_pgfault"))
1179 mem_st->pgfault = value;
1180 else if (!strcmp(key, "total_pgmajfault"))
1181 mem_st->pgmajfault = value;
1182 else if (!strcmp(key, "total_rss"))
1183 mem_st->rss_in_bytes = value;
1184 else if (!strcmp(key, "total_cache"))
1185 mem_st->cache_in_bytes = value;
1186 else if (!strcmp(key, "total_swap"))
1187 mem_st->swap_in_bytes = value;
1188}
1189
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001190static int memory_stat_from_cgroup(struct memory_stat* mem_st, int pid, uid_t uid) {
Suren Baghdasaryan1d1c0022018-06-19 18:38:12 -07001191 FILE *fp;
1192 char buf[PATH_MAX];
Rajeev Kumar70450032018-01-31 17:54:56 -08001193
Suren Baghdasaryan1d1c0022018-06-19 18:38:12 -07001194 snprintf(buf, sizeof(buf), MEMCG_PROCESS_MEMORY_STAT_PATH, uid, pid);
Rajeev Kumar70450032018-01-31 17:54:56 -08001195
Suren Baghdasaryan1d1c0022018-06-19 18:38:12 -07001196 fp = fopen(buf, "r");
Rajeev Kumar70450032018-01-31 17:54:56 -08001197
Suren Baghdasaryan1d1c0022018-06-19 18:38:12 -07001198 if (fp == NULL) {
1199 ALOGE("%s open failed: %s", buf, strerror(errno));
1200 return -1;
1201 }
Rajeev Kumar70450032018-01-31 17:54:56 -08001202
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001203 while (fgets(buf, PAGE_SIZE, fp) != NULL) {
Suren Baghdasaryan1d1c0022018-06-19 18:38:12 -07001204 memory_stat_parse_line(buf, mem_st);
1205 }
1206 fclose(fp);
1207
1208 return 0;
Rajeev Kumar70450032018-01-31 17:54:56 -08001209}
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001210
1211static int memory_stat_from_procfs(struct memory_stat* mem_st, int pid) {
1212 char path[PATH_MAX];
1213 char buffer[PROC_STAT_BUFFER_SIZE];
1214 int fd, ret;
1215
1216 snprintf(path, sizeof(path), PROC_STAT_FILE_PATH, pid);
1217 if ((fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) {
1218 ALOGE("%s open failed: %s", path, strerror(errno));
1219 return -1;
1220 }
1221
1222 ret = read(fd, buffer, sizeof(buffer));
1223 if (ret < 0) {
1224 ALOGE("%s read failed: %s", path, strerror(errno));
1225 close(fd);
1226 return -1;
1227 }
1228 close(fd);
1229
1230 // field 10 is pgfault
1231 // field 12 is pgmajfault
Jim Blackler1417cdb2018-11-21 16:22:36 +00001232 // field 22 is starttime
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001233 // field 24 is rss_in_pages
Jim Blackler1417cdb2018-11-21 16:22:36 +00001234 int64_t pgfault = 0, pgmajfault = 0, starttime = 0, rss_in_pages = 0;
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001235 if (sscanf(buffer,
1236 "%*u %*s %*s %*d %*d %*d %*d %*d %*d %" SCNd64 " %*d "
1237 "%" SCNd64 " %*d %*u %*u %*d %*d %*d %*d %*d %*d "
Jim Blackler1417cdb2018-11-21 16:22:36 +00001238 "%" SCNd64 " %*d %" SCNd64 "",
1239 &pgfault, &pgmajfault, &starttime, &rss_in_pages) != 4) {
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001240 return -1;
1241 }
1242 mem_st->pgfault = pgfault;
1243 mem_st->pgmajfault = pgmajfault;
1244 mem_st->rss_in_bytes = (rss_in_pages * PAGE_SIZE);
Jim Blackler1417cdb2018-11-21 16:22:36 +00001245 mem_st->process_start_time_ns = starttime * (NS_PER_SEC / sysconf(_SC_CLK_TCK));
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001246 return 0;
1247}
Rajeev Kumar70450032018-01-31 17:54:56 -08001248#endif
1249
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001250/*
 1251 * /proc/zoneinfo parsing routines
1252 * Expected file format is:
1253 *
1254 * Node <node_id>, zone <zone_name>
1255 * (
1256 * per-node stats
1257 * (<per-node field name> <value>)+
1258 * )?
1259 * (pages free <value>
1260 * (<per-zone field name> <value>)+
1261 * pagesets
1262 * (<unused fields>)*
1263 * )+
1264 * ...
1265 */
1266static void zoneinfo_parse_protection(char *buf, struct zoneinfo_zone *zone) {
1267 int zone_idx;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001268 int64_t max = 0;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001269 char *save_ptr;
1270
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001271 for (buf = strtok_r(buf, "(), ", &save_ptr), zone_idx = 0;
1272 buf && zone_idx < MAX_NR_ZONES;
1273 buf = strtok_r(NULL, "), ", &save_ptr), zone_idx++) {
1274 long long zoneval = strtoll(buf, &buf, 0);
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001275 if (zoneval > max) {
1276 max = (zoneval > INT64_MAX) ? INT64_MAX : zoneval;
1277 }
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001278 zone->protection[zone_idx] = zoneval;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001279 }
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001280 zone->max_protection = max;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001281}
1282
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001283static int zoneinfo_parse_zone(char **buf, struct zoneinfo_zone *zone) {
1284 for (char *line = strtok_r(NULL, "\n", buf); line;
1285 line = strtok_r(NULL, "\n", buf)) {
1286 char *cp;
1287 char *ap;
1288 char *save_ptr;
1289 int64_t val;
1290 int field_idx;
1291 enum field_match_result match_res;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001292
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001293 cp = strtok_r(line, " ", &save_ptr);
1294 if (!cp) {
1295 return false;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001296 }
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001297
1298 field_idx = find_field(cp, zoneinfo_zone_spec_field_names, ZI_ZONE_SPEC_FIELD_COUNT);
1299 if (field_idx >= 0) {
1300 /* special field */
1301 if (field_idx == ZI_ZONE_SPEC_PAGESETS) {
 1302 /* no more fields we are interested in */
1303 return true;
1304 }
1305
1306 /* protection field */
1307 ap = strtok_r(NULL, ")", &save_ptr);
1308 if (ap) {
1309 zoneinfo_parse_protection(ap, zone);
1310 }
1311 continue;
1312 }
1313
1314 ap = strtok_r(NULL, " ", &save_ptr);
1315 if (!ap) {
1316 continue;
1317 }
1318
1319 match_res = match_field(cp, ap, zoneinfo_zone_field_names, ZI_ZONE_FIELD_COUNT,
1320 &val, &field_idx);
1321 if (match_res == PARSE_FAIL) {
1322 return false;
1323 }
1324 if (match_res == PARSE_SUCCESS) {
1325 zone->fields.arr[field_idx] = val;
1326 }
1327 if (field_idx == ZI_ZONE_PRESENT && val == 0) {
1328 /* zone is not populated, stop parsing it */
1329 return true;
1330 }
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001331 }
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001332 return false;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001333}
1334
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001335static int zoneinfo_parse_node(char **buf, struct zoneinfo_node *node) {
1336 int fields_to_match = ZI_NODE_FIELD_COUNT;
1337
1338 for (char *line = strtok_r(NULL, "\n", buf); line;
1339 line = strtok_r(NULL, "\n", buf)) {
1340 char *cp;
1341 char *ap;
1342 char *save_ptr;
1343 int64_t val;
1344 int field_idx;
1345 enum field_match_result match_res;
1346
1347 cp = strtok_r(line, " ", &save_ptr);
1348 if (!cp) {
1349 return false;
1350 }
1351
1352 ap = strtok_r(NULL, " ", &save_ptr);
1353 if (!ap) {
1354 return false;
1355 }
1356
1357 match_res = match_field(cp, ap, zoneinfo_node_field_names, ZI_NODE_FIELD_COUNT,
1358 &val, &field_idx);
1359 if (match_res == PARSE_FAIL) {
1360 return false;
1361 }
1362 if (match_res == PARSE_SUCCESS) {
1363 node->fields.arr[field_idx] = val;
1364 fields_to_match--;
1365 if (!fields_to_match) {
1366 return true;
1367 }
1368 }
1369 }
1370 return false;
1371}
1372
1373static int zoneinfo_parse(struct zoneinfo *zi) {
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001374 static struct reread_data file_data = {
1375 .filename = ZONEINFO_PATH,
1376 .fd = -1,
1377 };
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07001378 char *buf;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001379 char *save_ptr;
1380 char *line;
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001381 char zone_name[LINE_MAX];
1382 struct zoneinfo_node *node = NULL;
1383 int node_idx = 0;
1384 int zone_idx = 0;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001385
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001386 memset(zi, 0, sizeof(struct zoneinfo));
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001387
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07001388 if ((buf = reread_file(&file_data)) == NULL) {
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001389 return -1;
1390 }
1391
1392 for (line = strtok_r(buf, "\n", &save_ptr); line;
1393 line = strtok_r(NULL, "\n", &save_ptr)) {
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001394 int node_id;
1395 if (sscanf(line, "Node %d, zone %" STRINGIFY(LINE_MAX) "s", &node_id, zone_name) == 2) {
1396 if (!node || node->id != node_id) {
1397 /* new node is found */
1398 if (node) {
1399 node->zone_count = zone_idx + 1;
1400 node_idx++;
1401 if (node_idx == MAX_NR_NODES) {
1402 /* max node count exceeded */
1403 ALOGE("%s parse error", file_data.filename);
1404 return -1;
1405 }
1406 }
1407 node = &zi->nodes[node_idx];
1408 node->id = node_id;
1409 zone_idx = 0;
1410 if (!zoneinfo_parse_node(&save_ptr, node)) {
1411 ALOGE("%s parse error", file_data.filename);
1412 return -1;
1413 }
1414 } else {
1415 /* new zone is found */
1416 zone_idx++;
1417 }
1418 if (!zoneinfo_parse_zone(&save_ptr, &node->zones[zone_idx])) {
1419 ALOGE("%s parse error", file_data.filename);
1420 return -1;
1421 }
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001422 }
1423 }
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001424 if (!node) {
1425 ALOGE("%s parse error", file_data.filename);
1426 return -1;
1427 }
1428 node->zone_count = zone_idx + 1;
1429 zi->node_count = node_idx + 1;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001430
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001431 /* calculate totals fields */
1432 for (node_idx = 0; node_idx < zi->node_count; node_idx++) {
1433 node = &zi->nodes[node_idx];
1434 for (zone_idx = 0; zone_idx < node->zone_count; zone_idx++) {
1435 struct zoneinfo_zone *zone = &zi->nodes[node_idx].zones[zone_idx];
1436 zi->totalreserve_pages += zone->max_protection + zone->fields.field.high;
1437 }
1438 zi->total_inactive_file += node->fields.field.nr_inactive_file;
1439 zi->total_active_file += node->fields.field.nr_active_file;
1440 zi->total_workingset_refault += node->fields.field.workingset_refault;
1441 }
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001442 return 0;
1443}
1444
 1445/* /proc/meminfo parsing routines */
1446static bool meminfo_parse_line(char *line, union meminfo *mi) {
1447 char *cp = line;
1448 char *ap;
1449 char *save_ptr;
1450 int64_t val;
1451 int field_idx;
1452 enum field_match_result match_res;
1453
1454 cp = strtok_r(line, " ", &save_ptr);
1455 if (!cp) {
1456 return false;
1457 }
1458
1459 ap = strtok_r(NULL, " ", &save_ptr);
1460 if (!ap) {
1461 return false;
1462 }
1463
1464 match_res = match_field(cp, ap, meminfo_field_names, MI_FIELD_COUNT,
1465 &val, &field_idx);
1466 if (match_res == PARSE_SUCCESS) {
1467 mi->arr[field_idx] = val / page_k;
1468 }
1469 return (match_res != PARSE_FAIL);
1470}
1471
1472static int meminfo_parse(union meminfo *mi) {
1473 static struct reread_data file_data = {
1474 .filename = MEMINFO_PATH,
1475 .fd = -1,
1476 };
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07001477 char *buf;
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001478 char *save_ptr;
1479 char *line;
1480
1481 memset(mi, 0, sizeof(union meminfo));
1482
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07001483 if ((buf = reread_file(&file_data)) == NULL) {
Suren Baghdasaryan8b9deaf2018-04-13 13:11:51 -07001484 return -1;
1485 }
1486
1487 for (line = strtok_r(buf, "\n", &save_ptr); line;
1488 line = strtok_r(NULL, "\n", &save_ptr)) {
1489 if (!meminfo_parse_line(line, mi)) {
1490 ALOGE("%s parse error", file_data.filename);
1491 return -1;
1492 }
1493 }
1494 mi->field.nr_file_pages = mi->field.cached + mi->field.swap_cached +
1495 mi->field.buffers;
1496
1497 return 0;
1498}
1499
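/*
 * Log a snapshot of all meminfo fields (converted back to kB and clamped to
 * INT32_MAX) as a single event-log list entry.
 */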
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -07001500static void meminfo_log(union meminfo *mi) {
1501 for (int field_idx = 0; field_idx < MI_FIELD_COUNT; field_idx++) {
1502 android_log_write_int32(ctx, (int32_t)min(mi->arr[field_idx] * page_k, INT32_MAX));
1503 }
1504
1505 android_log_write_list(ctx, LOG_ID_EVENTS);
1506 android_log_reset(ctx);
1507}
1508
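/*
 * Return the RSS of the process in pages, read from the second field of
 * /proc/<pid>/statm, or -1 if the file cannot be read.
 */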
Todd Poynor3948f802013-07-09 19:35:14 -07001509static int proc_get_size(int pid) {
1510 char path[PATH_MAX];
1511 char line[LINE_MAX];
Colin Crossce85d952014-07-11 17:53:27 -07001512 int fd;
Todd Poynor3948f802013-07-09 19:35:14 -07001513 int rss = 0;
1514 int total;
Colin Crossce85d952014-07-11 17:53:27 -07001515 ssize_t ret;
Todd Poynor3948f802013-07-09 19:35:14 -07001516
Mark Salyzyn64d97d82018-04-09 09:50:32 -07001517 /* gid containing AID_READPROC required */
Todd Poynor3948f802013-07-09 19:35:14 -07001518 snprintf(path, PATH_MAX, "/proc/%d/statm", pid);
Nick Kralevichc68c8862015-12-18 20:52:37 -08001519 fd = open(path, O_RDONLY | O_CLOEXEC);
Colin Crossce85d952014-07-11 17:53:27 -07001520 if (fd == -1)
Todd Poynor3948f802013-07-09 19:35:14 -07001521 return -1;
Colin Crossce85d952014-07-11 17:53:27 -07001522
1523 ret = read_all(fd, line, sizeof(line) - 1);
1524 if (ret < 0) {
1525 close(fd);
Todd Poynor3948f802013-07-09 19:35:14 -07001526 return -1;
1527 }
1528
1529 sscanf(line, "%d %d ", &total, &rss);
Colin Crossce85d952014-07-11 17:53:27 -07001530 close(fd);
Todd Poynor3948f802013-07-09 19:35:14 -07001531 return rss;
1532}
1533
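/*
 * Return the first word of /proc/<pid>/cmdline or NULL on failure. The
 * result lives in a static buffer and is overwritten by the next call.
 */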
1534static char *proc_get_name(int pid) {
1535 char path[PATH_MAX];
1536 static char line[LINE_MAX];
Colin Crossce85d952014-07-11 17:53:27 -07001537 int fd;
Todd Poynor3948f802013-07-09 19:35:14 -07001538 char *cp;
Colin Crossce85d952014-07-11 17:53:27 -07001539 ssize_t ret;
Todd Poynor3948f802013-07-09 19:35:14 -07001540
Mark Salyzyn64d97d82018-04-09 09:50:32 -07001541 /* gid containing AID_READPROC required */
Todd Poynor3948f802013-07-09 19:35:14 -07001542 snprintf(path, PATH_MAX, "/proc/%d/cmdline", pid);
Nick Kralevichc68c8862015-12-18 20:52:37 -08001543 fd = open(path, O_RDONLY | O_CLOEXEC);
Suren Baghdasaryan9e359db2019-09-27 16:56:50 -07001544 if (fd == -1) {
Todd Poynor3948f802013-07-09 19:35:14 -07001545 return NULL;
Suren Baghdasaryan9e359db2019-09-27 16:56:50 -07001546 }
Colin Crossce85d952014-07-11 17:53:27 -07001547 ret = read_all(fd, line, sizeof(line) - 1);
1548 close(fd);
1549 if (ret < 0) {
Todd Poynor3948f802013-07-09 19:35:14 -07001550 return NULL;
1551 }
1552
1553 cp = strchr(line, ' ');
Suren Baghdasaryan9e359db2019-09-27 16:56:50 -07001554 if (cp) {
Todd Poynor3948f802013-07-09 19:35:14 -07001555 *cp = '\0';
Suren Baghdasaryan9e359db2019-09-27 16:56:50 -07001556 } else {
1557 line[ret] = '\0';
1558 }
Todd Poynor3948f802013-07-09 19:35:14 -07001559
1560 return line;
1561}
1562
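/*
 * Return the process at the tail of the bucket for the given oomadj level,
 * i.e. the least recently registered one, or NULL if the bucket is empty.
 */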
1563static struct proc *proc_adj_lru(int oomadj) {
1564 return (struct proc *)adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
1565}
1566
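/*
 * Return the registered process with the largest RSS at the given oomadj
 * level, pruning entries whose /proc/<pid>/statm can no longer be read
 * (the process already exited). Returns NULL if none is found.
 */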
Suren Baghdasaryan662492a2017-12-08 13:17:06 -08001567static struct proc *proc_get_heaviest(int oomadj) {
1568 struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
1569 struct adjslot_list *curr = head->next;
1570 struct proc *maxprocp = NULL;
1571 int maxsize = 0;
1572 while (curr != head) {
1573 int pid = ((struct proc *)curr)->pid;
1574 int tasksize = proc_get_size(pid);
1575 if (tasksize <= 0) {
1576 struct adjslot_list *next = curr->next;
1577 pid_remove(pid);
1578 curr = next;
1579 } else {
1580 if (tasksize > maxsize) {
1581 maxsize = tasksize;
1582 maxprocp = (struct proc *)curr;
1583 }
1584 curr = curr->next;
1585 }
1586 }
1587 return maxprocp;
1588}
1589
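/*
 * Move every thread listed under /proc/<pid>/task to the given cpuset policy
 * and scheduling priority. Called right after SIGKILL is sent, presumably so
 * the dying process gets enough CPU time to exit and release its memory.
 */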
Wei Wang2d95c102018-11-21 00:11:44 -08001590static void set_process_group_and_prio(int pid, SchedPolicy sp, int prio) {
1591 DIR* d;
1592 char proc_path[PATH_MAX];
1593 struct dirent* de;
1594
1595 snprintf(proc_path, sizeof(proc_path), "/proc/%d/task", pid);
1596 if (!(d = opendir(proc_path))) {
1597 ALOGW("Failed to open %s; errno=%d: process pid(%d) might have died", proc_path, errno,
1598 pid);
1599 return;
1600 }
1601
1602 while ((de = readdir(d))) {
1603 int t_pid;
1604
1605 if (de->d_name[0] == '.') continue;
1606 t_pid = atoi(de->d_name);
1607
1608 if (!t_pid) {
1609 ALOGW("Failed to get t_pid for '%s' of pid(%d)", de->d_name, pid);
1610 continue;
1611 }
1612
1613 if (setpriority(PRIO_PROCESS, t_pid, prio) && errno != ESRCH) {
1614            ALOGW("Unable to raise priority of t_pid (%d) of the process being killed: errno=%d", t_pid, errno);
1615 }
1616
1617 if (set_cpuset_policy(t_pid, sp)) {
1618 ALOGW("Failed to set_cpuset_policy on pid(%d) t_pid(%d) to %d", pid, t_pid, (int)sp);
1619 continue;
1620 }
1621 }
1622 closedir(d);
1623}
1624
Tim Murraye7853f62018-10-25 17:05:41 -07001625static int last_killed_pid = -1;
1626
Colin Cross16b09462014-07-14 12:39:56 -07001627/* Kill one process specified by procp. Returns the size of the process killed */
Suren Baghdasaryanec5e4c62019-03-04 11:07:39 -08001628static int kill_one_process(struct proc* procp, int min_oom_score) {
Colin Cross16b09462014-07-14 12:39:56 -07001629 int pid = procp->pid;
1630 uid_t uid = procp->uid;
Suren Baghdasaryan0082ef12019-07-02 15:52:07 -07001631 int tgid;
Colin Cross16b09462014-07-14 12:39:56 -07001632 char *taskname;
1633 int tasksize;
1634 int r;
Suren Baghdasaryan01063272018-10-12 11:28:33 -07001635 int result = -1;
Colin Cross16b09462014-07-14 12:39:56 -07001636
Rajeev Kumar70450032018-01-31 17:54:56 -08001637#ifdef LMKD_LOG_STATS
Rajeev Kumar92b659b2018-02-21 19:08:15 -08001638 struct memory_stat mem_st = {};
Rajeev Kumar70450032018-01-31 17:54:56 -08001639 int memory_stat_parse_result = -1;
Suren Baghdasaryanec5e4c62019-03-04 11:07:39 -08001640#else
1641 /* To prevent unused parameter warning */
1642 (void)(min_oom_score);
Rajeev Kumar70450032018-01-31 17:54:56 -08001643#endif
1644
Suren Baghdasaryan0082ef12019-07-02 15:52:07 -07001645 tgid = proc_get_tgid(pid);
1646 if (tgid >= 0 && tgid != pid) {
1647 ALOGE("Possible pid reuse detected (pid %d, tgid %d)!", pid, tgid);
1648 goto out;
1649 }
1650
Colin Cross16b09462014-07-14 12:39:56 -07001651 taskname = proc_get_name(pid);
1652 if (!taskname) {
Suren Baghdasaryan01063272018-10-12 11:28:33 -07001653 goto out;
Colin Cross16b09462014-07-14 12:39:56 -07001654 }
1655
1656 tasksize = proc_get_size(pid);
1657 if (tasksize <= 0) {
Suren Baghdasaryan01063272018-10-12 11:28:33 -07001658 goto out;
Colin Cross16b09462014-07-14 12:39:56 -07001659 }
1660
Rajeev Kumar70450032018-01-31 17:54:56 -08001661#ifdef LMKD_LOG_STATS
1662 if (enable_stats_log) {
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001663 if (per_app_memcg) {
1664 memory_stat_parse_result = memory_stat_from_cgroup(&mem_st, pid, uid);
1665 } else {
1666 memory_stat_parse_result = memory_stat_from_procfs(&mem_st, pid);
1667 }
Rajeev Kumar70450032018-01-31 17:54:56 -08001668 }
1669#endif
1670
Suren Baghdasaryanc7135592018-01-04 10:43:58 -08001671 TRACE_KILL_START(pid);
1672
Mark Salyzyn64d97d82018-04-09 09:50:32 -07001673 /* CAP_KILL required */
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001674 r = kill(pid, SIGKILL);
Wei Wang2d95c102018-11-21 00:11:44 -08001675
1676 set_process_group_and_prio(pid, SP_FOREGROUND, ANDROID_PRIORITY_HIGHEST);
1677
Suren Baghdasaryand4a29902018-10-12 11:07:40 -07001678 inc_killcnt(procp->oomadj);
Tim Murrayb62b3ef2019-05-28 12:15:34 -07001679 ALOGE("Kill '%s' (%d), uid %d, oom_adj %d to free %ldkB", taskname, pid, uid, procp->oomadj,
1680 tasksize * page_k);
Colin Cross16b09462014-07-14 12:39:56 -07001681
Suren Baghdasaryanc7135592018-01-04 10:43:58 -08001682 TRACE_KILL_END();
1683
Tim Murraye7853f62018-10-25 17:05:41 -07001684 last_killed_pid = pid;
1685
Colin Cross16b09462014-07-14 12:39:56 -07001686 if (r) {
Mark Salyzyn919f5382018-02-04 15:27:23 -08001687 ALOGE("kill(%d): errno=%d", pid, errno);
Suren Baghdasaryan01063272018-10-12 11:28:33 -07001688 goto out;
Rajeev Kumar70450032018-01-31 17:54:56 -08001689 } else {
1690#ifdef LMKD_LOG_STATS
1691 if (memory_stat_parse_result == 0) {
1692 stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname,
1693 procp->oomadj, mem_st.pgfault, mem_st.pgmajfault, mem_st.rss_in_bytes,
Suren Baghdasaryanec5e4c62019-03-04 11:07:39 -08001694 mem_st.cache_in_bytes, mem_st.swap_in_bytes, mem_st.process_start_time_ns,
1695 min_oom_score);
Rajeev Kumar4dbc24d2018-10-05 12:34:59 -07001696 } else if (enable_stats_log) {
1697 stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname, procp->oomadj,
Suren Baghdasaryanec5e4c62019-03-04 11:07:39 -08001698 -1, -1, tasksize * BYTES_IN_KILOBYTE, -1, -1, -1,
1699 min_oom_score);
Rajeev Kumar70450032018-01-31 17:54:56 -08001700 }
1701#endif
Suren Baghdasaryan01063272018-10-12 11:28:33 -07001702 result = tasksize;
Colin Cross16b09462014-07-14 12:39:56 -07001703 }
Mark Salyzyn919f5382018-02-04 15:27:23 -08001704
Suren Baghdasaryan01063272018-10-12 11:28:33 -07001705out:
1706 /*
1707 * WARNING: After pid_remove() procp is freed and can't be used!
1708 * Therefore placed at the end of the function.
1709 */
1710 pid_remove(pid);
1711 return result;
Colin Cross16b09462014-07-14 12:39:56 -07001712}
1713
1714/*
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07001715 * Find one process to kill at or above the given oom_adj level.
1716 * Returns size of the killed process.
Colin Cross16b09462014-07-14 12:39:56 -07001717 */
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07001718static int find_and_kill_process(int min_score_adj) {
Colin Cross16b09462014-07-14 12:39:56 -07001719 int i;
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07001720 int killed_size = 0;
Colin Cross16b09462014-07-14 12:39:56 -07001721
Rajeev Kumar70450032018-01-31 17:54:56 -08001722#ifdef LMKD_LOG_STATS
Yang Lu5564f4e2018-05-15 04:59:44 +00001723 bool lmk_state_change_start = false;
Rajeev Kumar70450032018-01-31 17:54:56 -08001724#endif
1725
Chong Zhang0a4acdf2015-10-14 16:19:53 -07001726 for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) {
Colin Cross16b09462014-07-14 12:39:56 -07001727 struct proc *procp;
1728
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001729 while (true) {
Suren Baghdasaryan818b59b2018-04-13 11:49:54 -07001730 procp = kill_heaviest_task ?
1731 proc_get_heaviest(i) : proc_adj_lru(i);
Colin Cross16b09462014-07-14 12:39:56 -07001732
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001733 if (!procp)
1734 break;
1735
Suren Baghdasaryanec5e4c62019-03-04 11:07:39 -08001736 killed_size = kill_one_process(procp, min_score_adj);
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001737 if (killed_size >= 0) {
Yang Lu5564f4e2018-05-15 04:59:44 +00001738#ifdef LMKD_LOG_STATS
1739 if (enable_stats_log && !lmk_state_change_start) {
1740 lmk_state_change_start = true;
1741 stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
1742 LMK_STATE_CHANGE_START);
1743 }
1744#endif
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07001745 break;
Colin Cross16b09462014-07-14 12:39:56 -07001746 }
1747 }
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07001748 if (killed_size) {
1749 break;
1750 }
Colin Cross16b09462014-07-14 12:39:56 -07001751 }
1752
Rajeev Kumar70450032018-01-31 17:54:56 -08001753#ifdef LMKD_LOG_STATS
Yang Lu5564f4e2018-05-15 04:59:44 +00001754 if (enable_stats_log && lmk_state_change_start) {
Rajeev Kumar70450032018-01-31 17:54:56 -08001755 stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED, LMK_STATE_CHANGE_STOP);
1756 }
1757#endif
1758
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07001759 return killed_size;
Colin Cross16b09462014-07-14 12:39:56 -07001760}
1761
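/*
 * Read a single integer memory usage counter from the memcg file described
 * by file_data. Returns the value, or -1 on a read/parse error or if the
 * reported usage is zero.
 */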
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -07001762static int64_t get_memory_usage(struct reread_data *file_data) {
Robert Beneac47f2992017-08-21 15:18:31 -07001763 int ret;
1764 int64_t mem_usage;
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07001765 char *buf;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -07001766
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07001767 if ((buf = reread_file(file_data)) == NULL) {
Robert Beneac47f2992017-08-21 15:18:31 -07001768 return -1;
1769 }
1770
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -07001771 if (!parse_int64(buf, &mem_usage)) {
1772 ALOGE("%s parse error", file_data->filename);
Robert Beneac47f2992017-08-21 15:18:31 -07001773 return -1;
1774 }
Robert Beneac47f2992017-08-21 15:18:31 -07001775 if (mem_usage == 0) {
1776 ALOGE("No memory!");
1777 return -1;
1778 }
1779 return mem_usage;
1780}
1781
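/*
 * Track the minimum and maximum number of free pages seen at low vmpressure
 * events. The maximum serves as the watermark above which later pressure
 * events are ignored; jumps of 10% or more above the current maximum are
 * treated as stale events and discarded.
 */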
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001782void record_low_pressure_levels(union meminfo *mi) {
1783 if (low_pressure_mem.min_nr_free_pages == -1 ||
1784 low_pressure_mem.min_nr_free_pages > mi->field.nr_free_pages) {
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001785 if (debug_process_killing) {
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001786 ALOGI("Low pressure min memory update from %" PRId64 " to %" PRId64,
1787 low_pressure_mem.min_nr_free_pages, mi->field.nr_free_pages);
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001788 }
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001789 low_pressure_mem.min_nr_free_pages = mi->field.nr_free_pages;
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001790 }
1791 /*
1792 * Free memory at low vmpressure events occasionally gets spikes,
1793 * possibly a stale low vmpressure event with memory already
1794 * freed up (no memory pressure should have been reported).
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001795 * Ignore large jumps in max_nr_free_pages that would mess up our stats.
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001796 */
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001797 if (low_pressure_mem.max_nr_free_pages == -1 ||
1798 (low_pressure_mem.max_nr_free_pages < mi->field.nr_free_pages &&
1799 mi->field.nr_free_pages - low_pressure_mem.max_nr_free_pages <
1800 low_pressure_mem.max_nr_free_pages * 0.1)) {
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001801 if (debug_process_killing) {
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001802 ALOGI("Low pressure max memory update from %" PRId64 " to %" PRId64,
1803 low_pressure_mem.max_nr_free_pages, mi->field.nr_free_pages);
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001804 }
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001805 low_pressure_mem.max_nr_free_pages = mi->field.nr_free_pages;
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001806 }
1807}
1808
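/* Shift a vmpressure level one step up or down, clamped to the valid range */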
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001809enum vmpressure_level upgrade_level(enum vmpressure_level level) {
1810 return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ?
1811 level + 1 : level);
1812}
1813
1814enum vmpressure_level downgrade_level(enum vmpressure_level level) {
1815 return (enum vmpressure_level)((level > VMPRESS_LEVEL_LOW) ?
1816 level - 1 : level);
1817}
1818
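/*
 * Return true while the last killed process still has a /proc/<pid> entry,
 * meaning the kernel has not yet finished reclaiming it.
 */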
Tim Murraye7853f62018-10-25 17:05:41 -07001819static bool is_kill_pending(void) {
1820 char buf[24];
1821
1822 if (last_killed_pid < 0) {
1823 return false;
1824 }
1825
1826 snprintf(buf, sizeof(buf), "/proc/%d/", last_killed_pid);
1827 if (access(buf, F_OK) == 0) {
1828 return true;
1829 }
1830
1831 // reset last killed PID because there's nothing pending
1832 last_killed_pid = -1;
1833 return false;
1834}
1835
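/*
 * Common handler for vmpressure and PSI memory pressure events. Determines
 * the effective pressure level, decides whether a kill is warranted (via
 * minfree levels or per-level oom_adj thresholds) and reports the outcome.
 */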
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07001836static void mp_event_common(int data, uint32_t events, struct polling_params *poll_params) {
Todd Poynor3948f802013-07-09 19:35:14 -07001837 int ret;
1838 unsigned long long evcount;
Robert Beneac47f2992017-08-21 15:18:31 -07001839 int64_t mem_usage, memsw_usage;
Robert Benea6e8e7102017-09-13 15:20:30 -07001840 int64_t mem_pressure;
Suren Baghdasaryane82e15c2018-01-04 09:16:21 -08001841 enum vmpressure_level lvl;
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001842 union meminfo mi;
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001843 struct zoneinfo zi;
Suren Baghdasaryan36934412018-09-05 15:46:32 -07001844 struct timespec curr_tm;
Suren Baghdasaryan314a5052018-07-24 17:13:06 -07001845 static struct timespec last_kill_tm;
1846 static unsigned long kill_skip_count = 0;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08001847 enum vmpressure_level level = (enum vmpressure_level)data;
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001848 long other_free = 0, other_file = 0;
1849 int min_score_adj;
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001850 int minfree = 0;
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -07001851 static struct reread_data mem_usage_file_data = {
1852 .filename = MEMCG_MEMORY_USAGE,
1853 .fd = -1,
1854 };
1855 static struct reread_data memsw_usage_file_data = {
1856 .filename = MEMCG_MEMORYSW_USAGE,
1857 .fd = -1,
1858 };
Todd Poynor3948f802013-07-09 19:35:14 -07001859
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08001860 if (debug_process_killing) {
1861        ALOGI("%s memory pressure event triggered", level_name[level]);
1862 }
1863
1864 if (!use_psi_monitors) {
1865 /*
1866 * Check all event counters from low to critical
1867 * and upgrade to the highest priority one. By reading
1868 * eventfd we also reset the event counters.
1869 */
1870 for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
1871 if (mpevfd[lvl] != -1 &&
1872 TEMP_FAILURE_RETRY(read(mpevfd[lvl],
1873 &evcount, sizeof(evcount))) > 0 &&
1874 evcount > 0 && lvl > level) {
1875 level = lvl;
1876 }
Suren Baghdasaryane82e15c2018-01-04 09:16:21 -08001877 }
1878 }
Todd Poynor3948f802013-07-09 19:35:14 -07001879
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07001880 /* Start polling after initial PSI event */
1881 if (use_psi_monitors && events) {
1882 /* Override polling params only if current event is more critical */
1883 if (!poll_params->poll_handler || data > poll_params->poll_handler->data) {
1884 poll_params->polling_interval_ms = PSI_POLL_PERIOD_MS;
1885 poll_params->update = POLLING_START;
1886 }
1887 }
1888
Suren Baghdasaryan36934412018-09-05 15:46:32 -07001889 if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
1890 ALOGE("Failed to get current time");
1891 return;
1892 }
1893
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -08001894 if (kill_timeout_ms) {
Tim Murraye7853f62018-10-25 17:05:41 -07001895 // If we're within the timeout, see if there's pending reclaim work
1896 // from the last killed process. If there is (as evidenced by
1897 // /proc/<pid> continuing to exist), skip killing for now.
1898 if ((get_time_diff_ms(&last_kill_tm, &curr_tm) < kill_timeout_ms) &&
1899 (low_ram_device || is_kill_pending())) {
Suren Baghdasaryan314a5052018-07-24 17:13:06 -07001900 kill_skip_count++;
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -08001901 return;
1902 }
1903 }
1904
Suren Baghdasaryan314a5052018-07-24 17:13:06 -07001905 if (kill_skip_count > 0) {
Suren Baghdasaryanda88b242018-05-10 16:10:56 -07001906 ALOGI("%lu memory pressure events were skipped after a kill!",
Suren Baghdasaryan314a5052018-07-24 17:13:06 -07001907 kill_skip_count);
1908 kill_skip_count = 0;
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -08001909 }
1910
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001911 if (meminfo_parse(&mi) < 0 || zoneinfo_parse(&zi) < 0) {
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001912 ALOGE("Failed to get free memory!");
1913 return;
1914 }
1915
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001916 if (use_minfree_levels) {
1917 int i;
1918
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07001919 other_free = mi.field.nr_free_pages - zi.totalreserve_pages;
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001920 if (mi.field.nr_file_pages > (mi.field.shmem + mi.field.unevictable + mi.field.swap_cached)) {
1921 other_file = (mi.field.nr_file_pages - mi.field.shmem -
1922 mi.field.unevictable - mi.field.swap_cached);
1923 } else {
1924 other_file = 0;
1925 }
1926
1927 min_score_adj = OOM_SCORE_ADJ_MAX + 1;
1928 for (i = 0; i < lowmem_targets_size; i++) {
1929 minfree = lowmem_minfree[i];
1930 if (other_free < minfree && other_file < minfree) {
1931 min_score_adj = lowmem_adj[i];
1932 break;
1933 }
1934 }
1935
Suren Baghdasaryan20686f02018-05-18 14:42:00 -07001936 if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
1937 if (debug_process_killing) {
1938 ALOGI("Ignore %s memory pressure event "
1939 "(free memory=%ldkB, cache=%ldkB, limit=%ldkB)",
1940 level_name[level], other_free * page_k, other_file * page_k,
1941 (long)lowmem_minfree[lowmem_targets_size - 1] * page_k);
1942 }
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001943 return;
Suren Baghdasaryan20686f02018-05-18 14:42:00 -07001944 }
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001945
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07001946 goto do_kill;
1947 }
1948
Suren Baghdasaryan9926e572018-04-13 13:41:12 -07001949 if (level == VMPRESS_LEVEL_LOW) {
1950 record_low_pressure_levels(&mi);
1951 }
1952
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08001953 if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) {
1954 /* Do not monitor this pressure level */
1955 return;
1956 }
1957
Suren Baghdasaryan6499e5e2018-04-13 12:43:41 -07001958 if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
1959 goto do_kill;
1960 }
1961 if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001962 goto do_kill;
Robert Benea6e8e7102017-09-13 15:20:30 -07001963 }
Robert Beneac47f2992017-08-21 15:18:31 -07001964
Robert Benea6e8e7102017-09-13 15:20:30 -07001965    // Calculate percent for swappiness.
1966 mem_pressure = (mem_usage * 100) / memsw_usage;
1967
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001968 if (enable_pressure_upgrade && level != VMPRESS_LEVEL_CRITICAL) {
Robert Benea6e8e7102017-09-13 15:20:30 -07001969 // We are swapping too much.
1970 if (mem_pressure < upgrade_pressure) {
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001971 level = upgrade_level(level);
1972 if (debug_process_killing) {
1973 ALOGI("Event upgraded to %s", level_name[level]);
1974 }
Robert Beneac47f2992017-08-21 15:18:31 -07001975 }
1976 }
1977
Vic Yang360a1132018-08-07 10:18:22 -07001978 // If we still have enough swap space available, check if we want to
1979 // ignore/downgrade pressure events.
1980 if (mi.field.free_swap >=
1981 mi.field.total_swap * swap_free_low_percentage / 100) {
1982 // If the pressure is larger than downgrade_pressure lmk will not
1983 // kill any process, since enough memory is available.
1984 if (mem_pressure > downgrade_pressure) {
1985 if (debug_process_killing) {
1986 ALOGI("Ignore %s memory pressure", level_name[level]);
1987 }
1988 return;
1989 } else if (level == VMPRESS_LEVEL_CRITICAL && mem_pressure > upgrade_pressure) {
1990 if (debug_process_killing) {
1991 ALOGI("Downgrade critical memory pressure");
1992 }
1993 // Downgrade event, since enough memory available.
1994 level = downgrade_level(level);
Robert Benea6e8e7102017-09-13 15:20:30 -07001995 }
Robert Benea6e8e7102017-09-13 15:20:30 -07001996 }
1997
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08001998do_kill:
Suren Baghdasaryanff61afb2018-04-13 11:45:38 -07001999 if (low_ram_device) {
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08002000 /* For Go devices kill only one task */
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07002001 if (find_and_kill_process(level_oomadj[level]) == 0) {
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08002002 if (debug_process_killing) {
2003 ALOGI("Nothing to kill");
2004 }
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -07002005 } else {
2006 meminfo_log(&mi);
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08002007 }
2008 } else {
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07002009 int pages_freed;
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002010 static struct timespec last_report_tm;
2011 static unsigned long report_skip_count = 0;
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07002012
2013 if (!use_minfree_levels) {
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07002014            /* Free up enough memory to downgrade the memory pressure to the low level */
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07002015 if (mi.field.nr_free_pages >= low_pressure_mem.max_nr_free_pages) {
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07002016 if (debug_process_killing) {
2017 ALOGI("Ignoring pressure since more memory is "
2018 "available (%" PRId64 ") than watermark (%" PRId64 ")",
2019 mi.field.nr_free_pages, low_pressure_mem.max_nr_free_pages);
2020 }
2021 return;
2022 }
2023 min_score_adj = level_oomadj[level];
Suren Baghdasaryan65f54a22018-01-17 17:17:44 -08002024 }
2025
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07002026 pages_freed = find_and_kill_process(min_score_adj);
Suren Baghdasaryanda88b242018-05-10 16:10:56 -07002027
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002028 if (pages_freed == 0) {
2029 /* Rate limit kill reports when nothing was reclaimed */
2030 if (get_time_diff_ms(&last_report_tm, &curr_tm) < FAIL_REPORT_RLIMIT_MS) {
2031 report_skip_count++;
Suren Baghdasaryan314a5052018-07-24 17:13:06 -07002032 return;
2033 }
Tim Murraye7853f62018-10-25 17:05:41 -07002034 } else {
2035 /* If we killed anything, update the last killed timestamp. */
2036 last_kill_tm = curr_tm;
Robert Beneacaeaa652017-08-11 16:03:20 -07002037 }
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002038
2039 /* Log meminfo whenever we kill or when report rate limit allows */
2040 meminfo_log(&mi);
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002041
2042 if (use_minfree_levels) {
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07002043 ALOGI("Reclaimed %ldkB, cache(%ldkB) and "
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002044 "free(%" PRId64 "kB)-reserved(%" PRId64 "kB) below min(%ldkB) for oom_adj %d",
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07002045 pages_freed * page_k,
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002046 other_file * page_k, mi.field.nr_free_pages * page_k,
Suren Baghdasaryan94ce3dd2019-07-15 13:54:20 -07002047 zi.totalreserve_pages * page_k,
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002048 minfree * page_k, min_score_adj);
2049 } else {
Suren Baghdasaryanf81b5f42018-10-26 11:32:15 -07002050 ALOGI("Reclaimed %ldkB at oom_adj %d",
2051 pages_freed * page_k, min_score_adj);
Suren Baghdasaryan36934412018-09-05 15:46:32 -07002052 }
2053
2054 if (report_skip_count > 0) {
2055 ALOGI("Suppressed %lu failed kill reports", report_skip_count);
2056 report_skip_count = 0;
2057 }
2058
2059 last_report_tm = curr_tm;
Colin Crossf8857cc2014-07-11 17:16:56 -07002060 }
Todd Poynor3948f802013-07-09 19:35:14 -07002061}
2062
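/*
 * Create a PSI monitor for the given pressure level and register it with the
 * epoll loop; returns false on failure (e.g. no PSI support in the kernel).
 */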
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002063static bool init_mp_psi(enum vmpressure_level level) {
2064 int fd = init_psi_monitor(psi_thresholds[level].stall_type,
2065 psi_thresholds[level].threshold_ms * US_PER_MS,
2066 PSI_WINDOW_SIZE_MS * US_PER_MS);
2067
2068 if (fd < 0) {
2069 return false;
2070 }
2071
2072 vmpressure_hinfo[level].handler = mp_event_common;
2073 vmpressure_hinfo[level].data = level;
2074 if (register_psi_monitor(epollfd, fd, &vmpressure_hinfo[level]) < 0) {
2075 destroy_psi_monitor(fd);
2076 return false;
2077 }
2078 maxevents++;
2079 mpevfd[level] = fd;
2080
2081 return true;
2082}
2083
2084static void destroy_mp_psi(enum vmpressure_level level) {
2085 int fd = mpevfd[level];
2086
2087 if (unregister_psi_monitor(epollfd, fd) < 0) {
2088 ALOGE("Failed to unregister psi monitor for %s memory pressure; errno=%d",
2089 level_name[level], errno);
2090 }
2091 destroy_psi_monitor(fd);
2092 mpevfd[level] = -1;
2093}
2094
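/*
 * Set up PSI monitors for the low, medium and critical levels, tearing down
 * any already-created monitors if a later one fails.
 */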
2095static bool init_psi_monitors() {
2096 if (!init_mp_psi(VMPRESS_LEVEL_LOW)) {
2097 return false;
2098 }
2099 if (!init_mp_psi(VMPRESS_LEVEL_MEDIUM)) {
2100 destroy_mp_psi(VMPRESS_LEVEL_LOW);
2101 return false;
2102 }
2103 if (!init_mp_psi(VMPRESS_LEVEL_CRITICAL)) {
2104 destroy_mp_psi(VMPRESS_LEVEL_MEDIUM);
2105 destroy_mp_psi(VMPRESS_LEVEL_LOW);
2106 return false;
2107 }
2108 return true;
2109}
2110
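/*
 * Register a memcg vmpressure eventfd for the given level through
 * cgroup.event_control and add it to the epoll loop.
 */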
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002111static bool init_mp_common(enum vmpressure_level level) {
Todd Poynor3948f802013-07-09 19:35:14 -07002112 int mpfd;
2113 int evfd;
2114 int evctlfd;
2115 char buf[256];
2116 struct epoll_event epev;
2117 int ret;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002118 int level_idx = (int)level;
2119 const char *levelstr = level_name[level_idx];
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08002120
Mark Salyzyn64d97d82018-04-09 09:50:32 -07002121 /* gid containing AID_SYSTEM required */
Nick Kralevichc68c8862015-12-18 20:52:37 -08002122 mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07002123 if (mpfd < 0) {
2124 ALOGI("No kernel memory.pressure_level support (errno=%d)", errno);
2125 goto err_open_mpfd;
2126 }
2127
Nick Kralevichc68c8862015-12-18 20:52:37 -08002128 evctlfd = open(MEMCG_SYSFS_PATH "cgroup.event_control", O_WRONLY | O_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07002129 if (evctlfd < 0) {
2130 ALOGI("No kernel memory cgroup event control (errno=%d)", errno);
2131 goto err_open_evctlfd;
2132 }
2133
Nick Kralevichc68c8862015-12-18 20:52:37 -08002134 evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
Todd Poynor3948f802013-07-09 19:35:14 -07002135 if (evfd < 0) {
2136 ALOGE("eventfd failed for level %s; errno=%d", levelstr, errno);
2137 goto err_eventfd;
2138 }
2139
2140 ret = snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr);
2141 if (ret >= (ssize_t)sizeof(buf)) {
2142 ALOGE("cgroup.event_control line overflow for level %s", levelstr);
2143 goto err;
2144 }
2145
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002146 ret = TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1));
Todd Poynor3948f802013-07-09 19:35:14 -07002147 if (ret == -1) {
2148 ALOGE("cgroup.event_control write failed for level %s; errno=%d",
2149 levelstr, errno);
2150 goto err;
2151 }
2152
2153 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002154 /* use data to store event level */
2155 vmpressure_hinfo[level_idx].data = level_idx;
2156 vmpressure_hinfo[level_idx].handler = mp_event_common;
2157 epev.data.ptr = (void *)&vmpressure_hinfo[level_idx];
Todd Poynor3948f802013-07-09 19:35:14 -07002158 ret = epoll_ctl(epollfd, EPOLL_CTL_ADD, evfd, &epev);
2159 if (ret == -1) {
2160 ALOGE("epoll_ctl for level %s failed; errno=%d", levelstr, errno);
2161 goto err;
2162 }
2163 maxevents++;
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08002164 mpevfd[level] = evfd;
Suren Baghdasaryan1bd2fc42018-01-04 08:54:53 -08002165 close(evctlfd);
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08002166 return true;
Todd Poynor3948f802013-07-09 19:35:14 -07002167
2168err:
2169 close(evfd);
2170err_eventfd:
2171 close(evctlfd);
2172err_open_evctlfd:
2173 close(mpfd);
2174err_open_mpfd:
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08002175 return false;
Robert Benea673e2762017-06-01 16:32:31 -07002176}
2177
Jim Blackler3947c932019-04-26 11:18:29 +01002178#ifdef LMKD_LOG_STATS
2179static int kernel_poll_fd = -1;
Jim Blackler3947c932019-04-26 11:18:29 +01002180static void poll_kernel() {
2181 if (kernel_poll_fd == -1) {
2182 // not waiting
2183 return;
2184 }
2185
2186 while (1) {
2187 char rd_buf[256];
2188 int bytes_read =
2189 TEMP_FAILURE_RETRY(pread(kernel_poll_fd, (void*)rd_buf, sizeof(rd_buf), 0));
2190 if (bytes_read <= 0) break;
2191 rd_buf[bytes_read] = '\0';
2192
2193 int64_t pid;
2194 int64_t uid;
2195 int64_t group_leader_pid;
2196 int64_t min_flt;
2197 int64_t maj_flt;
2198 int64_t rss_in_pages;
2199 int16_t oom_score_adj;
2200 int16_t min_score_adj;
2201 int64_t starttime;
2202 char* taskname = 0;
2203 int fields_read = sscanf(rd_buf,
2204 "%" SCNd64 " %" SCNd64 " %" SCNd64 " %" SCNd64 " %" SCNd64
2205 " %" SCNd64 " %" SCNd16 " %" SCNd16 " %" SCNd64 "\n%m[^\n]",
2206 &pid, &uid, &group_leader_pid, &min_flt, &maj_flt, &rss_in_pages,
2207 &oom_score_adj, &min_score_adj, &starttime, &taskname);
2208
2209 /* only the death of the group leader process is logged */
2210 if (fields_read == 10 && group_leader_pid == pid) {
2211 int64_t process_start_time_ns = starttime * (NS_PER_SEC / sysconf(_SC_CLK_TCK));
Jim Blacklerd2da8142019-09-10 15:30:05 +01002212 stats_write_lmk_kill_occurred_pid(log_ctx, LMK_KILL_OCCURRED, uid, pid, oom_score_adj,
2213 min_flt, maj_flt, rss_in_pages * PAGE_SIZE, 0, 0,
2214 process_start_time_ns, min_score_adj);
Jim Blackler3947c932019-04-26 11:18:29 +01002215 }
2216
2217 free(taskname);
2218 }
2219}
2220
2221static struct event_handler_info kernel_poll_hinfo = {0, poll_kernel};
2222
2223static void init_poll_kernel() {
2224 struct epoll_event epev;
2225 kernel_poll_fd =
2226 TEMP_FAILURE_RETRY(open("/proc/lowmemorykiller", O_RDONLY | O_NONBLOCK | O_CLOEXEC));
2227
2228 if (kernel_poll_fd < 0) {
2229        ALOGE("kernel lmk event file could not be opened; errno=%d", errno);
2230 return;
2231 }
2232
2233 epev.events = EPOLLIN;
2234 epev.data.ptr = (void*)&kernel_poll_hinfo;
2235 if (epoll_ctl(epollfd, EPOLL_CTL_ADD, kernel_poll_fd, &epev) != 0) {
2236 ALOGE("epoll_ctl for lmk events failed; errno=%d", errno);
2237 close(kernel_poll_fd);
2238 kernel_poll_fd = -1;
2239 } else {
2240 maxevents++;
2241 }
2242}
2243#endif
2244
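/*
 * One-time initialization: create the epoll loop, listen on the lmkd control
 * socket, choose between the in-kernel driver and userspace monitoring via
 * PSI or vmpressure, and pre-size the read buffer using /proc/zoneinfo.
 */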
Todd Poynor3948f802013-07-09 19:35:14 -07002245static int init(void) {
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07002246 struct reread_data file_data = {
2247 .filename = ZONEINFO_PATH,
2248 .fd = -1,
2249 };
Todd Poynor3948f802013-07-09 19:35:14 -07002250 struct epoll_event epev;
2251 int i;
2252 int ret;
2253
2254 page_k = sysconf(_SC_PAGESIZE);
2255 if (page_k == -1)
2256 page_k = PAGE_SIZE;
2257 page_k /= 1024;
2258
2259 epollfd = epoll_create(MAX_EPOLL_EVENTS);
2260 if (epollfd == -1) {
2261 ALOGE("epoll_create failed (errno=%d)", errno);
2262 return -1;
2263 }
2264
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002265 // mark data connections as not connected
2266 for (int i = 0; i < MAX_DATA_CONN; i++) {
2267 data_sock[i].sock = -1;
2268 }
2269
2270 ctrl_sock.sock = android_get_control_socket("lmkd");
2271 if (ctrl_sock.sock < 0) {
Todd Poynor3948f802013-07-09 19:35:14 -07002272 ALOGE("get lmkd control socket failed");
2273 return -1;
2274 }
2275
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002276 ret = listen(ctrl_sock.sock, MAX_DATA_CONN);
Todd Poynor3948f802013-07-09 19:35:14 -07002277 if (ret < 0) {
2278 ALOGE("lmkd control socket listen failed (errno=%d)", errno);
2279 return -1;
2280 }
2281
2282 epev.events = EPOLLIN;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002283 ctrl_sock.handler_info.handler = ctrl_connect_handler;
2284 epev.data.ptr = (void *)&(ctrl_sock.handler_info);
2285 if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ctrl_sock.sock, &epev) == -1) {
Todd Poynor3948f802013-07-09 19:35:14 -07002286 ALOGE("epoll_ctl for lmkd control socket failed (errno=%d)", errno);
2287 return -1;
2288 }
2289 maxevents++;
2290
Robert Benea164baeb2017-09-11 16:53:28 -07002291 has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK);
Suren Baghdasaryan979591b2018-01-18 17:27:30 -08002292 use_inkernel_interface = has_inkernel_module;
Todd Poynor3948f802013-07-09 19:35:14 -07002293
2294 if (use_inkernel_interface) {
2295 ALOGI("Using in-kernel low memory killer interface");
Jim Blackler3947c932019-04-26 11:18:29 +01002296#ifdef LMKD_LOG_STATS
2297 if (enable_stats_log) {
2298 init_poll_kernel();
2299 }
2300#endif
Todd Poynor3948f802013-07-09 19:35:14 -07002301 } else {
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002302 /* Try to use psi monitor first if kernel has it */
2303 use_psi_monitors = property_get_bool("ro.lmk.use_psi", true) &&
2304 init_psi_monitors();
2305 /* Fall back to vmpressure */
2306 if (!use_psi_monitors &&
2307 (!init_mp_common(VMPRESS_LEVEL_LOW) ||
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002308 !init_mp_common(VMPRESS_LEVEL_MEDIUM) ||
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002309 !init_mp_common(VMPRESS_LEVEL_CRITICAL))) {
Todd Poynor3948f802013-07-09 19:35:14 -07002310 ALOGE("Kernel does not support memory pressure events or in-kernel low memory killer");
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08002311 return -1;
2312 }
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002313 if (use_psi_monitors) {
2314 ALOGI("Using psi monitors for memory pressure detection");
2315 } else {
2316 ALOGI("Using vmpressure for memory pressure detection");
2317 }
Todd Poynor3948f802013-07-09 19:35:14 -07002318 }
2319
Chong Zhang0a4acdf2015-10-14 16:19:53 -07002320 for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
Todd Poynor3948f802013-07-09 19:35:14 -07002321 procadjslot_list[i].next = &procadjslot_list[i];
2322 procadjslot_list[i].prev = &procadjslot_list[i];
2323 }
2324
Suren Baghdasaryand4a29902018-10-12 11:07:40 -07002325 memset(killcnt_idx, KILLCNT_INVALID_IDX, sizeof(killcnt_idx));
2326
Suren Baghdasaryana77b3272019-07-15 13:35:04 -07002327 /*
2328 * Read zoneinfo as the biggest file we read to create and size the initial
2329 * read buffer and avoid memory re-allocations during memory pressure
2330 */
2331 if (reread_file(&file_data) == NULL) {
2332 ALOGE("Failed to read %s: %s", file_data.filename, strerror(errno));
2333 }
2334
Todd Poynor3948f802013-07-09 19:35:14 -07002335 return 0;
2336}
2337
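/*
 * Main event loop: wait on epoll, handle dropped data connections first,
 * then dispatch the remaining events. While a PSI-triggered polling episode
 * is active, wake up periodically to re-run the pressure handler for up to
 * PSI_WINDOW_SIZE_MS after the initial event.
 */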
2338static void mainloop(void) {
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002339 struct event_handler_info* handler_info;
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002340 struct polling_params poll_params;
2341 struct timespec curr_tm;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002342 struct epoll_event *evt;
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002343 long delay = -1;
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002344
2345 poll_params.poll_handler = NULL;
2346 poll_params.update = POLLING_DO_NOT_CHANGE;
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002347
Todd Poynor3948f802013-07-09 19:35:14 -07002348 while (1) {
2349 struct epoll_event events[maxevents];
2350 int nevents;
2351 int i;
2352
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002353 if (poll_params.poll_handler) {
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002354 /* Calculate next timeout */
2355 clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002356 delay = get_time_diff_ms(&poll_params.last_poll_tm, &curr_tm);
2357 delay = (delay < poll_params.polling_interval_ms) ?
2358 poll_params.polling_interval_ms - delay : poll_params.polling_interval_ms;
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002359
2360 /* Wait for events until the next polling timeout */
2361 nevents = epoll_wait(epollfd, events, maxevents, delay);
2362
2363 clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002364 if (get_time_diff_ms(&poll_params.last_poll_tm, &curr_tm) >=
2365 poll_params.polling_interval_ms) {
2366 /* Set input params for the call */
2367 poll_params.poll_handler->handler(poll_params.poll_handler->data, 0, &poll_params);
2368 poll_params.last_poll_tm = curr_tm;
2369
2370 if (poll_params.update != POLLING_DO_NOT_CHANGE) {
2371 switch (poll_params.update) {
2372 case POLLING_START:
2373 poll_params.poll_start_tm = curr_tm;
2374 break;
2375 case POLLING_STOP:
2376 poll_params.poll_handler = NULL;
2377 break;
2378 default:
2379 break;
2380 }
2381 poll_params.update = POLLING_DO_NOT_CHANGE;
2382 } else {
2383 if (get_time_diff_ms(&poll_params.poll_start_tm, &curr_tm) >
2384 PSI_WINDOW_SIZE_MS) {
2385 /* Polled for the duration of PSI window, time to stop */
2386 poll_params.poll_handler = NULL;
2387 }
2388 }
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002389 }
2390 } else {
2391 /* Wait for events with no timeout */
2392 nevents = epoll_wait(epollfd, events, maxevents, -1);
2393 }
Todd Poynor3948f802013-07-09 19:35:14 -07002394
2395 if (nevents == -1) {
2396 if (errno == EINTR)
2397 continue;
2398 ALOGE("epoll_wait failed (errno=%d)", errno);
2399 continue;
2400 }
2401
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002402 /*
2403 * First pass to see if any data socket connections were dropped.
2404 * Dropped connection should be handled before any other events
2405 * to deallocate data connection and correctly handle cases when
2406 * connection gets dropped and reestablished in the same epoll cycle.
2407 * In such cases it's essential to handle connection closures first.
2408 */
2409 for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
2410 if ((evt->events & EPOLLHUP) && evt->data.ptr) {
2411 ALOGI("lmkd data connection dropped");
2412 handler_info = (struct event_handler_info*)evt->data.ptr;
2413 ctrl_data_close(handler_info->data);
2414 }
2415 }
2416
2417 /* Second pass to handle all other events */
2418 for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002419 if (evt->events & EPOLLERR) {
Todd Poynor3948f802013-07-09 19:35:14 -07002420 ALOGD("EPOLLERR on event #%d", i);
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002421 }
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002422 if (evt->events & EPOLLHUP) {
2423 /* This case was handled in the first pass */
2424 continue;
2425 }
2426 if (evt->data.ptr) {
2427 handler_info = (struct event_handler_info*)evt->data.ptr;
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002428 /* Set input params for the call */
2429 handler_info->handler(handler_info->data, evt->events, &poll_params);
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002430
Suren Baghdasaryanef3650f2019-07-15 14:50:49 -07002431 if (poll_params.update != POLLING_DO_NOT_CHANGE) {
2432 switch (poll_params.update) {
2433 case POLLING_START:
2434 /*
2435 * Poll for the duration of PSI_WINDOW_SIZE_MS after the
2436 * initial PSI event because psi events are rate-limited
2437 * at one per sec.
2438 */
2439 clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
2440 poll_params.poll_start_tm = poll_params.last_poll_tm = curr_tm;
2441 poll_params.poll_handler = handler_info;
2442 break;
2443 case POLLING_STOP:
2444 poll_params.poll_handler = NULL;
2445 break;
2446 default:
2447 break;
2448 }
2449 poll_params.update = POLLING_DO_NOT_CHANGE;
Suren Baghdasaryan77122e52019-01-08 12:54:48 -08002450 }
Suren Baghdasaryan3cfb2c82018-01-26 12:51:19 -08002451 }
Todd Poynor3948f802013-07-09 19:35:14 -07002452 }
2453 }
2454}
2455
Mark Salyzyne6ed68b2014-04-30 13:36:35 -07002456int main(int argc __unused, char **argv __unused) {
Colin Cross1a0d9be2014-07-14 14:31:15 -07002457 struct sched_param param = {
2458 .sched_priority = 1,
2459 };
2460
Suren Baghdasaryan96bf3a62017-12-08 12:58:52 -08002461 /* By default disable low level vmpressure events */
2462 level_oomadj[VMPRESS_LEVEL_LOW] =
2463 property_get_int32("ro.lmk.low", OOM_SCORE_ADJ_MAX + 1);
2464 level_oomadj[VMPRESS_LEVEL_MEDIUM] =
2465 property_get_int32("ro.lmk.medium", 800);
2466 level_oomadj[VMPRESS_LEVEL_CRITICAL] =
2467 property_get_int32("ro.lmk.critical", 0);
Robert Beneacaeaa652017-08-11 16:03:20 -07002468 debug_process_killing = property_get_bool("ro.lmk.debug", false);
Suren Baghdasaryanad2fd912017-12-08 13:08:41 -08002469
2470 /* By default disable upgrade/downgrade logic */
2471 enable_pressure_upgrade =
2472 property_get_bool("ro.lmk.critical_upgrade", false);
2473 upgrade_pressure =
2474 (int64_t)property_get_int32("ro.lmk.upgrade_pressure", 100);
2475 downgrade_pressure =
2476 (int64_t)property_get_int32("ro.lmk.downgrade_pressure", 100);
Suren Baghdasaryan662492a2017-12-08 13:17:06 -08002477 kill_heaviest_task =
Suren Baghdasaryan818b59b2018-04-13 11:49:54 -07002478 property_get_bool("ro.lmk.kill_heaviest_task", false);
Suren Baghdasaryanff61afb2018-04-13 11:45:38 -07002479 low_ram_device = property_get_bool("ro.config.low_ram", false);
Suren Baghdasaryancaa2dc52018-01-17 17:28:01 -08002480 kill_timeout_ms =
2481 (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
Suren Baghdasaryanffdc4dd2018-04-13 13:53:43 -07002482 use_minfree_levels =
2483 property_get_bool("ro.lmk.use_minfree_levels", false);
Suren Baghdasaryance13cb52018-06-19 18:38:12 -07002484 per_app_memcg =
2485 property_get_bool("ro.config.per_app_memcg", low_ram_device);
Vic Yang360a1132018-08-07 10:18:22 -07002486 swap_free_low_percentage =
2487 property_get_int32("ro.lmk.swap_free_low_percentage", 10);
Robert Benea58891d52017-07-31 17:15:20 -07002488
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -07002489 ctx = create_android_logger(MEMINFO_LOG_TAG);
2490
Rajeev Kumar70450032018-01-31 17:54:56 -08002491#ifdef LMKD_LOG_STATS
Rajeev Kumar1c669f72018-03-09 15:20:56 -08002492 statslog_init(&log_ctx, &enable_stats_log);
Rajeev Kumar70450032018-01-31 17:54:56 -08002493#endif
2494
Mark Salyzyn721d7c72018-03-21 12:24:58 -07002495 if (!init()) {
2496 if (!use_inkernel_interface) {
2497 /*
2498 * MCL_ONFAULT pins pages as they fault instead of loading
2499 * everything immediately all at once. (Which would be bad,
2500 * because as of this writing, we have a lot of mapped pages we
2501 * never use.) Old kernels will see MCL_ONFAULT and fail with
2502 * EINVAL; we ignore this failure.
2503 *
2504 * N.B. read the man page for mlockall. MCL_CURRENT | MCL_ONFAULT
2505 * pins ⊆ MCL_CURRENT, converging to just MCL_CURRENT as we fault
2506 * in pages.
2507 */
Mark Salyzyn64d97d82018-04-09 09:50:32 -07002508 /* CAP_IPC_LOCK required */
Mark Salyzyn721d7c72018-03-21 12:24:58 -07002509 if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) && (errno != EINVAL)) {
2510 ALOGW("mlockall failed %s", strerror(errno));
2511 }
Daniel Colascione4dd5d002018-01-03 12:01:02 -08002512
Mark Salyzyn64d97d82018-04-09 09:50:32 -07002513 /* CAP_NICE required */
2514 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
2515 ALOGW("set SCHED_FIFO failed %s", strerror(errno));
2516 }
Mark Salyzyn721d7c72018-03-21 12:24:58 -07002517 }
2518
Todd Poynor3948f802013-07-09 19:35:14 -07002519 mainloop();
Mark Salyzyn721d7c72018-03-21 12:24:58 -07002520 }
Todd Poynor3948f802013-07-09 19:35:14 -07002521
Rajeev Kumar70450032018-01-31 17:54:56 -08002522#ifdef LMKD_LOG_STATS
Rajeev Kumar1c669f72018-03-09 15:20:56 -08002523 statslog_destroy(&log_ctx);
Rajeev Kumar70450032018-01-31 17:54:56 -08002524#endif
2525
Suren Baghdasaryan282ad1a2018-07-26 16:34:27 -07002526 android_log_destroy(&ctx);
2527
Todd Poynor3948f802013-07-09 19:35:14 -07002528 ALOGI("exiting");
2529 return 0;
2530}