Use mmap to read TFLite model.
The buffers in the model file are used in place by TFLite, so a small
memory saving can be achieved by backing those memory pages with the
file itself rather than with a heap-allocated copy of it (see the
sketch below).
Bug: 267050081
Test: atest libinput_tests
Change-Id: I743a3c94477d4bb778b6e0c4b4890a44f4e19aa4
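For illustration, a minimal sketch of what the mapping side of this change
could look like. MappedFile::FromFd, data(), and size() are the real
android-base API; the helper name, the path handling, and the error handling
are assumptions, not part of this change:

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <memory>

#include <android-base/mapped_file.h>
#include <android-base/unique_fd.h>

// Hypothetical helper: map a model file read-only so that its pages are
// backed by the file rather than by anonymous (swappable) memory.
std::unique_ptr<android::base::MappedFile> mapModelFile(const char* path) {
    android::base::unique_fd fd(open(path, O_RDONLY | O_CLOEXEC));
    if (fd.get() < 0) {
        return nullptr;
    }
    struct stat stats{};
    if (fstat(fd.get(), &stats) < 0) {
        return nullptr;
    }
    // Read-only pages stay clean, so under memory pressure the kernel can
    // simply drop them and re-read them from the file later.
    return android::base::MappedFile::FromFd(fd, /*offset=*/0,
                                             static_cast<size_t>(stats.st_size),
                                             PROT_READ);
}

MappedFile unmaps the region in its destructor, so the mapping has the same
RAII lifetime as the std::string it replaces.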
diff --git a/include/input/TfLiteMotionPredictor.h b/include/input/TfLiteMotionPredictor.h
index 6e9afc3..54e2851 100644
--- a/include/input/TfLiteMotionPredictor.h
+++ b/include/input/TfLiteMotionPredictor.h
@@ -22,8 +22,8 @@
 #include <memory>
 #include <optional>
 #include <span>
-#include <string>
 
+#include <android-base/mapped_file.h>
 #include <input/RingBuffer.h>
 #include <tensorflow/lite/core/api/error_reporter.h>
@@ -124,7 +124,7 @@
     std::span<const float> outputPressure() const;
 
 private:
-    explicit TfLiteMotionPredictorModel(std::string model);
+    explicit TfLiteMotionPredictorModel(std::unique_ptr<android::base::MappedFile> model);
 
     void allocateTensors();
     void attachInputTensors();
@@ -140,7 +140,7 @@
     const TfLiteTensor* mOutputPhi = nullptr;
     const TfLiteTensor* mOutputPressure = nullptr;
 
-    std::string mFlatBuffer;
+    std::unique_ptr<android::base::MappedFile> mFlatBuffer;
     std::unique_ptr<tflite::ErrorReporter> mErrorReporter;
     std::unique_ptr<tflite::FlatBufferModel> mModel;
    std::unique_ptr<tflite::Interpreter> mInterpreter;
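The corresponding TfLiteMotionPredictor.cpp change is not part of this diff.
Below is a minimal sketch of how the constructor could consume the mapping,
assuming it simply stores it and builds the model in place (error-reporter
and interpreter setup omitted). tflite::FlatBufferModel::BuildFromBuffer is
the real TFLite API; it wraps the caller-owned buffer without copying it:

#include <android-base/logging.h>
#include <input/TfLiteMotionPredictor.h>
#include <tensorflow/lite/model.h>

namespace android {

TfLiteMotionPredictorModel::TfLiteMotionPredictorModel(
        std::unique_ptr<android::base::MappedFile> model)
      : mFlatBuffer(std::move(model)) {
    CHECK(mFlatBuffer);
    // BuildFromBuffer reads the flatbuffer through the mapping without
    // copying it, so mFlatBuffer must stay alive as long as mModel.
    mModel = tflite::FlatBufferModel::BuildFromBuffer(mFlatBuffer->data(),
                                                      mFlatBuffer->size());
    CHECK(mModel) << "Failed to build TFLite model from mapped buffer.";
}

} // namespace android

Declaring mFlatBuffer before mModel, as the hunk above does, is what makes
this safe: members are destroyed in reverse declaration order, so the mapping
outlives the model that points into it.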