Merge "Simplify width, height, transform variable for BSL" into sc-dev
diff --git a/include/android/input.h b/include/android/input.h
index 7973487..6fe95c0 100644
--- a/include/android/input.h
+++ b/include/android/input.h
@@ -166,6 +166,9 @@
 
     /** Capture event */
     AINPUT_EVENT_TYPE_CAPTURE = 4,
+
+    /** Drag event */
+    AINPUT_EVENT_TYPE_DRAG = 5,
 };
 
 /**
diff --git a/include/input/Input.h b/include/input/Input.h
index aa42db8..f9fe6b9 100644
--- a/include/input/Input.h
+++ b/include/input/Input.h
@@ -792,6 +792,30 @@
     bool mPointerCaptureEnabled;
 };
 
+/*
+ * Drag events.
+ */
+class DragEvent : public InputEvent {
+public:
+    virtual ~DragEvent() {}
+
+    virtual int32_t getType() const override { return AINPUT_EVENT_TYPE_DRAG; }
+
+    inline bool isExiting() const { return mIsExiting; }
+
+    inline float getX() const { return mX; }
+
+    inline float getY() const { return mY; }
+
+    void initialize(int32_t id, float x, float y, bool isExiting);
+
+    void initialize(const DragEvent& from);
+
+protected:
+    bool mIsExiting;
+    float mX, mY;
+};
+
 /**
  * Base class for verified events.
  * Do not create a VerifiedInputEvent explicitly.
@@ -855,6 +879,7 @@
     virtual MotionEvent* createMotionEvent() = 0;
     virtual FocusEvent* createFocusEvent() = 0;
     virtual CaptureEvent* createCaptureEvent() = 0;
+    virtual DragEvent* createDragEvent() = 0;
 };
 
 /*
@@ -870,12 +895,14 @@
     virtual MotionEvent* createMotionEvent() override { return &mMotionEvent; }
     virtual FocusEvent* createFocusEvent() override { return &mFocusEvent; }
     virtual CaptureEvent* createCaptureEvent() override { return &mCaptureEvent; }
+    virtual DragEvent* createDragEvent() override { return &mDragEvent; }
 
 private:
     KeyEvent mKeyEvent;
     MotionEvent mMotionEvent;
     FocusEvent mFocusEvent;
     CaptureEvent mCaptureEvent;
+    DragEvent mDragEvent;
 };
 
 /*
@@ -890,6 +917,7 @@
     virtual MotionEvent* createMotionEvent() override;
     virtual FocusEvent* createFocusEvent() override;
     virtual CaptureEvent* createCaptureEvent() override;
+    virtual DragEvent* createDragEvent() override;
 
     void recycle(InputEvent* event);
 
@@ -900,6 +928,7 @@
     std::queue<std::unique_ptr<MotionEvent>> mMotionEventPool;
     std::queue<std::unique_ptr<FocusEvent>> mFocusEventPool;
     std::queue<std::unique_ptr<CaptureEvent>> mCaptureEventPool;
+    std::queue<std::unique_ptr<DragEvent>> mDragEventPool;
 };
 
 } // namespace android
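
The new DragEvent carries only an (x, y) location and an exit flag. A minimal sketch of how the two
initialize() overloads declared above relate, using only the accessors shown in this header (toy
code, not part of the change):

    #include <input/Input.h>

    void copyDragEvent() {
        android::DragEvent src;
        // Fills in the id, coordinates and exiting flag; device, source and display get defaults.
        src.initialize(android::InputEvent::nextId(), /*x=*/10.f, /*y=*/15.f, /*isExiting=*/false);

        android::DragEvent dst;
        dst.initialize(src);  // copies the base InputEvent fields plus x, y and isExiting

        // dst.getX() == 10.f, dst.getY() == 15.f, dst.isExiting() == false
    }
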
diff --git a/include/input/InputTransport.h b/include/input/InputTransport.h
index ba9ae20..f1b2258 100644
--- a/include/input/InputTransport.h
+++ b/include/input/InputTransport.h
@@ -69,6 +69,7 @@
         FINISHED,
         FOCUS,
         CAPTURE,
+        DRAG,
     };
 
     struct Header {
@@ -183,6 +184,16 @@
 
             inline size_t size() const { return sizeof(Capture); }
         } capture;
+
+        struct Drag {
+            int32_t eventId;
+            float x;
+            float y;
+            bool isExiting;
+            uint8_t empty[3];
+
+            inline size_t size() const { return sizeof(Drag); }
+        } drag;
     } __attribute__((aligned(8))) body;
 
     bool isValid(size_t actualSize) const;
@@ -354,6 +365,15 @@
      */
     status_t publishCaptureEvent(uint32_t seq, int32_t eventId, bool pointerCaptureEnabled);
 
+    /* Publishes a drag event to the input channel.
+     *
+     * Returns OK on success.
+     * Returns WOULD_BLOCK if the channel is full.
+     * Returns DEAD_OBJECT if the channel's peer has been closed.
+     * Other errors probably indicate that the channel is broken.
+     */
+    status_t publishDragEvent(uint32_t seq, int32_t eventId, float x, float y, bool isExiting);
+
     /* Receives the finished signal from the consumer in reply to the original dispatch signal.
      * If a signal was received, returns the message sequence number,
      * whether the consumer handled the message, and the time the event was first read by the
@@ -601,6 +621,7 @@
     static void initializeMotionEvent(MotionEvent* event, const InputMessage* msg);
     static void initializeFocusEvent(FocusEvent* event, const InputMessage* msg);
     static void initializeCaptureEvent(CaptureEvent* event, const InputMessage* msg);
+    static void initializeDragEvent(DragEvent* event, const InputMessage* msg);
     static void addSample(MotionEvent* event, const InputMessage* msg);
     static bool canAddSample(const Batch& batch, const InputMessage* msg);
     static ssize_t findSampleNoLaterThan(const Batch& batch, nsecs_t time);
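
On the publisher side the new message type is sent with a single call. A hedged sketch of a caller,
where publisher is an already-connected InputPublisher and nextSeq() is a hypothetical
per-connection sequence counter (neither is part of this change):

    uint32_t seq = nextSeq();
    int32_t eventId = android::InputEvent::nextId();
    status_t result = publisher.publishDragEvent(seq, eventId, /*x=*/42.0f, /*y=*/7.0f,
                                                 /*isExiting=*/false);
    if (result == android::WOULD_BLOCK) {
        // The channel is full; retry after the consumer has drained it.
    }
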
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 6df04f0..a17e482 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -214,6 +214,7 @@
         "-misc-redundant-expression",
         "-misc-unused-using-decls",
         "performance*",
+        "-performance-no-int-to-ptr",
         "portability*",
     ],
 
diff --git a/libs/input/Input.cpp b/libs/input/Input.cpp
index 0a00d68..5600eb3 100644
--- a/libs/input/Input.cpp
+++ b/libs/input/Input.cpp
@@ -92,6 +92,9 @@
         case AINPUT_EVENT_TYPE_CAPTURE: {
             return "CAPTURE";
         }
+        case AINPUT_EVENT_TYPE_DRAG: {
+            return "DRAG";
+        }
     }
     return "UNKNOWN";
 }
@@ -770,6 +773,23 @@
     mPointerCaptureEnabled = from.mPointerCaptureEnabled;
 }
 
+// --- DragEvent ---
+
+void DragEvent::initialize(int32_t id, float x, float y, bool isExiting) {
+    InputEvent::initialize(id, ReservedInputDeviceId::VIRTUAL_KEYBOARD_ID, AINPUT_SOURCE_UNKNOWN,
+                           ADISPLAY_ID_NONE, INVALID_HMAC);
+    mIsExiting = isExiting;
+    mX = x;
+    mY = y;
+}
+
+void DragEvent::initialize(const DragEvent& from) {
+    InputEvent::initialize(from);
+    mIsExiting = from.mIsExiting;
+    mX = from.mX;
+    mY = from.mY;
+}
+
 // --- PooledInputEventFactory ---
 
 PooledInputEventFactory::PooledInputEventFactory(size_t maxPoolSize) :
@@ -815,6 +835,15 @@
     return event;
 }
 
+DragEvent* PooledInputEventFactory::createDragEvent() {
+    if (mDragEventPool.empty()) {
+        return new DragEvent();
+    }
+    DragEvent* event = mDragEventPool.front().release();
+    mDragEventPool.pop();
+    return event;
+}
+
 void PooledInputEventFactory::recycle(InputEvent* event) {
     switch (event->getType()) {
     case AINPUT_EVENT_TYPE_KEY:
@@ -842,6 +871,12 @@
             return;
         }
         break;
+    case AINPUT_EVENT_TYPE_DRAG:
+        if (mDragEventPool.size() < mMaxPoolSize) {
+            mDragEventPool.push(std::unique_ptr<DragEvent>(static_cast<DragEvent*>(event)));
+            return;
+        }
+        break;
     }
     delete event;
 }
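
PooledInputEventFactory now hands out and recycles DragEvent objects the same way as the other
event types. A small sketch of the create/recycle cycle (illustrative only):

    android::PooledInputEventFactory factory(/*maxPoolSize=*/20);

    android::DragEvent* drag = factory.createDragEvent();   // pool is empty, allocates a new object
    drag->initialize(android::InputEvent::nextId(), 10.f, 15.f, /*isExiting=*/false);

    factory.recycle(drag);                                   // stored in mDragEventPool
    android::DragEvent* reused = factory.createDragEvent();  // same object, taken from the pool
    factory.recycle(reused);
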
diff --git a/libs/input/InputTransport.cpp b/libs/input/InputTransport.cpp
index ee2daec..6ef0173 100644
--- a/libs/input/InputTransport.cpp
+++ b/libs/input/InputTransport.cpp
@@ -108,6 +108,8 @@
                 return true;
             case Type::CAPTURE:
                 return true;
+            case Type::DRAG:
+                return true;
         }
     }
     return false;
@@ -125,6 +127,8 @@
             return sizeof(Header) + body.focus.size();
         case Type::CAPTURE:
             return sizeof(Header) + body.capture.size();
+        case Type::DRAG:
+            return sizeof(Header) + body.drag.size();
     }
     return sizeof(Header);
 }
@@ -249,6 +253,13 @@
             msg->body.capture.pointerCaptureEnabled = body.capture.pointerCaptureEnabled;
             break;
         }
+        case InputMessage::Type::DRAG: {
+            msg->body.drag.eventId = body.drag.eventId;
+            msg->body.drag.x = body.drag.x;
+            msg->body.drag.y = body.drag.y;
+            msg->body.drag.isExiting = body.drag.isExiting;
+            break;
+        }
     }
 }
 
@@ -599,6 +610,25 @@
     return mChannel->sendMessage(&msg);
 }
 
+status_t InputPublisher::publishDragEvent(uint32_t seq, int32_t eventId, float x, float y,
+                                          bool isExiting) {
+    if (ATRACE_ENABLED()) {
+        std::string message =
+                StringPrintf("publishDragEvent(inputChannel=%s, x=%f, y=%f, isExiting=%s)",
+                             mChannel->getName().c_str(), x, y, toString(isExiting));
+        ATRACE_NAME(message.c_str());
+    }
+
+    InputMessage msg;
+    msg.header.type = InputMessage::Type::DRAG;
+    msg.header.seq = seq;
+    msg.body.drag.eventId = eventId;
+    msg.body.drag.isExiting = isExiting;
+    msg.body.drag.x = x;
+    msg.body.drag.y = y;
+    return mChannel->sendMessage(&msg);
+}
+
 status_t InputPublisher::receiveFinishedSignal(
         const std::function<void(uint32_t seq, bool handled, nsecs_t consumeTime)>& callback) {
     if (DEBUG_TRANSPORT_ACTIONS) {
@@ -779,6 +809,16 @@
                 *outEvent = captureEvent;
                 break;
             }
+
+            case InputMessage::Type::DRAG: {
+                DragEvent* dragEvent = factory->createDragEvent();
+                if (!dragEvent) return NO_MEMORY;
+
+                initializeDragEvent(dragEvent, &mMsg);
+                *outSeq = mMsg.header.seq;
+                *outEvent = dragEvent;
+                break;
+            }
         }
     }
     return OK;
@@ -1236,6 +1276,11 @@
     event->initialize(msg->body.capture.eventId, msg->body.capture.pointerCaptureEnabled);
 }
 
+void InputConsumer::initializeDragEvent(DragEvent* event, const InputMessage* msg) {
+    event->initialize(msg->body.drag.eventId, msg->body.drag.x, msg->body.drag.y,
+                      msg->body.drag.isExiting);
+}
+
 void InputConsumer::initializeMotionEvent(MotionEvent* event, const InputMessage* msg) {
     uint32_t pointerCount = msg->body.motion.pointerCount;
     PointerProperties pointerProperties[pointerCount];
@@ -1346,6 +1391,12 @@
                                                                         .pointerCaptureEnabled));
                     break;
                 }
+                case InputMessage::Type::DRAG: {
+                    out += android::base::StringPrintf("x=%.1f y=%.1f, isExiting=%s",
+                                                       msg.body.drag.x, msg.body.drag.y,
+                                                       toString(msg.body.drag.isExiting));
+                    break;
+                }
             }
             out += "\n";
         }
diff --git a/libs/input/tests/InputPublisherAndConsumer_test.cpp b/libs/input/tests/InputPublisherAndConsumer_test.cpp
index e7e566d..b5ed8d7 100644
--- a/libs/input/tests/InputPublisherAndConsumer_test.cpp
+++ b/libs/input/tests/InputPublisherAndConsumer_test.cpp
@@ -52,6 +52,7 @@
     void PublishAndConsumeMotionEvent();
     void PublishAndConsumeFocusEvent();
     void PublishAndConsumeCaptureEvent();
+    void PublishAndConsumeDragEvent();
 };
 
 TEST_F(InputPublisherAndConsumerTest, GetChannel_ReturnsTheChannel) {
@@ -301,7 +302,7 @@
     const nsecs_t publishTime = systemTime(SYSTEM_TIME_MONOTONIC);
 
     status = mPublisher->publishFocusEvent(seq, eventId, hasFocus, inTouchMode);
-    ASSERT_EQ(OK, status) << "publisher publishKeyEvent should return OK";
+    ASSERT_EQ(OK, status) << "publisher publishFocusEvent should return OK";
 
     uint32_t consumeSeq;
     InputEvent* event;
@@ -349,7 +350,7 @@
     const nsecs_t publishTime = systemTime(SYSTEM_TIME_MONOTONIC);
 
     status = mPublisher->publishCaptureEvent(seq, eventId, captureEnabled);
-    ASSERT_EQ(OK, status) << "publisher publishKeyEvent should return OK";
+    ASSERT_EQ(OK, status) << "publisher publishCaptureEvent should return OK";
 
     uint32_t consumeSeq;
     InputEvent* event;
@@ -387,6 +388,57 @@
             << "finished signal's consume time should be greater than publish time";
 }
 
+void InputPublisherAndConsumerTest::PublishAndConsumeDragEvent() {
+    status_t status;
+
+    constexpr uint32_t seq = 15;
+    int32_t eventId = InputEvent::nextId();
+    constexpr bool isExiting = false;
+    constexpr float x = 10;
+    constexpr float y = 15;
+    const nsecs_t publishTime = systemTime(SYSTEM_TIME_MONOTONIC);
+
+    status = mPublisher->publishDragEvent(seq, eventId, x, y, isExiting);
+    ASSERT_EQ(OK, status) << "publisher publishDragEvent should return OK";
+
+    uint32_t consumeSeq;
+    InputEvent* event;
+    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    ASSERT_EQ(OK, status) << "consumer consume should return OK";
+
+    ASSERT_TRUE(event != nullptr) << "consumer should have returned non-NULL event";
+    ASSERT_EQ(AINPUT_EVENT_TYPE_DRAG, event->getType())
+            << "consumer should have returned a drag event";
+
+    DragEvent* dragEvent = static_cast<DragEvent*>(event);
+    EXPECT_EQ(seq, consumeSeq);
+    EXPECT_EQ(eventId, dragEvent->getId());
+    EXPECT_EQ(isExiting, dragEvent->isExiting());
+    EXPECT_EQ(x, dragEvent->getX());
+    EXPECT_EQ(y, dragEvent->getY());
+
+    status = mConsumer->sendFinishedSignal(seq, true);
+    ASSERT_EQ(OK, status) << "consumer sendFinishedSignal should return OK";
+
+    uint32_t finishedSeq = 0;
+    bool handled = false;
+    nsecs_t consumeTime;
+    status = mPublisher->receiveFinishedSignal(
+            [&finishedSeq, &handled, &consumeTime](uint32_t inSeq, bool inHandled,
+                                                   nsecs_t inConsumeTime) -> void {
+                finishedSeq = inSeq;
+                handled = inHandled;
+                consumeTime = inConsumeTime;
+            });
+    ASSERT_EQ(OK, status) << "publisher receiveFinishedSignal should return OK";
+    ASSERT_EQ(seq, finishedSeq)
+            << "publisher receiveFinishedSignal should have returned the original sequence number";
+    ASSERT_TRUE(handled)
+            << "publisher receiveFinishedSignal should have set handled to consumer's reply";
+    ASSERT_GE(consumeTime, publishTime)
+            << "finished signal's consume time should be greater than publish time";
+}
+
 TEST_F(InputPublisherAndConsumerTest, PublishKeyEvent_EndToEnd) {
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeKeyEvent());
 }
@@ -403,6 +455,10 @@
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeCaptureEvent());
 }
 
+TEST_F(InputPublisherAndConsumerTest, PublishDragEvent_EndToEnd) {
+    ASSERT_NO_FATAL_FAILURE(PublishAndConsumeDragEvent());
+}
+
 TEST_F(InputPublisherAndConsumerTest, PublishMotionEvent_WhenSequenceNumberIsZero_ReturnsError) {
     status_t status;
     const size_t pointerCount = 1;
@@ -468,6 +524,7 @@
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeMotionEvent());
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeKeyEvent());
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeCaptureEvent());
+    ASSERT_NO_FATAL_FAILURE(PublishAndConsumeDragEvent());
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeMotionEvent());
     ASSERT_NO_FATAL_FAILURE(PublishAndConsumeKeyEvent());
 }
diff --git a/libs/input/tests/StructLayout_test.cpp b/libs/input/tests/StructLayout_test.cpp
index 8f43608..3d80b38 100644
--- a/libs/input/tests/StructLayout_test.cpp
+++ b/libs/input/tests/StructLayout_test.cpp
@@ -87,6 +87,12 @@
   CHECK_OFFSET(InputMessage::Body::Capture, pointerCaptureEnabled, 4);
   CHECK_OFFSET(InputMessage::Body::Capture, empty, 5);
 
+  CHECK_OFFSET(InputMessage::Body::Drag, eventId, 0);
+  CHECK_OFFSET(InputMessage::Body::Drag, x, 4);
+  CHECK_OFFSET(InputMessage::Body::Drag, y, 8);
+  CHECK_OFFSET(InputMessage::Body::Drag, isExiting, 12);
+  CHECK_OFFSET(InputMessage::Body::Drag, empty, 13);
+
   CHECK_OFFSET(InputMessage::Body::Finished, handled, 0);
   CHECK_OFFSET(InputMessage::Body::Finished, empty, 1);
   CHECK_OFFSET(InputMessage::Body::Finished, consumeTime, 8);
@@ -110,6 +116,7 @@
     static_assert(sizeof(InputMessage::Body::Finished) == 16);
     static_assert(sizeof(InputMessage::Body::Focus) == 8);
     static_assert(sizeof(InputMessage::Body::Capture) == 8);
+    static_assert(sizeof(InputMessage::Body::Drag) == 16);
 }
 
 // --- VerifiedInputEvent ---
diff --git a/libs/renderengine/RenderEngine.cpp b/libs/renderengine/RenderEngine.cpp
index 79839c1..0c5a851 100644
--- a/libs/renderengine/RenderEngine.cpp
+++ b/libs/renderengine/RenderEngine.cpp
@@ -85,5 +85,15 @@
 
 RenderEngine::~RenderEngine() = default;
 
+void RenderEngine::validateInputBufferUsage(const sp<GraphicBuffer>& buffer) {
+    LOG_ALWAYS_FATAL_IF(!(buffer->getUsage() & GraphicBuffer::USAGE_HW_TEXTURE),
+                        "input buffer not gpu readable");
+}
+
+void RenderEngine::validateOutputBufferUsage(const sp<GraphicBuffer>& buffer) {
+    LOG_ALWAYS_FATAL_IF(!(buffer->getUsage() & GraphicBuffer::USAGE_HW_RENDER),
+                        "output buffer not gpu writeable");
+}
+
 } // namespace renderengine
 } // namespace android
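
The two validators only inspect usage bits and abort otherwise. A sketch of buffers that satisfy
them, written against the existing GraphicBuffer API; the dimensions and requestor names are made
up:

    // A buffer RenderEngine samples from must be GPU-readable, and a buffer it renders
    // into must be GPU-writable.
    sp<GraphicBuffer> input =
            new GraphicBuffer(1920, 1080, PIXEL_FORMAT_RGBA_8888, /*layerCount=*/1,
                              GraphicBuffer::USAGE_HW_TEXTURE | GraphicBuffer::USAGE_SW_WRITE_RARELY,
                              "example_input");
    sp<GraphicBuffer> output =
            new GraphicBuffer(1920, 1080, PIXEL_FORMAT_RGBA_8888, /*layerCount=*/1,
                              GraphicBuffer::USAGE_HW_RENDER | GraphicBuffer::USAGE_HW_COMPOSER,
                              "example_output");

    renderengine::RenderEngine::validateInputBufferUsage(input);    // passes: USAGE_HW_TEXTURE set
    renderengine::RenderEngine::validateOutputBufferUsage(output);  // passes: USAGE_HW_RENDER set
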
diff --git a/libs/renderengine/gl/GLESRenderEngine.cpp b/libs/renderengine/gl/GLESRenderEngine.cpp
index 397f038..2b09c15 100644
--- a/libs/renderengine/gl/GLESRenderEngine.cpp
+++ b/libs/renderengine/gl/GLESRenderEngine.cpp
@@ -1125,6 +1125,8 @@
         return BAD_VALUE;
     }
 
+    validateOutputBufferUsage(buffer);
+
     std::unique_ptr<BindNativeBufferAsFramebuffer> fbo;
     // Gathering layers that requested blur, we'll need them to decide when to render to an
     // offscreen buffer, and when to render to the native buffer.
@@ -1249,6 +1251,7 @@
             isOpaque = layer->source.buffer.isOpaque;
 
             sp<GraphicBuffer> gBuf = layer->source.buffer.buffer;
+            validateInputBufferUsage(gBuf);
             bindExternalTextureBuffer(layer->source.buffer.textureName, gBuf,
                                       layer->source.buffer.fence);
 
diff --git a/libs/renderengine/include/renderengine/RenderEngine.h b/libs/renderengine/include/renderengine/RenderEngine.h
index 572d348..ddae34a 100644
--- a/libs/renderengine/include/renderengine/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/RenderEngine.h
@@ -201,6 +201,9 @@
     // we should not allow in general, so remove this.
     RenderEngineType getRenderEngineType() const { return mRenderEngineType; }
 
+    static void validateInputBufferUsage(const sp<GraphicBuffer>&);
+    static void validateOutputBufferUsage(const sp<GraphicBuffer>&);
+
 protected:
     friend class threaded::RenderEngineThreaded;
     const RenderEngineType mRenderEngineType;
diff --git a/libs/renderengine/skia/SkiaGLRenderEngine.cpp b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
index cbb02a3..91b163e 100644
--- a/libs/renderengine/skia/SkiaGLRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
@@ -598,6 +598,8 @@
         return BAD_VALUE;
     }
 
+    validateOutputBufferUsage(buffer);
+
     auto grContext = mInProtectedContext ? mProtectedGrContext : mGrContext;
     auto& cache = mInProtectedContext ? mProtectedTextureCache : mTextureCache;
     AHardwareBuffer_Desc bufferDesc;
@@ -815,6 +817,7 @@
         SkPaint paint;
         if (layer->source.buffer.buffer) {
             ATRACE_NAME("DrawImage");
+            validateInputBufferUsage(layer->source.buffer.buffer);
             const auto& item = layer->source.buffer;
             std::shared_ptr<AutoBackendTexture::LocalRef> imageTextureRef = nullptr;
             auto iter = mTextureCache.find(item.buffer->getId());
diff --git a/libs/renderengine/skia/filters/BlurFilter.cpp b/libs/renderengine/skia/filters/BlurFilter.cpp
index 5960e48..ec710d9 100644
--- a/libs/renderengine/skia/filters/BlurFilter.cpp
+++ b/libs/renderengine/skia/filters/BlurFilter.cpp
@@ -36,13 +36,18 @@
     SkString blurString(R"(
         in shader input;
         uniform float2 in_blurOffset;
+        uniform float2 in_maxSizeXY;
 
         half4 main(float2 xy) {
             half4 c = sample(input, xy);
-            c += sample(input, xy + float2( in_blurOffset.x,  in_blurOffset.y));
-            c += sample(input, xy + float2( in_blurOffset.x, -in_blurOffset.y));
-            c += sample(input, xy + float2(-in_blurOffset.x,  in_blurOffset.y));
-            c += sample(input, xy + float2(-in_blurOffset.x, -in_blurOffset.y));
+            c += sample(input, float2( clamp( in_blurOffset.x + xy.x, 0, in_maxSizeXY.x),
+                                       clamp(in_blurOffset.y + xy.y, 0, in_maxSizeXY.y)));
+            c += sample(input, float2( clamp( in_blurOffset.x + xy.x, 0, in_maxSizeXY.x),
+                                       clamp(-in_blurOffset.y + xy.y, 0, in_maxSizeXY.y)));
+            c += sample(input, float2( clamp( -in_blurOffset.x + xy.x, 0, in_maxSizeXY.x),
+                                       clamp(in_blurOffset.y + xy.y, 0, in_maxSizeXY.y)));
+            c += sample(input, float2( clamp( -in_blurOffset.x + xy.x, 0, in_maxSizeXY.x),
+                                       clamp(-in_blurOffset.y + xy.y, 0, in_maxSizeXY.y)));
 
             return half4(c.rgb * 0.2, 1.0);
         }
@@ -99,6 +104,8 @@
     blurBuilder.child("input") =
             input->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, linear, blurMatrix);
     blurBuilder.uniform("in_blurOffset") = SkV2{stepX * kInputScale, stepY * kInputScale};
+    blurBuilder.uniform("in_maxSizeXY") =
+            SkV2{blurRect.width() * kInputScale - 1, blurRect.height() * kInputScale - 1};
 
     sk_sp<SkImage> tmpBlur(blurBuilder.makeImage(context, nullptr, scaledInfo, false));
 
@@ -108,6 +115,8 @@
         blurBuilder.child("input") =
                 tmpBlur->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, linear);
         blurBuilder.uniform("in_blurOffset") = SkV2{stepX * stepScale, stepY * stepScale};
+        blurBuilder.uniform("in_maxSizeXY") =
+                SkV2{blurRect.width() * kInputScale - 1, blurRect.height() * kInputScale - 1};
         tmpBlur = blurBuilder.makeImage(context, nullptr, scaledInfo, false);
     }
 
diff --git a/services/inputflinger/dispatcher/Entry.cpp b/services/inputflinger/dispatcher/Entry.cpp
index a19b04f..5270b8a 100644
--- a/services/inputflinger/dispatcher/Entry.cpp
+++ b/services/inputflinger/dispatcher/Entry.cpp
@@ -130,6 +130,23 @@
                         pointerCaptureEnabled ? "true" : "false");
 }
 
+// --- DragEntry ---
+
+// Drag notifications always go to apps, so set the flag POLICY_FLAG_PASS_TO_USER for all entries
+DragEntry::DragEntry(int32_t id, nsecs_t eventTime, sp<IBinder> connectionToken, bool isExiting,
+                     float x, float y)
+      : EventEntry(id, Type::DRAG, eventTime, POLICY_FLAG_PASS_TO_USER),
+        connectionToken(connectionToken),
+        isExiting(isExiting),
+        x(x),
+        y(y) {}
+
+DragEntry::~DragEntry() {}
+
+std::string DragEntry::getDescription() const {
+    return StringPrintf("DragEntry(isExiting=%s, x=%f, y=%f)", isExiting ? "true" : "false", x, y);
+}
+
 // --- KeyEntry ---
 
 KeyEntry::KeyEntry(int32_t id, nsecs_t eventTime, int32_t deviceId, uint32_t source,
diff --git a/services/inputflinger/dispatcher/Entry.h b/services/inputflinger/dispatcher/Entry.h
index ed17e68..e5fb26c 100644
--- a/services/inputflinger/dispatcher/Entry.h
+++ b/services/inputflinger/dispatcher/Entry.h
@@ -38,6 +38,7 @@
         MOTION,
         SENSOR,
         POINTER_CAPTURE_CHANGED,
+        DRAG,
     };
 
     int32_t id;
@@ -111,6 +112,18 @@
     virtual ~PointerCaptureChangedEntry();
 };
 
+struct DragEntry : EventEntry {
+    sp<IBinder> connectionToken;
+    bool isExiting;
+    float x, y;
+
+    DragEntry(int32_t id, nsecs_t eventTime, sp<IBinder> connectionToken, bool isExiting, float x,
+              float y);
+    std::string getDescription() const override;
+
+    ~DragEntry() override;
+};
+
 struct KeyEntry : EventEntry {
     int32_t deviceId;
     uint32_t source;
diff --git a/services/inputflinger/dispatcher/InputDispatcher.cpp b/services/inputflinger/dispatcher/InputDispatcher.cpp
index 19f8694..3e80bd7 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.cpp
+++ b/services/inputflinger/dispatcher/InputDispatcher.cpp
@@ -332,10 +332,11 @@
                                                           int32_t inputTargetFlags) {
     if (eventEntry->type == EventEntry::Type::MOTION) {
         const MotionEntry& motionEntry = static_cast<const MotionEntry&>(*eventEntry);
-        if (motionEntry.source & AINPUT_SOURCE_CLASS_JOYSTICK) {
+        if ((motionEntry.source & AINPUT_SOURCE_CLASS_POINTER) == 0) {
             const ui::Transform identityTransform;
-            // Use identity transform for joystick events events because they don't depend on
-            // the window info
+            // Use identity transform for events that are not pointer events, because their axis
+            // values do not represent on-screen coordinates, so they should not have any window
+            // transformations applied to them.
             return std::make_unique<DispatchEntry>(eventEntry, inputTargetFlags, identityTransform,
                                                    1.0f /*globalScaleFactor*/);
         }
@@ -751,6 +752,14 @@
             break;
         }
 
+        case EventEntry::Type::DRAG: {
+            std::shared_ptr<DragEntry> typedEntry =
+                    std::static_pointer_cast<DragEntry>(mPendingEvent);
+            dispatchDragLocked(currentTime, typedEntry);
+            done = true;
+            break;
+        }
+
         case EventEntry::Type::KEY: {
             std::shared_ptr<KeyEntry> keyEntry = std::static_pointer_cast<KeyEntry>(mPendingEvent);
             if (isAppSwitchDue) {
@@ -919,7 +928,8 @@
         case EventEntry::Type::CONFIGURATION_CHANGED:
         case EventEntry::Type::DEVICE_RESET:
         case EventEntry::Type::SENSOR:
-        case EventEntry::Type::POINTER_CAPTURE_CHANGED: {
+        case EventEntry::Type::POINTER_CAPTURE_CHANGED:
+        case EventEntry::Type::DRAG: {
             // nothing to do
             break;
         }
@@ -941,7 +951,8 @@
 sp<InputWindowHandle> InputDispatcher::findTouchedWindowAtLocked(int32_t displayId, int32_t x,
                                                                  int32_t y, TouchState* touchState,
                                                                  bool addOutsideTargets,
-                                                                 bool addPortalWindows) {
+                                                                 bool addPortalWindows,
+                                                                 bool ignoreDragWindow) {
     if ((addPortalWindows || addOutsideTargets) && touchState == nullptr) {
         LOG_ALWAYS_FATAL(
                 "Must provide a valid touch state if adding portal windows or outside targets");
@@ -949,6 +960,9 @@
     // Traverse windows from front to back to find touched window.
     const std::vector<sp<InputWindowHandle>>& windowHandles = getWindowHandlesLocked(displayId);
     for (const sp<InputWindowHandle>& windowHandle : windowHandles) {
+        if (ignoreDragWindow && haveSameToken(windowHandle, touchState->dragWindow)) {
+            continue;
+        }
         const InputWindowInfo* windowInfo = windowHandle->getInfo();
         if (windowInfo->displayId == displayId) {
             auto flags = windowInfo->flags;
@@ -1058,7 +1072,8 @@
         case EventEntry::Type::SENSOR: {
             break;
         }
-        case EventEntry::Type::POINTER_CAPTURE_CHANGED: {
+        case EventEntry::Type::POINTER_CAPTURE_CHANGED:
+        case EventEntry::Type::DRAG: {
             break;
         }
         case EventEntry::Type::FOCUS:
@@ -1552,6 +1567,35 @@
     return true;
 }
 
+void InputDispatcher::enqueueDragEventLocked(const sp<InputWindowHandle>& windowHandle,
+                                             bool isExiting, const MotionEntry& motionEntry) {
+    // If the window needs to enqueue a drag event, the pointerCount should be 1 and the action
+    // should be AMOTION_EVENT_ACTION_MOVE, which guarantees that the first pointer is valid.
+    LOG_ALWAYS_FATAL_IF(motionEntry.pointerCount != 1);
+    PointerCoords pointerCoords;
+    pointerCoords.copyFrom(motionEntry.pointerCoords[0]);
+    pointerCoords.transform(windowHandle->getInfo()->transform);
+
+    std::unique_ptr<DragEntry> dragEntry =
+            std::make_unique<DragEntry>(mIdGenerator.nextId(), motionEntry.eventTime,
+                                        windowHandle->getToken(), isExiting, pointerCoords.getX(),
+                                        pointerCoords.getY());
+
+    enqueueInboundEventLocked(std::move(dragEntry));
+}
+
+void InputDispatcher::dispatchDragLocked(nsecs_t currentTime, std::shared_ptr<DragEntry> entry) {
+    std::shared_ptr<InputChannel> channel = getInputChannelLocked(entry->connectionToken);
+    if (channel == nullptr) {
+        return; // Window has gone away
+    }
+    InputTarget target;
+    target.inputChannel = channel;
+    target.flags = InputTarget::FLAG_DISPATCH_AS_IS;
+    entry->dispatchInProgress = true;
+    dispatchEventLocked(currentTime, entry, {target});
+}
+
 void InputDispatcher::logOutboundMotionDetails(const char* prefix, const MotionEntry& entry) {
 #if DEBUG_OUTBOUND_EVENT_DETAILS
     ALOGD("%seventTime=%" PRId64 ", deviceId=%d, source=0x%x, displayId=%" PRId32
@@ -1658,7 +1702,8 @@
         case EventEntry::Type::FOCUS:
         case EventEntry::Type::CONFIGURATION_CHANGED:
         case EventEntry::Type::DEVICE_RESET:
-        case EventEntry::Type::SENSOR: {
+        case EventEntry::Type::SENSOR:
+        case EventEntry::Type::DRAG: {
             ALOGE("%s events do not have a target display", NamedEnum::string(entry.type).c_str());
             return ADISPLAY_ID_NONE;
         }
@@ -2015,6 +2060,8 @@
             goto Failed;
         }
 
+        addDragEventLocked(entry, tempTouchState);
+
         // Check whether touches should slip outside of the current foreground window.
         if (maskedAction == AMOTION_EVENT_ACTION_MOVE && entry.pointerCount == 1 &&
             tempTouchState.isSlippery()) {
@@ -2270,6 +2317,38 @@
     return injectionResult;
 }
 
+void InputDispatcher::addDragEventLocked(const MotionEntry& entry, TouchState& state) {
+    if (entry.pointerCount != 1 || !state.dragWindow) {
+        return;
+    }
+
+    int32_t maskedAction = entry.action & AMOTION_EVENT_ACTION_MASK;
+    int32_t x = static_cast<int32_t>(entry.pointerCoords[0].getAxisValue(AMOTION_EVENT_AXIS_X));
+    int32_t y = static_cast<int32_t>(entry.pointerCoords[0].getAxisValue(AMOTION_EVENT_AXIS_Y));
+    if (maskedAction == AMOTION_EVENT_ACTION_MOVE) {
+        const sp<InputWindowHandle> hoverWindowHandle =
+                findTouchedWindowAtLocked(entry.displayId, x, y, &state,
+                                          false /*addOutsideTargets*/, false /*addPortalWindows*/,
+                                          true /*ignoreDragWindow*/);
+        // Enqueue a drag-exit event if needed.
+        if (hoverWindowHandle != state.dragHoverWindowHandle &&
+            !haveSameToken(hoverWindowHandle, state.dragHoverWindowHandle)) {
+            if (state.dragHoverWindowHandle != nullptr) {
+                enqueueDragEventLocked(state.dragHoverWindowHandle, true /*isExiting*/, entry);
+            }
+            state.dragHoverWindowHandle = hoverWindowHandle;
+        }
+        // Enqueue a drag-location event if needed.
+        if (hoverWindowHandle != nullptr) {
+            enqueueDragEventLocked(hoverWindowHandle, false /*isExiting*/, entry);
+        }
+    } else if (maskedAction == AMOTION_EVENT_ACTION_UP ||
+               maskedAction == AMOTION_EVENT_ACTION_CANCEL) {
+        state.dragWindow = nullptr;
+        state.dragHoverWindowHandle = nullptr;
+    }
+}
+
 void InputDispatcher::addWindowTargetLocked(const sp<InputWindowHandle>& windowHandle,
                                             int32_t targetFlags, BitSet32 pointerIds,
                                             std::vector<InputTarget>& inputTargets) {
@@ -2541,7 +2620,8 @@
 
 void InputDispatcher::pokeUserActivityLocked(const EventEntry& eventEntry) {
     if (eventEntry.type == EventEntry::Type::FOCUS ||
-        eventEntry.type == EventEntry::Type::POINTER_CAPTURE_CHANGED) {
+        eventEntry.type == EventEntry::Type::POINTER_CAPTURE_CHANGED ||
+        eventEntry.type == EventEntry::Type::DRAG) {
         // Focus or pointer capture changed events are passed to apps, but do not represent user
         // activity.
         return;
@@ -2583,7 +2663,8 @@
         case EventEntry::Type::CONFIGURATION_CHANGED:
         case EventEntry::Type::DEVICE_RESET:
         case EventEntry::Type::SENSOR:
-        case EventEntry::Type::POINTER_CAPTURE_CHANGED: {
+        case EventEntry::Type::POINTER_CAPTURE_CHANGED:
+        case EventEntry::Type::DRAG: {
             LOG_ALWAYS_FATAL("%s events are not user activity",
                              NamedEnum::string(eventEntry.type).c_str());
             break;
@@ -2797,7 +2878,8 @@
             break;
         }
         case EventEntry::Type::FOCUS:
-        case EventEntry::Type::POINTER_CAPTURE_CHANGED: {
+        case EventEntry::Type::POINTER_CAPTURE_CHANGED:
+        case EventEntry::Type::DRAG: {
             break;
         }
         case EventEntry::Type::SENSOR: {
@@ -3020,6 +3102,15 @@
                 break;
             }
 
+            case EventEntry::Type::DRAG: {
+                const DragEntry& dragEntry = static_cast<const DragEntry&>(eventEntry);
+                status = connection->inputPublisher.publishDragEvent(dispatchEntry->seq,
+                                                                     dragEntry.id, dragEntry.x,
+                                                                     dragEntry.y,
+                                                                     dragEntry.isExiting);
+                break;
+            }
+
             case EventEntry::Type::CONFIGURATION_CHANGED:
             case EventEntry::Type::DEVICE_RESET:
             case EventEntry::Type::SENSOR: {
@@ -3317,7 +3408,8 @@
                 break;
             }
             case EventEntry::Type::FOCUS:
-            case EventEntry::Type::POINTER_CAPTURE_CHANGED: {
+            case EventEntry::Type::POINTER_CAPTURE_CHANGED:
+            case EventEntry::Type::DRAG: {
                 LOG_ALWAYS_FATAL("Canceling %s events is not supported",
                                  NamedEnum::string(cancelationEventEntry->type).c_str());
                 break;
@@ -3382,7 +3474,8 @@
             case EventEntry::Type::CONFIGURATION_CHANGED:
             case EventEntry::Type::DEVICE_RESET:
             case EventEntry::Type::POINTER_CAPTURE_CHANGED:
-            case EventEntry::Type::SENSOR: {
+            case EventEntry::Type::SENSOR:
+            case EventEntry::Type::DRAG: {
                 LOG_ALWAYS_FATAL("%s event should not be found inside Connections's queue",
                                  NamedEnum::string(downEventEntry->type).c_str());
                 break;
@@ -4346,6 +4439,15 @@
                 ++i;
             }
         }
+
+        // If the drag window is gone, it will receive a cancel event and broadcast DRAG_END, so
+        // we can just clear the state here.
+        if (state.dragWindow &&
+            std::find(windowHandles.begin(), windowHandles.end(), state.dragWindow) ==
+                    windowHandles.end()) {
+            state.dragWindow = nullptr;
+            state.dragHoverWindowHandle = nullptr;
+        }
     }
 
     // Release information for windows that are no longer present.
@@ -4536,7 +4638,8 @@
     mBlockUntrustedTouchesMode = mode;
 }
 
-bool InputDispatcher::transferTouchFocus(const sp<IBinder>& fromToken, const sp<IBinder>& toToken) {
+bool InputDispatcher::transferTouchFocus(const sp<IBinder>& fromToken, const sp<IBinder>& toToken,
+                                         bool isDragDrop) {
     if (fromToken == toToken) {
         if (DEBUG_FOCUS) {
             ALOGD("Trivial transfer to same window.");
@@ -4580,6 +4683,11 @@
                              InputTarget::FLAG_DISPATCH_AS_IS);
                     state.addOrUpdateWindow(toWindowHandle, newTargetFlags, pointerIds);
 
+                    // Store the dragging window.
+                    if (isDragDrop) {
+                        state.dragWindow = toWindowHandle;
+                    }
+
                     found = true;
                     goto Found;
                 }
diff --git a/services/inputflinger/dispatcher/InputDispatcher.h b/services/inputflinger/dispatcher/InputDispatcher.h
index 83094c2..b2f3625 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.h
+++ b/services/inputflinger/dispatcher/InputDispatcher.h
@@ -120,8 +120,8 @@
     virtual void setMaximumObscuringOpacityForTouch(float opacity) override;
     virtual void setBlockUntrustedTouchesMode(android::os::BlockUntrustedTouchesMode mode) override;
 
-    virtual bool transferTouchFocus(const sp<IBinder>& fromToken,
-                                    const sp<IBinder>& toToken) override;
+    virtual bool transferTouchFocus(const sp<IBinder>& fromToken, const sp<IBinder>& toToken,
+                                    bool isDragDrop = false) override;
 
     virtual base::Result<std::unique_ptr<InputChannel>> createInputChannel(
             const std::string& name) override;
@@ -185,6 +185,9 @@
     // Enqueues a focus event.
     void enqueueFocusEventLocked(const sp<IBinder>& windowToken, bool hasFocus,
                                  const std::string& reason) REQUIRES(mLock);
+    // Enqueues a drag event.
+    void enqueueDragEventLocked(const sp<InputWindowHandle>& windowHandle, bool isExiting,
+                                const MotionEntry& motionEntry) REQUIRES(mLock);
 
     // Adds an event to a queue of recent events for debugging purposes.
     void addRecentEventLocked(std::shared_ptr<EventEntry> entry) REQUIRES(mLock);
@@ -204,7 +207,8 @@
     sp<InputWindowHandle> findTouchedWindowAtLocked(int32_t displayId, int32_t x, int32_t y,
                                                     TouchState* touchState,
                                                     bool addOutsideTargets = false,
-                                                    bool addPortalWindows = false) REQUIRES(mLock);
+                                                    bool addPortalWindows = false,
+                                                    bool ignoreDragWindow = false) REQUIRES(mLock);
 
     // All registered connections mapped by channel file descriptor.
     std::unordered_map<int, sp<Connection>> mConnectionsByFd GUARDED_BY(mLock);
@@ -387,6 +391,7 @@
                              const std::vector<InputTarget>& inputTargets) REQUIRES(mLock);
     void dispatchSensorLocked(nsecs_t currentTime, std::shared_ptr<SensorEntry> entry,
                               DropReason* dropReason, nsecs_t* nextWakeupTime) REQUIRES(mLock);
+    void dispatchDragLocked(nsecs_t currentTime, std::shared_ptr<DragEntry> entry) REQUIRES(mLock);
     void logOutboundKeyDetails(const char* prefix, const KeyEntry& entry);
     void logOutboundMotionDetails(const char* prefix, const MotionEntry& entry);
 
@@ -489,10 +494,12 @@
                                    std::vector<InputTarget>& inputTargets) REQUIRES(mLock);
     void addGlobalMonitoringTargetsLocked(std::vector<InputTarget>& inputTargets, int32_t displayId,
                                           float xOffset = 0, float yOffset = 0) REQUIRES(mLock);
-
     void pokeUserActivityLocked(const EventEntry& eventEntry) REQUIRES(mLock);
     bool checkInjectionPermission(const sp<InputWindowHandle>& windowHandle,
                                   const InjectionState* injectionState);
+    // Enqueue a drag event if needed, and update the touch state.
+    // Uses findTouchedWindowAtLocked to find the window under the pointer.
+    void addDragEventLocked(const MotionEntry& entry, TouchState& state) REQUIRES(mLock);
 
     struct TouchOcclusionInfo {
         bool hasBlockingOcclusion;
diff --git a/services/inputflinger/dispatcher/TouchState.cpp b/services/inputflinger/dispatcher/TouchState.cpp
index 81b3cf0..4165f49 100644
--- a/services/inputflinger/dispatcher/TouchState.cpp
+++ b/services/inputflinger/dispatcher/TouchState.cpp
@@ -49,6 +49,8 @@
     windows = other.windows;
     portalWindows = other.portalWindows;
     gestureMonitors = other.gestureMonitors;
+    dragHoverWindowHandle = other.dragHoverWindowHandle;
+    dragWindow = other.dragWindow;
 }
 
 void TouchState::addOrUpdateWindow(const sp<InputWindowHandle>& windowHandle, int32_t targetFlags,
diff --git a/services/inputflinger/dispatcher/TouchState.h b/services/inputflinger/dispatcher/TouchState.h
index 623c6a8..d7a561c 100644
--- a/services/inputflinger/dispatcher/TouchState.h
+++ b/services/inputflinger/dispatcher/TouchState.h
@@ -41,6 +41,11 @@
 
     std::vector<TouchedMonitor> gestureMonitors;
 
+    // The window the drag is currently hovering over; it receives the drag events.
+    sp<InputWindowHandle> dragHoverWindowHandle;
+    // The window being dragged.
+    sp<InputWindowHandle> dragWindow;
+
     TouchState();
     ~TouchState();
     void reset();
diff --git a/services/inputflinger/dispatcher/include/InputDispatcherInterface.h b/services/inputflinger/dispatcher/include/InputDispatcherInterface.h
index 3491893..b601dfc 100644
--- a/services/inputflinger/dispatcher/include/InputDispatcherInterface.h
+++ b/services/inputflinger/dispatcher/include/InputDispatcherInterface.h
@@ -149,8 +149,8 @@
      *
      * Returns true on success.  False if the window did not actually have touch focus.
      */
-    virtual bool transferTouchFocus(const sp<IBinder>& fromToken, const sp<IBinder>& toToken) = 0;
-
+    virtual bool transferTouchFocus(const sp<IBinder>& fromToken, const sp<IBinder>& toToken,
+                                    bool isDragDrop) = 0;
     /**
      * Sets focus on the specified window.
      */
diff --git a/services/inputflinger/reader/mapper/CursorInputMapper.cpp b/services/inputflinger/reader/mapper/CursorInputMapper.cpp
index bb12be7..d6bd823 100644
--- a/services/inputflinger/reader/mapper/CursorInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/CursorInputMapper.cpp
@@ -188,11 +188,29 @@
 
     if (!changes || (changes & InputReaderConfiguration::CHANGE_DISPLAY_INFO)) {
         mOrientation = DISPLAY_ORIENTATION_0;
-        if (mParameters.orientationAware && mParameters.hasAssociatedDisplay) {
-            std::optional<DisplayViewport> internalViewport =
-                    config->getDisplayViewportByType(ViewportType::INTERNAL);
-            if (internalViewport) {
-                mOrientation = internalViewport->orientation;
+        const bool isOrientedDevice =
+                (mParameters.orientationAware && mParameters.hasAssociatedDisplay);
+
+        if (isPerWindowInputRotationEnabled()) {
+            // When per-window input rotation is enabled, InputReader works in the un-rotated
+            // coordinate space, so we don't need to do anything if the device is already
+            // orientation-aware. If the device is not orientation-aware, then we need to apply the
+            // inverse rotation of the display so that when the display rotation is applied later
+            // as a part of the per-window transform, we get the expected screen coordinates.
+            if (!isOrientedDevice) {
+                std::optional<DisplayViewport> internalViewport =
+                        config->getDisplayViewportByType(ViewportType::INTERNAL);
+                if (internalViewport) {
+                    mOrientation = getInverseRotation(internalViewport->orientation);
+                }
+            }
+        } else {
+            if (isOrientedDevice) {
+                std::optional<DisplayViewport> internalViewport =
+                        config->getDisplayViewportByType(ViewportType::INTERNAL);
+                if (internalViewport) {
+                    mOrientation = internalViewport->orientation;
+                }
             }
         }
 
@@ -294,11 +312,8 @@
     float deltaY = mCursorMotionAccumulator.getRelativeY() * mYScale;
     bool moved = deltaX != 0 || deltaY != 0;
 
-    // Rotate delta according to orientation if needed.
-    if (mParameters.orientationAware && mParameters.hasAssociatedDisplay &&
-        (deltaX != 0.0f || deltaY != 0.0f)) {
-        rotateDelta(mOrientation, &deltaX, &deltaY);
-    }
+    // Rotate delta according to orientation.
+    rotateDelta(mOrientation, &deltaX, &deltaY);
 
     // Move the pointer.
     PointerProperties pointerProperties;
@@ -326,7 +341,15 @@
             mPointerController->setPresentation(PointerControllerInterface::Presentation::POINTER);
 
             if (moved) {
-                mPointerController->move(deltaX, deltaY);
+                float dx = deltaX;
+                float dy = deltaY;
+                if (isPerWindowInputRotationEnabled()) {
+                    // Rotate the delta from InputReader's un-rotated coordinate space to
+                    // PointerController's rotated coordinate space that is oriented with the
+                    // viewport.
+                    rotateDelta(getInverseRotation(mOrientation), &dx, &dy);
+                }
+                mPointerController->move(dx, dy);
             }
 
             if (buttonsChanged) {
diff --git a/services/inputflinger/reader/mapper/TouchCursorInputMapperCommon.h b/services/inputflinger/reader/mapper/TouchCursorInputMapperCommon.h
index 5344227..1843b03 100644
--- a/services/inputflinger/reader/mapper/TouchCursorInputMapperCommon.h
+++ b/services/inputflinger/reader/mapper/TouchCursorInputMapperCommon.h
@@ -17,6 +17,7 @@
 #ifndef _UI_INPUTREADER_TOUCH_CURSOR_INPUT_MAPPER_COMMON_H
 #define _UI_INPUTREADER_TOUCH_CURSOR_INPUT_MAPPER_COMMON_H
 
+#include <android-base/properties.h>
 #include <input/DisplayViewport.h>
 #include <stdint.h>
 
@@ -28,6 +29,26 @@
 
 // --- Static Definitions ---
 
+// When per-window input rotation is enabled, display transformations such as rotation and
+// projection are part of the input window's transform. This means InputReader should work in the
+// un-rotated coordinate space.
+static bool isPerWindowInputRotationEnabled() {
+    static const bool PER_WINDOW_INPUT_ROTATION =
+            base::GetBoolProperty("persist.debug.per_window_input_rotation", false);
+    return PER_WINDOW_INPUT_ROTATION;
+}
+
+static int32_t getInverseRotation(int32_t orientation) {
+    switch (orientation) {
+        case DISPLAY_ORIENTATION_90:
+            return DISPLAY_ORIENTATION_270;
+        case DISPLAY_ORIENTATION_270:
+            return DISPLAY_ORIENTATION_90;
+        default:
+            return orientation;
+    }
+}
+
 static void rotateDelta(int32_t orientation, float* deltaX, float* deltaY) {
     float temp;
     switch (orientation) {
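
getInverseRotation() just swaps the 90- and 270-degree cases, so applying a rotation and then its
inverse is a no-op. A toy round-trip, assuming rotateDelta() applies a pure rotation by the given
orientation (which is how the mappers below use it):

    float dx = 5.f, dy = -3.f;
    rotateDelta(DISPLAY_ORIENTATION_90, &dx, &dy);                      // into the rotated space
    rotateDelta(getInverseRotation(DISPLAY_ORIENTATION_90), &dx, &dy);  // back to un-rotated space
    // dx == 5.f and dy == -3.f again: 90 degrees followed by 270 degrees is a full turn.
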
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.cpp b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
index 1a7ddee..13712ee 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
@@ -28,6 +28,30 @@
 
 namespace android {
 
+namespace {
+
+// Rotates the given point (x, y) by the supplied orientation. The width and height are the
+// dimensions of the surface prior to this rotation being applied.
+void rotatePoint(int32_t orientation, float& x, float& y, int32_t width, int32_t height) {
+    rotateDelta(orientation, &x, &y);
+    switch (orientation) {
+        case DISPLAY_ORIENTATION_90:
+            y += width;
+            break;
+        case DISPLAY_ORIENTATION_180:
+            x += width;
+            y += height;
+            break;
+        case DISPLAY_ORIENTATION_270:
+            x += height;
+            break;
+        default:
+            break;
+    }
+}
+
+} // namespace
+
 // --- Constants ---
 
 // Maximum amount of latency to add to touch events while waiting for data from an
@@ -729,8 +753,20 @@
             mSurfaceRight = mSurfaceLeft + naturalLogicalWidth;
             mSurfaceBottom = mSurfaceTop + naturalLogicalHeight;
 
-            mSurfaceOrientation =
-                    mParameters.orientationAware ? mViewport.orientation : DISPLAY_ORIENTATION_0;
+            if (isPerWindowInputRotationEnabled()) {
+                // When per-window input rotation is enabled, InputReader works in the un-rotated
+                // coordinate space, so we don't need to do anything if the device is already
+                // orientation-aware. If the device is not orientation-aware, then we need to apply
+                // the inverse rotation of the display so that when the display rotation is applied
+                // later as a part of the per-window transform, we get the expected screen
+                // coordinates.
+                mSurfaceOrientation = mParameters.orientationAware
+                        ? DISPLAY_ORIENTATION_0
+                        : getInverseRotation(mViewport.orientation);
+            } else {
+                mSurfaceOrientation = mParameters.orientationAware ? mViewport.orientation
+                                                                   : DISPLAY_ORIENTATION_0;
+            }
         } else {
             mPhysicalWidth = rawWidth;
             mPhysicalHeight = rawHeight;
@@ -1637,10 +1673,9 @@
     mPointerController->fade(PointerControllerInterface::Transition::GRADUAL);
 
     mPointerController->setButtonState(mCurrentRawState.buttonState);
-    mPointerController->setSpots(mCurrentCookedState.cookedPointerData.pointerCoords,
-                                 mCurrentCookedState.cookedPointerData.idToIndex,
-                                 mCurrentCookedState.cookedPointerData.touchingIdBits,
-                                 mViewport.displayId);
+    setTouchSpots(mCurrentCookedState.cookedPointerData.pointerCoords,
+                  mCurrentCookedState.cookedPointerData.idToIndex,
+                  mCurrentCookedState.cookedPointerData.touchingIdBits, mViewport.displayId);
 }
 
 bool TouchInputMapper::isTouchScreen() {
@@ -2378,10 +2413,9 @@
         }
 
         if (mPointerGesture.currentGestureMode == PointerGesture::Mode::FREEFORM) {
-            mPointerController->setSpots(mPointerGesture.currentGestureCoords,
-                                         mPointerGesture.currentGestureIdToIndex,
-                                         mPointerGesture.currentGestureIdBits,
-                                         mPointerController->getDisplayId());
+            setTouchSpots(mPointerGesture.currentGestureCoords,
+                          mPointerGesture.currentGestureIdToIndex,
+                          mPointerGesture.currentGestureIdBits, mPointerController->getDisplayId());
         }
     } else {
         mPointerController->setPresentation(PointerControllerInterface::Presentation::POINTER);
@@ -2525,8 +2559,7 @@
         // the pointer is hovering again even if the user is not currently touching
         // the touch pad.  This ensures that a view will receive a fresh hover enter
         // event after a tap.
-        float x, y;
-        mPointerController->getPosition(&x, &y);
+        auto [x, y] = getMouseCursorPosition();
 
         PointerProperties pointerProperties;
         pointerProperties.clear();
@@ -2783,13 +2816,12 @@
             // Move the pointer using a relative motion.
             // When using spots, the click will occur at the position of the anchor
             // spot and all other spots will move there.
-            mPointerController->move(deltaX, deltaY);
+            moveMouseCursor(deltaX, deltaY);
         } else {
             mPointerVelocityControl.reset();
         }
 
-        float x, y;
-        mPointerController->getPosition(&x, &y);
+        auto [x, y] = getMouseCursorPosition();
 
         mPointerGesture.currentGestureMode = PointerGesture::Mode::BUTTON_CLICK_OR_DRAG;
         mPointerGesture.currentGestureIdBits.clear();
@@ -2815,8 +2847,7 @@
              mPointerGesture.lastGestureMode == PointerGesture::Mode::TAP_DRAG) &&
             lastFingerCount == 1) {
             if (when <= mPointerGesture.tapDownTime + mConfig.pointerGestureTapInterval) {
-                float x, y;
-                mPointerController->getPosition(&x, &y);
+                auto [x, y] = getMouseCursorPosition();
                 if (fabs(x - mPointerGesture.tapX) <= mConfig.pointerGestureTapSlop &&
                     fabs(y - mPointerGesture.tapY) <= mConfig.pointerGestureTapSlop) {
 #if DEBUG_GESTURES
@@ -2884,8 +2915,7 @@
         mPointerGesture.currentGestureMode = PointerGesture::Mode::HOVER;
         if (mPointerGesture.lastGestureMode == PointerGesture::Mode::TAP) {
             if (when <= mPointerGesture.tapUpTime + mConfig.pointerGestureTapDragInterval) {
-                float x, y;
-                mPointerController->getPosition(&x, &y);
+                auto [x, y] = getMouseCursorPosition();
                 if (fabs(x - mPointerGesture.tapX) <= mConfig.pointerGestureTapSlop &&
                     fabs(y - mPointerGesture.tapY) <= mConfig.pointerGestureTapSlop) {
                     mPointerGesture.currentGestureMode = PointerGesture::Mode::TAP_DRAG;
@@ -2919,7 +2949,7 @@
 
             // Move the pointer using a relative motion.
             // When using spots, the hover or drag will occur at the position of the anchor spot.
-            mPointerController->move(deltaX, deltaY);
+            moveMouseCursor(deltaX, deltaY);
         } else {
             mPointerVelocityControl.reset();
         }
@@ -2941,8 +2971,7 @@
             down = false;
         }
 
-        float x, y;
-        mPointerController->getPosition(&x, &y);
+        auto [x, y] = getMouseCursorPosition();
 
         mPointerGesture.currentGestureIdBits.clear();
         mPointerGesture.currentGestureIdBits.markBit(mPointerGesture.activeGestureId);
@@ -3015,8 +3044,9 @@
             mCurrentRawState.rawPointerData
                     .getCentroidOfTouchingPointers(&mPointerGesture.referenceTouchX,
                                                    &mPointerGesture.referenceTouchY);
-            mPointerController->getPosition(&mPointerGesture.referenceGestureX,
-                                            &mPointerGesture.referenceGestureY);
+            auto [x, y] = getMouseCursorPosition();
+            mPointerGesture.referenceGestureX = x;
+            mPointerGesture.referenceGestureY = y;
         }
 
         // Clear the reference deltas for fingers not yet included in the reference calculation.
@@ -3354,14 +3384,13 @@
     if (!mCurrentCookedState.stylusIdBits.isEmpty()) {
         uint32_t id = mCurrentCookedState.stylusIdBits.firstMarkedBit();
         uint32_t index = mCurrentCookedState.cookedPointerData.idToIndex[id];
-        float x = mCurrentCookedState.cookedPointerData.pointerCoords[index].getX();
-        float y = mCurrentCookedState.cookedPointerData.pointerCoords[index].getY();
-        mPointerController->setPosition(x, y);
+        setMouseCursorPosition(mCurrentCookedState.cookedPointerData.pointerCoords[index].getX(),
+                               mCurrentCookedState.cookedPointerData.pointerCoords[index].getY());
 
         hovering = mCurrentCookedState.cookedPointerData.hoveringIdBits.hasBit(id);
         down = !hovering;
 
-        mPointerController->getPosition(&x, &y);
+        auto [x, y] = getMouseCursorPosition();
         mPointerSimple.currentCoords.copyFrom(
                 mCurrentCookedState.cookedPointerData.pointerCoords[index]);
         mPointerSimple.currentCoords.setAxisValue(AMOTION_EVENT_AXIS_X, x);
@@ -3402,7 +3431,7 @@
             rotateDelta(mSurfaceOrientation, &deltaX, &deltaY);
             mPointerVelocityControl.move(when, &deltaX, &deltaY);
 
-            mPointerController->move(deltaX, deltaY);
+            moveMouseCursor(deltaX, deltaY);
         } else {
             mPointerVelocityControl.reset();
         }
@@ -3410,8 +3439,7 @@
         down = isPointerDown(mCurrentRawState.buttonState);
         hovering = !down;
 
-        float x, y;
-        mPointerController->getPosition(&x, &y);
+        auto [x, y] = getMouseCursorPosition();
         mPointerSimple.currentCoords.copyFrom(
                 mCurrentCookedState.cookedPointerData.pointerCoords[currentIndex]);
         mPointerSimple.currentCoords.setAxisValue(AMOTION_EVENT_AXIS_X, x);
@@ -3451,9 +3479,7 @@
     }
     int32_t displayId = mPointerController->getDisplayId();
 
-    float xCursorPosition;
-    float yCursorPosition;
-    mPointerController->getPosition(&xCursorPosition, &yCursorPosition);
+    auto [xCursorPosition, yCursorPosition] = getMouseCursorPosition();
 
     if (mPointerSimple.down && !down) {
         mPointerSimple.down = false;
@@ -3619,7 +3645,9 @@
     float xCursorPosition = AMOTION_EVENT_INVALID_CURSOR_POSITION;
     float yCursorPosition = AMOTION_EVENT_INVALID_CURSOR_POSITION;
     if (mDeviceMode == DeviceMode::POINTER) {
-        mPointerController->getPosition(&xCursorPosition, &yCursorPosition);
+        auto [x, y] = getMouseCursorPosition();
+        xCursorPosition = x;
+        yCursorPosition = y;
     }
     const int32_t displayId = getAssociatedDisplayId().value_or(ADISPLAY_ID_NONE);
     const int32_t deviceId = getDeviceId();
@@ -3969,4 +3997,63 @@
     return std::nullopt;
 }
 
+void TouchInputMapper::moveMouseCursor(float dx, float dy) const {
+    if (isPerWindowInputRotationEnabled()) {
+        // Convert from InputReader's un-rotated coordinate space to PointerController's coordinate
+        // space that is oriented with the viewport.
+        rotateDelta(mViewport.orientation, &dx, &dy);
+    }
+
+    mPointerController->move(dx, dy);
+}
+
+std::pair<float, float> TouchInputMapper::getMouseCursorPosition() const {
+    float x = 0;
+    float y = 0;
+    mPointerController->getPosition(&x, &y);
+
+    if (!isPerWindowInputRotationEnabled()) return {x, y};
+    if (!mViewport.isValid()) return {x, y};
+
+    // Convert from PointerController's rotated coordinate space that is oriented with the viewport
+    // to InputReader's un-rotated coordinate space.
+    const int32_t orientation = getInverseRotation(mViewport.orientation);
+    rotatePoint(orientation, x, y, mViewport.deviceWidth, mViewport.deviceHeight);
+    return {x, y};
+}
+
+void TouchInputMapper::setMouseCursorPosition(float x, float y) const {
+    if (isPerWindowInputRotationEnabled() && mViewport.isValid()) {
+        // Convert from InputReader's un-rotated coordinate space to PointerController's rotated
+        // coordinate space that is oriented with the viewport.
+        rotatePoint(mViewport.orientation, x, y, mRawSurfaceWidth, mRawSurfaceHeight);
+    }
+
+    mPointerController->setPosition(x, y);
+}
+
+void TouchInputMapper::setTouchSpots(const PointerCoords* spotCoords, const uint32_t* spotIdToIndex,
+                                     BitSet32 spotIdBits, int32_t displayId) {
+    std::array<PointerCoords, MAX_POINTERS> outSpotCoords{};
+
+    for (BitSet32 idBits(spotIdBits); !idBits.isEmpty();) {
+        const uint32_t index = spotIdToIndex[idBits.clearFirstMarkedBit()];
+        float x = spotCoords[index].getX();
+        float y = spotCoords[index].getY();
+        float pressure = spotCoords[index].getAxisValue(AMOTION_EVENT_AXIS_PRESSURE);
+
+        if (isPerWindowInputRotationEnabled()) {
+            // Convert from InputReader's un-rotated coordinate space to PointerController's rotated
+            // coordinate space.
+            rotatePoint(mViewport.orientation, x, y, mRawSurfaceWidth, mRawSurfaceHeight);
+        }
+
+        outSpotCoords[index].setAxisValue(AMOTION_EVENT_AXIS_X, x);
+        outSpotCoords[index].setAxisValue(AMOTION_EVENT_AXIS_Y, y);
+        outSpotCoords[index].setAxisValue(AMOTION_EVENT_AXIS_PRESSURE, pressure);
+    }
+
+    mPointerController->setSpots(outSpotCoords.data(), spotIdToIndex, spotIdBits, displayId);
+}
+
 } // namespace android
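
The wrappers added above centralize a coordinate-space conversion: with per-window input rotation enabled, InputReader works in an un-rotated space while PointerController works in a space oriented with the viewport, and getMouseCursorPosition() maps back using the inverse orientation. The self-contained sketch below only illustrates the round-trip property this relies on; the rotation convention, enum, and helper names are assumptions made for the example, not the rotatePoint()/getInverseRotation() helpers from this tree.

#include <cassert>
#include <cstdint>
#include <utility>

// Orientation values assumed for the sketch; the real code uses DISPLAY_ORIENTATION_*.
enum Orientation : int32_t { ORIENTATION_0 = 0, ORIENTATION_90, ORIENTATION_180, ORIENTATION_270 };

// Rotate a point from an un-rotated space of size (w, h) into a space rotated by `orientation`.
// This mapping is one common convention, chosen only to make the example self-consistent.
std::pair<float, float> rotate(int32_t orientation, float x, float y, float w, float h) {
    switch (orientation) {
        case ORIENTATION_90:  return {y, w - x};
        case ORIENTATION_180: return {w - x, h - y};
        case ORIENTATION_270: return {h - y, x};
        default:              return {x, y};
    }
}

// 90 and 270 undo each other; 0 and 180 are their own inverses.
int32_t inverseRotation(int32_t orientation) {
    if (orientation == ORIENTATION_90) return ORIENTATION_270;
    if (orientation == ORIENTATION_270) return ORIENTATION_90;
    return orientation;
}

int main() {
    const float w = 400, h = 800;  // un-rotated display size
    auto [rx, ry] = rotate(ORIENTATION_90, 30, 50, w, h);           // into the rotated space
    auto [ox, oy] = rotate(inverseRotation(ORIENTATION_90), rx, ry, // and back again, with the
                           /*w=*/h, /*h=*/w);                       // dimensions swapped
    assert(ox == 30 && oy == 50);  // the round trip restores the original point
    return 0;
}
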
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.h b/services/inputflinger/reader/mapper/TouchInputMapper.h
index 9b84ed5..5146299 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.h
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.h
@@ -776,6 +776,14 @@
 
     const char* modeToString(DeviceMode deviceMode);
     void rotateAndScale(float& x, float& y);
+
+    // Wrapper methods for interfacing with PointerController. These are used to convert points
+    // between the coordinate spaces used by InputReader and PointerController, if they differ.
+    void moveMouseCursor(float dx, float dy) const;
+    std::pair<float, float> getMouseCursorPosition() const;
+    void setMouseCursorPosition(float x, float y) const;
+    void setTouchSpots(const PointerCoords* spotCoords, const uint32_t* spotIdToIndex,
+                       BitSet32 spotIdBits, int32_t displayId);
 };
 
 } // namespace android
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index 51f6f5d..32f9b69 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -766,6 +766,9 @@
             case AINPUT_EVENT_TYPE_CAPTURE: {
                 FAIL() << "Use 'consumeCaptureEvent' for CAPTURE events";
             }
+            case AINPUT_EVENT_TYPE_DRAG: {
+                FAIL() << "Use 'consumeDragEvent' for DRAG events";
+            }
             default: {
                 FAIL() << mName.c_str() << ": invalid event type: " << expectedEventType;
             }
@@ -803,6 +806,23 @@
         EXPECT_EQ(hasCapture, captureEvent.getPointerCaptureEnabled());
     }
 
+    void consumeDragEvent(bool isExiting, float x, float y) {
+        const InputEvent* event = consume();
+        ASSERT_NE(nullptr, event) << mName.c_str()
+                                  << ": consumer should have returned non-NULL event.";
+        ASSERT_EQ(AINPUT_EVENT_TYPE_DRAG, event->getType())
+                << "Got " << inputEventTypeToString(event->getType())
+                << " event instead of DRAG event";
+
+        EXPECT_EQ(ADISPLAY_ID_NONE, event->getDisplayId())
+                << mName.c_str() << ": event displayId should always be NONE.";
+
+        const auto& dragEvent = static_cast<const DragEvent&>(*event);
+        EXPECT_EQ(isExiting, dragEvent.isExiting());
+        EXPECT_EQ(x, dragEvent.getX());
+        EXPECT_EQ(y, dragEvent.getY());
+    }
+
     void assertNoEvents() {
         InputEvent* event = consume();
         if (event == nullptr) {
@@ -905,7 +925,7 @@
         mInfo.frameTop = frame.top;
         mInfo.frameRight = frame.right;
         mInfo.frameBottom = frame.bottom;
-        mInfo.transform.set(frame.left, frame.top);
+        mInfo.transform.set(-frame.left, -frame.top);
         mInfo.touchableRegion.clear();
         mInfo.addTouchableRegion(frame);
     }
@@ -1003,6 +1023,10 @@
                                      expectedFlags);
     }
 
+    void consumeDragEvent(bool isExiting, float x, float y) {
+        mInputReceiver->consumeDragEvent(isExiting, x, y);
+    }
+
     std::optional<uint32_t> receiveEvent(InputEvent** outEvent = nullptr) {
         if (mInputReceiver == nullptr) {
             ADD_FAILURE() << "Invalid receive event on window with no receiver";
@@ -2183,7 +2207,7 @@
     EXPECT_EQ(motionArgs.buttonState, verifiedMotion.buttonState);
 }
 
-TEST_F(InputDispatcherTest, NonPointerMotionEvent_JoystickNotTransformed) {
+TEST_F(InputDispatcherTest, NonPointerMotionEvent_NotTransformed) {
     std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
     sp<FakeWindowHandle> window =
             new FakeWindowHandle(application, mDispatcher, "Test window", ADISPLAY_ID_DEFAULT);
@@ -2203,28 +2227,41 @@
     // Second, we consume focus event if it is right or wrong according to onFocusChangedLocked.
     window->consumeFocusEvent(true);
 
-    NotifyMotionArgs motionArgs = generateMotionArgs(AMOTION_EVENT_ACTION_MOVE,
-                                                     AINPUT_SOURCE_JOYSTICK, ADISPLAY_ID_DEFAULT);
-    mDispatcher->notifyMotion(&motionArgs);
+    constexpr const std::array nonPointerSources = {AINPUT_SOURCE_TRACKBALL,
+                                                    AINPUT_SOURCE_MOUSE_RELATIVE,
+                                                    AINPUT_SOURCE_JOYSTICK};
+    for (const int source : nonPointerSources) {
+        // Notify motion with a non-pointer source.
+        NotifyMotionArgs motionArgs =
+                generateMotionArgs(AMOTION_EVENT_ACTION_MOVE, source, ADISPLAY_ID_DEFAULT);
+        mDispatcher->notifyMotion(&motionArgs);
 
-    // Third, we consume motion event.
-    InputEvent* event = window->consume();
-    ASSERT_NE(event, nullptr);
-    ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, event->getType())
-            << name.c_str() << "expected " << inputEventTypeToString(AINPUT_EVENT_TYPE_MOTION)
-            << " event, got " << inputEventTypeToString(event->getType()) << " event";
+        InputEvent* event = window->consume();
+        ASSERT_NE(event, nullptr);
+        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, event->getType())
+                << name.c_str() << "expected " << inputEventTypeToString(AINPUT_EVENT_TYPE_MOTION)
+                << " event, got " << inputEventTypeToString(event->getType()) << " event";
 
-    const MotionEvent& motionEvent = static_cast<const MotionEvent&>(*event);
-    EXPECT_EQ(AINPUT_EVENT_TYPE_MOTION, motionEvent.getAction());
+        const MotionEvent& motionEvent = static_cast<const MotionEvent&>(*event);
+        EXPECT_EQ(AMOTION_EVENT_ACTION_MOVE, motionEvent.getAction());
+        EXPECT_EQ(motionArgs.pointerCount, motionEvent.getPointerCount());
 
-    float expectedX = motionArgs.pointerCoords[0].getX();
-    float expectedY = motionArgs.pointerCoords[0].getY();
+        float expectedX = motionArgs.pointerCoords[0].getX();
+        float expectedY = motionArgs.pointerCoords[0].getY();
 
-    // Finally we test if the axis values from the final motion event are not transformed
-    EXPECT_EQ(expectedX, motionEvent.getX(0)) << "expected " << expectedX << " for x coord of "
-                                              << name.c_str() << ", got " << motionEvent.getX(0);
-    EXPECT_EQ(expectedY, motionEvent.getY(0)) << "expected " << expectedY << " for y coord of "
-                                              << name.c_str() << ", got " << motionEvent.getY(0);
+        // Ensure the axis values from the final motion event are not transformed.
+        EXPECT_EQ(expectedX, motionEvent.getX(0))
+                << "expected " << expectedX << " for x coord of " << name.c_str() << ", got "
+                << motionEvent.getX(0);
+        EXPECT_EQ(expectedY, motionEvent.getY(0))
+                << "expected " << expectedY << " for y coord of " << name.c_str() << ", got "
+                << motionEvent.getY(0);
+        // Ensure the raw and transformed axis values for the motion event are the same.
+        EXPECT_EQ(motionEvent.getRawX(0), motionEvent.getX(0))
+                << "expected raw and transformed X-axis values to be equal";
+        EXPECT_EQ(motionEvent.getRawY(0), motionEvent.getY(0))
+                << "expected raw and transformed Y-axis values to be equal";
+    }
 }
 
 /**
@@ -4669,4 +4706,87 @@
     mTouchWindow->consumeAnyMotionDown();
 }
 
+class InputDispatcherDragTests : public InputDispatcherTest {
+protected:
+    std::shared_ptr<FakeApplicationHandle> mApp;
+    sp<FakeWindowHandle> mWindow;
+    sp<FakeWindowHandle> mSecondWindow;
+    sp<FakeWindowHandle> mDragWindow;
+
+    void SetUp() override {
+        InputDispatcherTest::SetUp();
+        mApp = std::make_shared<FakeApplicationHandle>();
+        mWindow = new FakeWindowHandle(mApp, mDispatcher, "TestWindow", ADISPLAY_ID_DEFAULT);
+        mWindow->setFrame(Rect(0, 0, 100, 100));
+        mWindow->setFlags(InputWindowInfo::Flag::NOT_TOUCH_MODAL);
+
+        mSecondWindow = new FakeWindowHandle(mApp, mDispatcher, "TestWindow2", ADISPLAY_ID_DEFAULT);
+        mSecondWindow->setFrame(Rect(100, 0, 200, 100));
+        mSecondWindow->setFlags(InputWindowInfo::Flag::NOT_TOUCH_MODAL);
+
+        mDispatcher->setFocusedApplication(ADISPLAY_ID_DEFAULT, mApp);
+        mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {mWindow, mSecondWindow}}});
+    }
+
+    // Start performing a drag: create a drag window and transfer touch focus to it.
+    void performDrag() {
+        ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+                  injectMotionDown(mDispatcher, AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT,
+                                   {50, 50}))
+                << "Inject motion event should return InputEventInjectionResult::SUCCEEDED";
+
+        // Window should receive motion event.
+        mWindow->consumeMotionDown(ADISPLAY_ID_DEFAULT);
+
+        // The drag window covers the entire display
+        mDragWindow = new FakeWindowHandle(mApp, mDispatcher, "DragWindow", ADISPLAY_ID_DEFAULT);
+        mDispatcher->setInputWindows(
+                {{ADISPLAY_ID_DEFAULT, {mDragWindow, mWindow, mSecondWindow}}});
+
+        // Transfer touch focus to the drag window
+        mDispatcher->transferTouchFocus(mWindow->getToken(), mDragWindow->getToken(),
+                                        true /* isDragDrop */);
+        mWindow->consumeMotionCancel();
+        mDragWindow->consumeMotionDown();
+    }
+};
+
+TEST_F(InputDispatcherDragTests, DragEnterAndDragExit) {
+    performDrag();
+
+    // Move on window.
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher, AMOTION_EVENT_ACTION_MOVE, AINPUT_SOURCE_TOUCHSCREEN,
+                                ADISPLAY_ID_DEFAULT, {50, 50}))
+            << "Inject motion event should return InputEventInjectionResult::SUCCEEDED";
+    mDragWindow->consumeMotionMove(ADISPLAY_ID_DEFAULT);
+    mWindow->consumeDragEvent(false, 50, 50);
+    mSecondWindow->assertNoEvents();
+
+    // Move to another window.
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher, AMOTION_EVENT_ACTION_MOVE, AINPUT_SOURCE_TOUCHSCREEN,
+                                ADISPLAY_ID_DEFAULT, {150, 50}))
+            << "Inject motion event should return InputEventInjectionResult::SUCCEEDED";
+    mDragWindow->consumeMotionMove(ADISPLAY_ID_DEFAULT);
+    mWindow->consumeDragEvent(true, 150, 50);
+    mSecondWindow->consumeDragEvent(false, 50, 50);
+
+    // Move back to original window.
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher, AMOTION_EVENT_ACTION_MOVE, AINPUT_SOURCE_TOUCHSCREEN,
+                                ADISPLAY_ID_DEFAULT, {50, 50}))
+            << "Inject motion event should return InputEventInjectionResult::SUCCEEDED";
+    mDragWindow->consumeMotionMove(ADISPLAY_ID_DEFAULT);
+    mWindow->consumeDragEvent(false, 50, 50);
+    mSecondWindow->consumeDragEvent(true, -50, 50);
+
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionUp(mDispatcher, AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT, {50, 50}))
+            << "Inject motion event should return InputEventInjectionResult::SUCCEEDED";
+    mDragWindow->consumeMotionUp(ADISPLAY_ID_DEFAULT);
+    mWindow->assertNoEvents();
+    mSecondWindow->assertNoEvents();
+}
+
 } // namespace android::inputdispatcher
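
The drag coordinates asserted in DragEnterAndDragExit follow from the transform change earlier in this file: each window's transform now translates display coordinates by (-frame.left, -frame.top). A minimal sketch of that arithmetic, using a hypothetical toWindowSpace() helper rather than the real ui::Transform:

#include <cstdio>

// Stand-in type for the sketch; the test itself configures InputWindowInfo and its transform.
struct Frame { float left, top; };

// Mirrors the effect of mInfo.transform.set(-frame.left, -frame.top) on a single point.
void toWindowSpace(const Frame& f, float displayX, float displayY, float* outX, float* outY) {
    *outX = displayX - f.left;
    *outY = displayY - f.top;
}

int main() {
    const Frame secondWindow{100, 0};             // mSecondWindow->setFrame(Rect(100, 0, 200, 100))
    float x, y;
    toWindowSpace(secondWindow, 150, 50, &x, &y);
    std::printf("(%.0f, %.0f)\n", x, y);          // (50, 50): the drag "enter" coordinates
    toWindowSpace(secondWindow, 50, 50, &x, &y);
    std::printf("(%.0f, %.0f)\n", x, y);          // (-50, 50): the drag "exit" coordinates
    return 0;
}
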
diff --git a/services/sensorservice/SensorDevice.cpp b/services/sensorservice/SensorDevice.cpp
index 23893ea..d8e8b52 100644
--- a/services/sensorservice/SensorDevice.cpp
+++ b/services/sensorservice/SensorDevice.cpp
@@ -686,9 +686,9 @@
         ALOGD_IF(DEBUG_CONNECTIONS, "enable index=%zd", info.batchParams.indexOfKey(ident));
 
         if (isClientDisabledLocked(ident)) {
-            ALOGE("SensorDevice::activate, isClientDisabledLocked(%p):true, handle:%d",
+            ALOGW("SensorDevice::activate, isClientDisabledLocked(%p):true, handle:%d",
                     ident, handle);
-            return INVALID_OPERATION;
+            return NO_ERROR;
         }
 
         if (info.batchParams.indexOfKey(ident) >= 0) {
diff --git a/services/surfaceflinger/BufferStateLayer.cpp b/services/surfaceflinger/BufferStateLayer.cpp
index f52e708..96a0c3c 100644
--- a/services/surfaceflinger/BufferStateLayer.cpp
+++ b/services/surfaceflinger/BufferStateLayer.cpp
@@ -177,6 +177,8 @@
         // leak.
         ALOGW("Removing the front of pending jank deque from layer - %s to prevent memory leak",
               mName.c_str());
+        std::string miniDump = mPendingJankClassifications.front()->miniDump();
+        ALOGD("Head SurfaceFrame mini dump\n%s", miniDump.c_str());
         mPendingJankClassifications.pop_front();
     }
     mPendingJankClassifications.emplace_back(surfaceFrame);
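
The extra miniDump() log above adds diagnostics to an existing bounded-queue pattern: when the pending jank deque is about to exceed its cap, the head entry is dumped and dropped instead of letting the deque grow without bound. A rough, self-contained sketch of that pattern, with the cap and the stand-in type invented for illustration:

#include <cstddef>
#include <deque>
#include <iostream>
#include <string>

// Illustrative stand-in for a pending SurfaceFrame; the real class exposes miniDump().
struct PendingFrame {
    std::string miniDump() const { return "token=42 presented=false"; }
};

int main() {
    constexpr std::size_t kMaxPending = 25;  // assumed cap; the real limit lives in BufferStateLayer
    std::deque<PendingFrame> pending(kMaxPending);

    // Before appending, dump and drop the head if the deque is already full, so the queue
    // stays bounded even when classification results never arrive for old frames.
    if (pending.size() >= kMaxPending) {
        std::cout << "Evicting head frame: " << pending.front().miniDump() << '\n';
        pending.pop_front();
    }
    pending.emplace_back();
    return 0;
}
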
diff --git a/services/surfaceflinger/CompositionEngine/Android.bp b/services/surfaceflinger/CompositionEngine/Android.bp
index f9e5b9a..1c47691 100644
--- a/services/surfaceflinger/CompositionEngine/Android.bp
+++ b/services/surfaceflinger/CompositionEngine/Android.bp
@@ -103,6 +103,8 @@
     test_suites: ["device-tests"],
     defaults: ["libcompositionengine_defaults"],
     srcs: [
+        "tests/planner/CachedSetTest.cpp",
+        "tests/planner/FlattenerTest.cpp",
         "tests/CompositionEngineTest.cpp",
         "tests/DisplayColorProfileTest.cpp",
         "tests/DisplayTest.cpp",
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h
index 6c86408..582723d 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h
@@ -39,7 +39,8 @@
 
     void setDisplaySize(ui::Size size) { mDisplaySize = size; }
 
-    NonBufferHash flattenLayers(const std::vector<const LayerState*>& layers, NonBufferHash);
+    NonBufferHash flattenLayers(const std::vector<const LayerState*>& layers, NonBufferHash,
+                                std::chrono::steady_clock::time_point now);
 
     void renderCachedSets(renderengine::RenderEngine&);
 
diff --git a/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp b/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp
index 0c09714..d304c9f 100644
--- a/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp
@@ -28,9 +28,7 @@
 namespace android::compositionengine::impl::planner {
 
 NonBufferHash Flattener::flattenLayers(const std::vector<const LayerState*>& layers,
-                                       NonBufferHash hash) {
-    const auto now = std::chrono::steady_clock::now();
-
+                                       NonBufferHash hash, time_point now) {
     const size_t unflattenedDisplayCost = calculateDisplayCost(layers);
     mUnflattenedDisplayCost += unflattenedDisplayCost;
 
diff --git a/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp b/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp
index 52efff5..4570253 100644
--- a/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp
@@ -89,7 +89,8 @@
                    });
 
     const NonBufferHash hash = getNonBufferHash(mCurrentLayers);
-    mFlattenedHash = mFlattener.flattenLayers(mCurrentLayers, hash);
+    mFlattenedHash =
+            mFlattener.flattenLayers(mCurrentLayers, hash, std::chrono::steady_clock::now());
     const bool layersWereFlattened = hash != mFlattenedHash;
     ALOGV("[%s] Initial hash %zx flattened hash %zx", __func__, hash, mFlattenedHash);
 
diff --git a/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp b/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp
new file mode 100644
index 0000000..6d1ce4c
--- /dev/null
+++ b/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <compositionengine/impl/planner/CachedSet.h>
+#include <compositionengine/impl/planner/LayerState.h>
+#include <compositionengine/mock/LayerFE.h>
+#include <compositionengine/mock/OutputLayer.h>
+#include <gtest/gtest.h>
+#include <renderengine/mock/RenderEngine.h>
+
+namespace android::compositionengine {
+using namespace std::chrono_literals;
+
+using testing::_;
+using testing::DoAll;
+using testing::Invoke;
+using testing::Return;
+using testing::ReturnRef;
+using testing::SetArgPointee;
+
+using impl::planner::CachedSet;
+using impl::planner::LayerState;
+using impl::planner::LayerStateField;
+
+namespace {
+
+class CachedSetTest : public testing::Test {
+public:
+    CachedSetTest() = default;
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    const std::chrono::steady_clock::time_point kStartTime = std::chrono::steady_clock::now();
+
+    struct TestLayer {
+        mock::OutputLayer outputLayer;
+        impl::OutputLayerCompositionState outputLayerCompositionState;
+        // LayerFE inherits from RefBase and must be held by an sp<>
+        sp<mock::LayerFE> layerFE;
+        LayerFECompositionState layerFECompositionState;
+
+        std::unique_ptr<LayerState> layerState;
+        std::unique_ptr<CachedSet::Layer> cachedSetLayer;
+    };
+
+    static constexpr size_t kNumLayers = 5;
+    std::vector<std::unique_ptr<TestLayer>> mTestLayers;
+
+    android::renderengine::mock::RenderEngine mRenderEngine;
+};
+
+void CachedSetTest::SetUp() {
+    for (size_t i = 0; i < kNumLayers; i++) {
+        auto testLayer = std::make_unique<TestLayer>();
+        auto pos = static_cast<int32_t>(i);
+        testLayer->outputLayerCompositionState.displayFrame = Rect(pos, pos, pos + 1, pos + 1);
+
+        testLayer->layerFE = sp<mock::LayerFE>::make();
+
+        EXPECT_CALL(*testLayer->layerFE, getSequence)
+                .WillRepeatedly(Return(static_cast<int32_t>(i)));
+        EXPECT_CALL(*testLayer->layerFE, getDebugName).WillRepeatedly(Return("testLayer"));
+        EXPECT_CALL(*testLayer->layerFE, getCompositionState)
+                .WillRepeatedly(Return(&testLayer->layerFECompositionState));
+        EXPECT_CALL(testLayer->outputLayer, getLayerFE)
+                .WillRepeatedly(ReturnRef(*testLayer->layerFE));
+        EXPECT_CALL(testLayer->outputLayer, getState)
+                .WillRepeatedly(ReturnRef(testLayer->outputLayerCompositionState));
+
+        testLayer->layerState = std::make_unique<LayerState>(&testLayer->outputLayer);
+        testLayer->layerState->incrementFramesSinceBufferUpdate();
+        testLayer->cachedSetLayer =
+                std::make_unique<CachedSet::Layer>(testLayer->layerState.get(), kStartTime);
+
+        mTestLayers.emplace_back(std::move(testLayer));
+    }
+}
+
+void CachedSetTest::TearDown() {
+    mTestLayers.clear();
+}
+
+void expectEqual(const CachedSet& cachedSet, const CachedSet::Layer& layer) {
+    EXPECT_EQ(layer.getHash(), cachedSet.getFingerprint());
+    EXPECT_EQ(layer.getLastUpdate(), cachedSet.getLastUpdate());
+    EXPECT_EQ(layer.getDisplayFrame(), cachedSet.getBounds());
+    EXPECT_EQ(1u, cachedSet.getLayerCount());
+    EXPECT_EQ(layer.getState(), cachedSet.getFirstLayer().getState());
+    EXPECT_EQ(0u, cachedSet.getAge());
+    EXPECT_EQ(layer.getHash(), cachedSet.getNonBufferHash());
+}
+
+void expectEqual(const CachedSet& cachedSet, const LayerState& layerState,
+                 std::chrono::steady_clock::time_point lastUpdate) {
+    CachedSet::Layer layer(&layerState, lastUpdate);
+    expectEqual(cachedSet, layer);
+}
+
+void expectNoBuffer(const CachedSet& cachedSet) {
+    EXPECT_EQ(nullptr, cachedSet.getBuffer());
+    EXPECT_EQ(nullptr, cachedSet.getDrawFence());
+    EXPECT_FALSE(cachedSet.hasReadyBuffer());
+}
+
+void expectReadyBuffer(const CachedSet& cachedSet) {
+    EXPECT_NE(nullptr, cachedSet.getBuffer());
+    EXPECT_NE(nullptr, cachedSet.getDrawFence());
+    EXPECT_TRUE(cachedSet.hasReadyBuffer());
+}
+
+TEST_F(CachedSetTest, createFromLayer) {
+    CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet cachedSet(layer);
+    expectEqual(cachedSet, layer);
+    expectNoBuffer(cachedSet);
+}
+
+TEST_F(CachedSetTest, createFromLayerState) {
+    LayerState& layerState = *mTestLayers[0]->layerState.get();
+    CachedSet cachedSet(&layerState, kStartTime);
+    expectEqual(cachedSet, layerState, kStartTime);
+    expectNoBuffer(cachedSet);
+}
+
+TEST_F(CachedSetTest, addLayer) {
+    CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
+
+    CachedSet cachedSet(layer1);
+    cachedSet.addLayer(layer2.getState(), kStartTime + 10ms);
+
+    EXPECT_EQ(layer1.getHash(), cachedSet.getFingerprint());
+    EXPECT_EQ(kStartTime, cachedSet.getLastUpdate());
+    EXPECT_EQ(Rect(0, 0, 2, 2), cachedSet.getBounds());
+    EXPECT_EQ(2u, cachedSet.getLayerCount());
+    EXPECT_EQ(0u, cachedSet.getAge());
+    expectNoBuffer(cachedSet);
+    // TODO(b/181192080): check that getNonBufferHash returns the correct hash value
+    // EXPECT_EQ(android::hashCombine(layer1.getHash(), layer2.getHash()),
+    // cachedSet.getNonBufferHash());
+}
+
+TEST_F(CachedSetTest, decompose) {
+    CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
+    CachedSet::Layer& layer3 = *mTestLayers[2]->cachedSetLayer.get();
+
+    CachedSet cachedSet(layer1);
+    cachedSet.addLayer(layer2.getState(), kStartTime + 10ms);
+    cachedSet.addLayer(layer3.getState(), kStartTime + 20ms);
+
+    std::vector<CachedSet> decomposed = cachedSet.decompose();
+    EXPECT_EQ(3u, decomposed.size());
+    expectEqual(decomposed[0], *layer1.getState(), kStartTime);
+    expectNoBuffer(decomposed[0]);
+
+    expectEqual(decomposed[1], *layer2.getState(), kStartTime + 10ms);
+    expectNoBuffer(decomposed[1]);
+
+    expectEqual(decomposed[2], *layer3.getState(), kStartTime + 20ms);
+    expectNoBuffer(decomposed[2]);
+}
+
+TEST_F(CachedSetTest, setLastUpdate) {
+    LayerState& layerState = *mTestLayers[0]->layerState.get();
+    CachedSet cachedSet(&layerState, kStartTime);
+    cachedSet.setLastUpdate(kStartTime + 10ms);
+    expectEqual(cachedSet, layerState, kStartTime + 10ms);
+}
+
+TEST_F(CachedSetTest, incrementAge) {
+    CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet cachedSet(layer);
+    EXPECT_EQ(0u, cachedSet.getAge());
+    cachedSet.incrementAge();
+    EXPECT_EQ(1u, cachedSet.getAge());
+    cachedSet.incrementAge();
+    EXPECT_EQ(2u, cachedSet.getAge());
+}
+
+TEST_F(CachedSetTest, hasBufferUpdate_NoUpdate) {
+    CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
+    CachedSet::Layer& layer3 = *mTestLayers[2]->cachedSetLayer.get();
+
+    CachedSet cachedSet(layer1);
+    cachedSet.addLayer(layer2.getState(), kStartTime + 10ms);
+    cachedSet.addLayer(layer3.getState(), kStartTime + 20ms);
+
+    std::vector<const LayerState*> incomingLayers = {
+            layer1.getState(),
+            layer2.getState(),
+            layer3.getState(),
+    };
+
+    EXPECT_FALSE(cachedSet.hasBufferUpdate(incomingLayers.begin()));
+}
+
+TEST_F(CachedSetTest, hasBufferUpdate_BufferUpdate) {
+    CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
+    CachedSet::Layer& layer3 = *mTestLayers[2]->cachedSetLayer.get();
+
+    CachedSet cachedSet(layer1);
+    cachedSet.addLayer(layer2.getState(), kStartTime + 10ms);
+    cachedSet.addLayer(layer3.getState(), kStartTime + 20ms);
+
+    mTestLayers[1]->layerState->resetFramesSinceBufferUpdate();
+
+    std::vector<const LayerState*> incomingLayers = {
+            layer1.getState(),
+            layer2.getState(),
+            layer3.getState(),
+    };
+
+    EXPECT_TRUE(cachedSet.hasBufferUpdate(incomingLayers.begin()));
+}
+
+TEST_F(CachedSetTest, append) {
+    CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
+    CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
+    CachedSet::Layer& layer3 = *mTestLayers[2]->cachedSetLayer.get();
+
+    CachedSet cachedSet1(layer1);
+    CachedSet cachedSet2(layer2);
+    cachedSet1.addLayer(layer3.getState(), kStartTime + 10ms);
+    cachedSet1.append(cachedSet2);
+
+    EXPECT_EQ(layer1.getHash(), cachedSet1.getFingerprint());
+    EXPECT_EQ(kStartTime, cachedSet1.getLastUpdate());
+    EXPECT_EQ(Rect(0, 0, 3, 3), cachedSet1.getBounds());
+    EXPECT_EQ(3u, cachedSet1.getLayerCount());
+    EXPECT_EQ(0u, cachedSet1.getAge());
+    expectNoBuffer(cachedSet1);
+    // TODO(b/181192080): check that getNonBufferHash returns the correct hash value
+    // EXPECT_EQ(android::hashCombine(layer1.getHash(), layer2.getHash()),
+    // cachedSet1.getNonBufferHash());
+}
+
+TEST_F(CachedSetTest, updateAge_NoUpdate) {
+    CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
+
+    CachedSet cachedSet(layer);
+    cachedSet.incrementAge();
+    EXPECT_EQ(kStartTime, cachedSet.getLastUpdate());
+    EXPECT_EQ(1u, cachedSet.getAge());
+
+    cachedSet.updateAge(kStartTime + 10ms);
+    EXPECT_EQ(kStartTime, cachedSet.getLastUpdate());
+    EXPECT_EQ(1u, cachedSet.getAge());
+}
+
+TEST_F(CachedSetTest, updateAge_BufferUpdate) {
+    CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
+    mTestLayers[0]->layerState->resetFramesSinceBufferUpdate();
+
+    CachedSet cachedSet(layer);
+    cachedSet.incrementAge();
+    EXPECT_EQ(kStartTime, cachedSet.getLastUpdate());
+    EXPECT_EQ(1u, cachedSet.getAge());
+
+    cachedSet.updateAge(kStartTime + 10ms);
+    EXPECT_EQ(kStartTime + 10ms, cachedSet.getLastUpdate());
+    EXPECT_EQ(0u, cachedSet.getAge());
+}
+
+TEST_F(CachedSetTest, render) {
+    CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
+    sp<mock::LayerFE> layerFE1 = mTestLayers[0]->layerFE;
+    CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
+    sp<mock::LayerFE> layerFE2 = mTestLayers[1]->layerFE;
+
+    CachedSet cachedSet(layer1);
+    cachedSet.append(CachedSet(layer2));
+
+    std::vector<compositionengine::LayerFE::LayerSettings> clientCompList1;
+    clientCompList1.push_back({});
+    clientCompList1[0].alpha = 0.5f;
+
+    std::vector<compositionengine::LayerFE::LayerSettings> clientCompList2;
+    clientCompList2.push_back({});
+    clientCompList2[0].alpha = 0.75f;
+
+    const auto drawLayers = [&](const renderengine::DisplaySettings& displaySettings,
+                                const std::vector<const renderengine::LayerSettings*>& layers,
+                                const sp<GraphicBuffer>&, const bool, base::unique_fd&&,
+                                base::unique_fd*) -> size_t {
+        EXPECT_EQ(Rect(0, 0, 2, 2), displaySettings.physicalDisplay);
+        EXPECT_EQ(Rect(0, 0, 2, 2), displaySettings.clip);
+        EXPECT_EQ(0.5f, layers[0]->alpha);
+        EXPECT_EQ(0.75f, layers[1]->alpha);
+
+        return NO_ERROR;
+    };
+
+    EXPECT_CALL(*layerFE1, prepareClientCompositionList(_)).WillOnce(Return(clientCompList1));
+    EXPECT_CALL(*layerFE2, prepareClientCompositionList(_)).WillOnce(Return(clientCompList2));
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Invoke(drawLayers));
+    cachedSet.render(mRenderEngine);
+    expectReadyBuffer(cachedSet);
+}
+
+} // namespace
+} // namespace android::compositionengine
diff --git a/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp b/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp
new file mode 100644
index 0000000..42bbfcc
--- /dev/null
+++ b/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <compositionengine/impl/planner/CachedSet.h>
+#include <compositionengine/impl/planner/Flattener.h>
+#include <compositionengine/impl/planner/LayerState.h>
+#include <compositionengine/impl/planner/Predictor.h>
+#include <compositionengine/mock/LayerFE.h>
+#include <compositionengine/mock/OutputLayer.h>
+#include <gtest/gtest.h>
+#include <renderengine/mock/RenderEngine.h>
+
+namespace android::compositionengine {
+using namespace std::chrono_literals;
+using impl::planner::Flattener;
+using impl::planner::LayerState;
+using impl::planner::NonBufferHash;
+using impl::planner::Predictor;
+
+using testing::_;
+using testing::ByMove;
+using testing::ByRef;
+using testing::DoAll;
+using testing::Invoke;
+using testing::Return;
+using testing::ReturnRef;
+using testing::Sequence;
+using testing::SetArgPointee;
+
+namespace {
+
+class FlattenerTest : public testing::Test {
+public:
+    FlattenerTest() : mFlattener(std::make_unique<Flattener>(mPredictor)) {}
+    void SetUp() override;
+
+protected:
+    void initializeOverrideBuffer(const std::vector<const LayerState*>& layers);
+    void initializeFlattener(const std::vector<const LayerState*>& layers);
+    void expectAllLayersFlattened(const std::vector<const LayerState*>& layers);
+
+    // TODO(b/181192467): Once Flattener starts to do something useful with Predictor,
+    // mPredictor should be mocked and checked for expectations.
+    Predictor mPredictor;
+    std::unique_ptr<Flattener> mFlattener;
+    renderengine::mock::RenderEngine mRenderEngine;
+
+    const std::chrono::steady_clock::time_point kStartTime = std::chrono::steady_clock::now();
+    std::chrono::steady_clock::time_point mTime = kStartTime;
+
+    struct TestLayer {
+        std::string name;
+        mock::OutputLayer outputLayer;
+        impl::OutputLayerCompositionState outputLayerCompositionState;
+        // LayerFE inherits from RefBase and must be held by an sp<>
+        sp<mock::LayerFE> layerFE;
+        LayerFECompositionState layerFECompositionState;
+
+        std::unique_ptr<LayerState> layerState;
+    };
+
+    static constexpr size_t kNumLayers = 5;
+    std::vector<std::unique_ptr<TestLayer>> mTestLayers;
+};
+
+void FlattenerTest::SetUp() {
+    for (size_t i = 0; i < kNumLayers; i++) {
+        auto testLayer = std::make_unique<TestLayer>();
+        auto pos = static_cast<int32_t>(i);
+        std::stringstream ss;
+        ss << "testLayer" << i;
+        testLayer->name = ss.str();
+
+        testLayer->outputLayerCompositionState.displayFrame = Rect(pos, pos, pos + 1, pos + 1);
+
+        testLayer->layerFECompositionState.buffer =
+                new GraphicBuffer(100, 100, HAL_PIXEL_FORMAT_RGBA_8888, 1,
+                                  GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+                                          GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE,
+                                  "output");
+
+        testLayer->layerFE = sp<mock::LayerFE>::make();
+
+        EXPECT_CALL(*testLayer->layerFE, getSequence)
+                .WillRepeatedly(Return(static_cast<int32_t>(i)));
+        EXPECT_CALL(*testLayer->layerFE, getDebugName)
+                .WillRepeatedly(Return(testLayer->name.c_str()));
+        EXPECT_CALL(*testLayer->layerFE, getCompositionState)
+                .WillRepeatedly(Return(&testLayer->layerFECompositionState));
+
+        std::vector<LayerFE::LayerSettings> clientCompositionList = {
+                LayerFE::LayerSettings{},
+        };
+
+        EXPECT_CALL(*testLayer->layerFE, prepareClientCompositionList)
+                .WillRepeatedly(Return(clientCompositionList));
+        EXPECT_CALL(testLayer->outputLayer, getLayerFE)
+                .WillRepeatedly(ReturnRef(*testLayer->layerFE));
+        EXPECT_CALL(testLayer->outputLayer, getState)
+                .WillRepeatedly(ReturnRef(testLayer->outputLayerCompositionState));
+        EXPECT_CALL(testLayer->outputLayer, editState)
+                .WillRepeatedly(ReturnRef(testLayer->outputLayerCompositionState));
+
+        testLayer->layerState = std::make_unique<LayerState>(&testLayer->outputLayer);
+        testLayer->layerState->incrementFramesSinceBufferUpdate();
+
+        mTestLayers.emplace_back(std::move(testLayer));
+    }
+}
+
+void FlattenerTest::initializeOverrideBuffer(const std::vector<const LayerState*>& layers) {
+    for (const auto layer : layers) {
+        layer->getOutputLayer()->editState().overrideInfo = {};
+    }
+}
+
+void FlattenerTest::initializeFlattener(const std::vector<const LayerState*>& layers) {
+    // layer stack is unknown, reset current geometry
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    // same geometry, update the internal layer stack
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+}
+
+void FlattenerTest::expectAllLayersFlattened(const std::vector<const LayerState*>& layers) {
+    // layers would be flattened but the buffer would not be overridden
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    for (const auto layer : layers) {
+        EXPECT_EQ(nullptr, layer->getOutputLayer()->getState().overrideInfo.buffer);
+    }
+
+    // the new flattened layer replaces the original layers
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    const auto buffer = layers[0]->getOutputLayer()->getState().overrideInfo.buffer;
+    EXPECT_NE(nullptr, buffer);
+    for (const auto layer : layers) {
+        EXPECT_EQ(buffer, layer->getOutputLayer()->getState().overrideInfo.buffer);
+    }
+}
+
+TEST_F(FlattenerTest, flattenLayers_NewLayerStack) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    auto& layerState2 = mTestLayers[1]->layerState;
+
+    const std::vector<const LayerState*> layers = {
+            layerState1.get(),
+            layerState2.get(),
+    };
+    initializeFlattener(layers);
+}
+
+TEST_F(FlattenerTest, flattenLayers_ActiveLayersAreNotFlattened) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    auto& layerState2 = mTestLayers[1]->layerState;
+
+    const std::vector<const LayerState*> layers = {
+            layerState1.get(),
+            layerState2.get(),
+    };
+
+    initializeFlattener(layers);
+
+    // layers cannot be flattened yet, since they are still active
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+}
+
+TEST_F(FlattenerTest, flattenLayers_basicFlatten) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    auto& layerState2 = mTestLayers[1]->layerState;
+    auto& layerState3 = mTestLayers[2]->layerState;
+
+    const std::vector<const LayerState*> layers = {
+            layerState1.get(),
+            layerState2.get(),
+            layerState3.get(),
+    };
+
+    initializeFlattener(layers);
+
+    // make all layers inactive
+    mTime += 200ms;
+    expectAllLayersFlattened(layers);
+}
+
+TEST_F(FlattenerTest, flattenLayers_FlattenedLayersStayFlattenWhenNoUpdate) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    const auto& overrideBuffer1 = layerState1->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState2 = mTestLayers[1]->layerState;
+    const auto& overrideBuffer2 = layerState2->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState3 = mTestLayers[2]->layerState;
+    const auto& overrideBuffer3 = layerState3->getOutputLayer()->getState().overrideInfo.buffer;
+
+    const std::vector<const LayerState*> layers = {
+            layerState1.get(),
+            layerState2.get(),
+            layerState3.get(),
+    };
+
+    initializeFlattener(layers);
+
+    // make all layers inactive
+    mTime += 200ms;
+    expectAllLayersFlattened(layers);
+
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_NE(nullptr, overrideBuffer1);
+    EXPECT_EQ(overrideBuffer1, overrideBuffer2);
+    EXPECT_EQ(overrideBuffer2, overrideBuffer3);
+}
+
+TEST_F(FlattenerTest, flattenLayers_addLayerToFlattenedCauseReset) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    const auto& overrideBuffer1 = layerState1->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState2 = mTestLayers[1]->layerState;
+    const auto& overrideBuffer2 = layerState2->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState3 = mTestLayers[2]->layerState;
+    const auto& overrideBuffer3 = layerState3->getOutputLayer()->getState().overrideInfo.buffer;
+
+    std::vector<const LayerState*> layers = {
+            layerState1.get(),
+            layerState2.get(),
+    };
+
+    initializeFlattener(layers);
+    // make all layers inactive
+    mTime += 200ms;
+
+    initializeOverrideBuffer(layers);
+    expectAllLayersFlattened(layers);
+
+    // add a new layer to the stack; this will cause the flattener to reset
+    layers.push_back(layerState3.get());
+
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_EQ(nullptr, overrideBuffer1);
+    EXPECT_EQ(nullptr, overrideBuffer2);
+    EXPECT_EQ(nullptr, overrideBuffer3);
+}
+
+TEST_F(FlattenerTest, flattenLayers_BufferUpdateToFlatten) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    const auto& overrideBuffer1 = layerState1->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState2 = mTestLayers[1]->layerState;
+    const auto& overrideBuffer2 = layerState2->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState3 = mTestLayers[2]->layerState;
+    const auto& overrideBuffer3 = layerState3->getOutputLayer()->getState().overrideInfo.buffer;
+
+    const std::vector<const LayerState*> layers = {
+            layerState1.get(),
+            layerState2.get(),
+            layerState3.get(),
+    };
+
+    initializeFlattener(layers);
+
+    // make all layers inactive
+    mTime += 200ms;
+    expectAllLayersFlattened(layers);
+
+    // Layer 1 posted a buffer update, layers would be decomposed, and a new drawFrame would be
+    // called for Layer2 and Layer3
+    layerState1->resetFramesSinceBufferUpdate();
+
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_EQ(nullptr, overrideBuffer1);
+    EXPECT_EQ(nullptr, overrideBuffer2);
+    EXPECT_EQ(nullptr, overrideBuffer3);
+
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_EQ(nullptr, overrideBuffer1);
+    EXPECT_NE(nullptr, overrideBuffer2);
+    EXPECT_EQ(overrideBuffer2, overrideBuffer3);
+
+    layerState1->incrementFramesSinceBufferUpdate();
+    mTime += 200ms;
+
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_EQ(nullptr, overrideBuffer1);
+    EXPECT_NE(nullptr, overrideBuffer2);
+    EXPECT_EQ(overrideBuffer2, overrideBuffer3);
+
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_NE(nullptr, overrideBuffer1);
+    EXPECT_EQ(overrideBuffer1, overrideBuffer2);
+    EXPECT_EQ(overrideBuffer2, overrideBuffer3);
+}
+
+TEST_F(FlattenerTest, flattenLayers_BufferUpdateForMiddleLayer) {
+    auto& layerState1 = mTestLayers[0]->layerState;
+    const auto& overrideBuffer1 = layerState1->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState2 = mTestLayers[1]->layerState;
+    const auto& overrideBuffer2 = layerState2->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState3 = mTestLayers[2]->layerState;
+    const auto& overrideBuffer3 = layerState3->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState4 = mTestLayers[3]->layerState;
+    const auto& overrideBuffer4 = layerState4->getOutputLayer()->getState().overrideInfo.buffer;
+
+    auto& layerState5 = mTestLayers[4]->layerState;
+    const auto& overrideBuffer5 = layerState5->getOutputLayer()->getState().overrideInfo.buffer;
+
+    const std::vector<const LayerState*> layers = {
+            layerState1.get(), layerState2.get(), layerState3.get(),
+            layerState4.get(), layerState5.get(),
+    };
+
+    initializeFlattener(layers);
+
+    // make all layers inactive
+    mTime += 200ms;
+    expectAllLayersFlattened(layers);
+
+    // Layer 3 posted a buffer update, layers would be decomposed, and a new drawFrame would be
+    // called for Layer1 and Layer2
+    layerState3->resetFramesSinceBufferUpdate();
+
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+    initializeOverrideBuffer(layers);
+    EXPECT_EQ(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_EQ(nullptr, overrideBuffer1);
+    EXPECT_EQ(nullptr, overrideBuffer2);
+    EXPECT_EQ(nullptr, overrideBuffer3);
+    EXPECT_EQ(nullptr, overrideBuffer4);
+    EXPECT_EQ(nullptr, overrideBuffer5);
+
+    // Layers 1 and 2 will be flattened; a new drawFrame would be called for Layer4 and Layer5
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_NE(nullptr, overrideBuffer1);
+    EXPECT_EQ(overrideBuffer1, overrideBuffer2);
+    EXPECT_EQ(nullptr, overrideBuffer3);
+    EXPECT_EQ(nullptr, overrideBuffer4);
+    EXPECT_EQ(nullptr, overrideBuffer5);
+
+    // Layers 4 and 5 will be flattened
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_NE(nullptr, overrideBuffer1);
+    EXPECT_EQ(overrideBuffer1, overrideBuffer2);
+    EXPECT_EQ(nullptr, overrideBuffer3);
+    EXPECT_NE(nullptr, overrideBuffer4);
+    EXPECT_EQ(overrideBuffer4, overrideBuffer5);
+
+    layerState3->incrementFramesSinceBufferUpdate();
+    mTime += 200ms;
+
+    EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_NE(nullptr, overrideBuffer1);
+    EXPECT_EQ(overrideBuffer1, overrideBuffer2);
+    EXPECT_EQ(nullptr, overrideBuffer3);
+    EXPECT_NE(nullptr, overrideBuffer4);
+    EXPECT_EQ(overrideBuffer4, overrideBuffer5);
+
+    initializeOverrideBuffer(layers);
+    EXPECT_NE(getNonBufferHash(layers),
+              mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+    mFlattener->renderCachedSets(mRenderEngine);
+
+    EXPECT_NE(nullptr, overrideBuffer1);
+    EXPECT_EQ(overrideBuffer1, overrideBuffer2);
+    EXPECT_EQ(overrideBuffer2, overrideBuffer3);
+    EXPECT_EQ(overrideBuffer3, overrideBuffer4);
+    EXPECT_EQ(overrideBuffer4, overrideBuffer5);
+}
+
+} // namespace
+} // namespace android::compositionengine
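
Threading a caller-supplied time point through flattenLayers() (Planner passes std::chrono::steady_clock::now(), while this test advances its own mTime) is what lets the cases above make layers inactive by simply adding 200ms. A minimal sketch of that clock-injection pattern; IdleTracker and its 150ms threshold are invented purely for illustration:

#include <chrono>
#include <iostream>

using namespace std::chrono_literals;
using time_point = std::chrono::steady_clock::time_point;

// Illustrative only: something is considered flattenable once it has been idle long enough.
struct IdleTracker {
    time_point lastUpdate;
    bool isInactive(time_point now) const { return now - lastUpdate > 150ms; }
};

int main() {
    // The caller owns "now" instead of the code under test calling steady_clock::now() itself.
    time_point fakeNow = std::chrono::steady_clock::now();
    IdleTracker layer{fakeNow};

    std::cout << layer.isInactive(fakeNow) << '\n';  // 0: still active
    fakeNow += 200ms;                                // mimics "mTime += 200ms" in FlattenerTest
    std::cout << layer.isInactive(fakeNow) << '\n';  // 1: now eligible for flattening
    return 0;
}
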
diff --git a/services/surfaceflinger/EffectLayer.cpp b/services/surfaceflinger/EffectLayer.cpp
index 44d4d75..caef338 100644
--- a/services/surfaceflinger/EffectLayer.cpp
+++ b/services/surfaceflinger/EffectLayer.cpp
@@ -77,7 +77,7 @@
 }
 
 bool EffectLayer::isVisible() const {
-    return !isHiddenByPolicy() && getAlpha() > 0.0_hf && hasSomethingToDraw();
+    return !isHiddenByPolicy() && (getAlpha() > 0.0_hf || hasBlur()) && hasSomethingToDraw();
 }
 
 bool EffectLayer::setColor(const half3& color) {
diff --git a/services/surfaceflinger/FrameTimeline/FrameTimeline.cpp b/services/surfaceflinger/FrameTimeline/FrameTimeline.cpp
index 03e38f3..b1dff8d 100644
--- a/services/surfaceflinger/FrameTimeline/FrameTimeline.cpp
+++ b/services/surfaceflinger/FrameTimeline/FrameTimeline.cpp
@@ -296,7 +296,7 @@
                            frametimeline::TimelineItem&& predictions,
                            std::shared_ptr<TimeStats> timeStats,
                            JankClassificationThresholds thresholds,
-                           TraceCookieCounter* traceCookieCounter)
+                           TraceCookieCounter* traceCookieCounter, bool isBuffer)
       : mToken(frameTimelineInfo.vsyncId),
         mInputEventId(frameTimelineInfo.inputEventId),
         mOwnerPid(ownerPid),
@@ -310,7 +310,8 @@
         mActuals({0, 0, 0}),
         mTimeStats(timeStats),
         mJankClassificationThresholds(thresholds),
-        mTraceCookieCounter(*traceCookieCounter) {}
+        mTraceCookieCounter(*traceCookieCounter),
+        mIsBuffer(isBuffer) {}
 
 void SurfaceFrame::setActualStartTime(nsecs_t actualStartTime) {
     std::scoped_lock lock(mMutex);
@@ -395,6 +396,20 @@
     return mDropTime;
 }
 
+void SurfaceFrame::promoteToBuffer() {
+    std::scoped_lock lock(mMutex);
+    LOG_ALWAYS_FATAL_IF(mIsBuffer == true,
+                        "Trying to promote an already promoted BufferSurfaceFrame from layer %s "
+                        "with token %" PRId64 "",
+                        mDebugName.c_str(), mToken);
+    mIsBuffer = true;
+}
+
+bool SurfaceFrame::getIsBuffer() const {
+    std::scoped_lock lock(mMutex);
+    return mIsBuffer;
+}
+
 void SurfaceFrame::dump(std::string& result, const std::string& indent, nsecs_t baseTime) const {
     std::scoped_lock lock(mMutex);
     StringAppendF(&result, "%s", indent.c_str());
@@ -407,6 +422,8 @@
     StringAppendF(&result, "%s", indent.c_str());
     StringAppendF(&result, "Token: %" PRId64 "\n", mToken);
     StringAppendF(&result, "%s", indent.c_str());
+    StringAppendF(&result, "Is Buffer?: %d\n", mIsBuffer);
+    StringAppendF(&result, "%s", indent.c_str());
     StringAppendF(&result, "Owner Pid : %d\n", mOwnerPid);
     StringAppendF(&result, "%s", indent.c_str());
     StringAppendF(&result, "Scheduled rendering rate: %d fps\n",
@@ -444,37 +461,41 @@
     dumpTable(result, mPredictions, mActuals, indent, mPredictionState, baseTime);
 }
 
-void SurfaceFrame::onPresent(nsecs_t presentTime, int32_t displayFrameJankType, Fps refreshRate,
-                             nsecs_t displayDeadlineDelta, nsecs_t displayPresentDelta) {
+std::string SurfaceFrame::miniDump() const {
     std::scoped_lock lock(mMutex);
+    std::string result;
+    StringAppendF(&result, "Layer - %s\n", mDebugName.c_str());
+    StringAppendF(&result, "Token: %" PRId64 "\n", mToken);
+    StringAppendF(&result, "Is Buffer?: %d\n", mIsBuffer);
+    StringAppendF(&result, "Present State : %s\n", toString(mPresentState).c_str());
+    StringAppendF(&result, "Prediction State : %s\n", toString(mPredictionState).c_str());
+    StringAppendF(&result, "Jank Type : %s\n", jankTypeBitmaskToString(mJankType).c_str());
+    StringAppendF(&result, "Present Metadata : %s\n", toString(mFramePresentMetadata).c_str());
+    StringAppendF(&result, "Finish Metadata: %s\n", toString(mFrameReadyMetadata).c_str());
+    StringAppendF(&result, "Present time: %" PRId64 "", mActuals.presentTime);
+    return result;
+}
 
-    if (mPresentState != PresentState::Presented) {
-        // No need to update dropped buffers
+void SurfaceFrame::classifyJankLocked(int32_t displayFrameJankType, const Fps& refreshRate,
+                                      nsecs_t& deadlineDelta) {
+    if (mPredictionState == PredictionState::Expired ||
+        mActuals.presentTime == Fence::SIGNAL_TIME_INVALID) {
+        // Cannot do any classification for invalid present time.
+        // For the expired prediction case, we do not know what happened here to classify this
+        // correctly. This could potentially be AppDeadlineMissed but that's assuming no app will
+        // request frames 120ms apart.
+        mJankType = JankType::Unknown;
+        deadlineDelta = -1;
         return;
     }
 
-    mActuals.presentTime = presentTime;
-    // Jank Analysis for SurfaceFrame
     if (mPredictionState == PredictionState::None) {
         // Cannot do jank classification on frames that don't have a token.
         return;
     }
-    if (mPredictionState == PredictionState::Expired) {
-        // We do not know what happened here to classify this correctly. This could
-        // potentially be AppDeadlineMissed but that's assuming no app will request frames
-        // 120ms apart.
-        mJankType = JankType::Unknown;
-        mFramePresentMetadata = FramePresentMetadata::UnknownPresent;
-        mFrameReadyMetadata = FrameReadyMetadata::UnknownFinish;
-        const constexpr nsecs_t kAppDeadlineDelta = -1;
-        mTimeStats->incrementJankyFrames({refreshRate, mRenderRate, mOwnerUid, mLayerName,
-                                          mJankType, displayDeadlineDelta, displayPresentDelta,
-                                          kAppDeadlineDelta});
-        return;
-    }
 
+    deadlineDelta = mActuals.endTime - mPredictions.endTime;
     const nsecs_t presentDelta = mActuals.presentTime - mPredictions.presentTime;
-    const nsecs_t deadlineDelta = mActuals.endTime - mPredictions.endTime;
     const nsecs_t deltaToVsync = refreshRate.getPeriodNsecs() > 0
             ? std::abs(presentDelta) % refreshRate.getPeriodNsecs()
             : 0;
@@ -558,8 +579,28 @@
             }
         }
     }
-    mTimeStats->incrementJankyFrames({refreshRate, mRenderRate, mOwnerUid, mLayerName, mJankType,
-                                      displayDeadlineDelta, displayPresentDelta, deadlineDelta});
+}
+
+void SurfaceFrame::onPresent(nsecs_t presentTime, int32_t displayFrameJankType, Fps refreshRate,
+                             nsecs_t displayDeadlineDelta, nsecs_t displayPresentDelta) {
+    std::scoped_lock lock(mMutex);
+
+    if (mPresentState != PresentState::Presented) {
+        // No need to update dropped buffers
+        return;
+    }
+
+    mActuals.presentTime = presentTime;
+    nsecs_t deadlineDelta = 0;
+
+    classifyJankLocked(displayFrameJankType, refreshRate, deadlineDelta);
+
+    if (mPredictionState != PredictionState::None) {
+        // Only update janky frames if the app used vsync predictions
+        mTimeStats->incrementJankyFrames({refreshRate, mRenderRate, mOwnerUid, mLayerName,
+                                          mJankType, displayDeadlineDelta, displayPresentDelta,
+                                          deadlineDelta});
+    }
 }
 
 void SurfaceFrame::tracePredictions(int64_t displayFrameToken) const {
@@ -735,13 +776,14 @@
 
 std::shared_ptr<SurfaceFrame> FrameTimeline::createSurfaceFrameForToken(
         const FrameTimelineInfo& frameTimelineInfo, pid_t ownerPid, uid_t ownerUid, int32_t layerId,
-        std::string layerName, std::string debugName) {
+        std::string layerName, std::string debugName, bool isBuffer) {
     ATRACE_CALL();
     if (frameTimelineInfo.vsyncId == FrameTimelineInfo::INVALID_VSYNC_ID) {
         return std::make_shared<SurfaceFrame>(frameTimelineInfo, ownerPid, ownerUid, layerId,
                                               std::move(layerName), std::move(debugName),
                                               PredictionState::None, TimelineItem(), mTimeStats,
-                                              mJankClassificationThresholds, &mTraceCookieCounter);
+                                              mJankClassificationThresholds, &mTraceCookieCounter,
+                                              isBuffer);
     }
     std::optional<TimelineItem> predictions =
             mTokenManager.getPredictionsForToken(frameTimelineInfo.vsyncId);
@@ -750,12 +792,13 @@
                                               std::move(layerName), std::move(debugName),
                                               PredictionState::Valid, std::move(*predictions),
                                               mTimeStats, mJankClassificationThresholds,
-                                              &mTraceCookieCounter);
+                                              &mTraceCookieCounter, isBuffer);
     }
     return std::make_shared<SurfaceFrame>(frameTimelineInfo, ownerPid, ownerUid, layerId,
                                           std::move(layerName), std::move(debugName),
                                           PredictionState::Expired, TimelineItem(), mTimeStats,
-                                          mJankClassificationThresholds, &mTraceCookieCounter);
+                                          mJankClassificationThresholds, &mTraceCookieCounter,
+                                          isBuffer);
 }
 
 FrameTimeline::DisplayFrame::DisplayFrame(std::shared_ptr<TimeStats> timeStats,
@@ -826,25 +869,28 @@
     mSurfaceFlingerActuals.endTime = actualEndTime;
 }
 
-void FrameTimeline::DisplayFrame::onPresent(nsecs_t signalTime) {
-    mSurfaceFlingerActuals.presentTime = signalTime;
-    if (mPredictionState == PredictionState::Expired) {
-        // Cannot do jank classification with expired predictions
+void FrameTimeline::DisplayFrame::classifyJank(nsecs_t& deadlineDelta, nsecs_t& deltaToVsync) {
+    if (mPredictionState == PredictionState::Expired ||
+        mSurfaceFlingerActuals.presentTime == Fence::SIGNAL_TIME_INVALID) {
+        // Cannot do jank classification with expired predictions or invalid signal times.
         mJankType = JankType::Unknown;
+        deadlineDelta = -1;
+        deltaToVsync = -1;
         return;
     }
 
     // Delta between the expected present and the actual present
     const nsecs_t presentDelta =
             mSurfaceFlingerActuals.presentTime - mSurfaceFlingerPredictions.presentTime;
-    const nsecs_t deadlineDelta =
+    deadlineDelta =
             mSurfaceFlingerActuals.endTime - (mSurfaceFlingerPredictions.endTime - mHwcDuration);
 
     // How far off was the presentDelta when compared to the vsyncPeriod. Used in checking if there
     // was a prediction error or not.
-    nsecs_t deltaToVsync = mRefreshRate.getPeriodNsecs() > 0
+    deltaToVsync = mRefreshRate.getPeriodNsecs() > 0
             ? std::abs(presentDelta) % mRefreshRate.getPeriodNsecs()
             : 0;
+
     if (std::abs(presentDelta) > mJankClassificationThresholds.presentThreshold) {
         mFramePresentMetadata = presentDelta > 0 ? FramePresentMetadata::LatePresent
                                                  : FramePresentMetadata::EarlyPresent;
@@ -922,6 +968,14 @@
             mJankType = JankType::Unknown;
         }
     }
+}
+
+void FrameTimeline::DisplayFrame::onPresent(nsecs_t signalTime) {
+    mSurfaceFlingerActuals.presentTime = signalTime;
+    nsecs_t deadlineDelta = 0;
+    nsecs_t deltaToVsync = 0;
+    classifyJank(deadlineDelta, deltaToVsync);
+
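+    // Propagate the display frame's classification and deltas to each attached SurfaceFrame.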
     for (auto& surfaceFrame : mSurfaceFrames) {
         surfaceFrame->onPresent(signalTime, mJankType, mRefreshRate, deadlineDelta, deltaToVsync);
     }
@@ -1084,11 +1138,9 @@
                 continue;
             }
         }
-        if (signalTime != Fence::SIGNAL_TIME_INVALID) {
-            auto& displayFrame = pendingPresentFence.second;
-            displayFrame->onPresent(signalTime);
-            displayFrame->trace(mSurfaceFlingerPid);
-        }
+        auto& displayFrame = pendingPresentFence.second;
+        displayFrame->onPresent(signalTime);
+        displayFrame->trace(mSurfaceFlingerPid);
 
         mPendingPresentFences.erase(mPendingPresentFences.begin() + static_cast<int>(i));
         --i;
diff --git a/services/surfaceflinger/FrameTimeline/FrameTimeline.h b/services/surfaceflinger/FrameTimeline/FrameTimeline.h
index 7c6a0cc..3cf35f0 100644
--- a/services/surfaceflinger/FrameTimeline/FrameTimeline.h
+++ b/services/surfaceflinger/FrameTimeline/FrameTimeline.h
@@ -159,7 +159,7 @@
                  int32_t layerId, std::string layerName, std::string debugName,
                  PredictionState predictionState, TimelineItem&& predictions,
                  std::shared_ptr<TimeStats> timeStats, JankClassificationThresholds thresholds,
-                 TraceCookieCounter* traceCookieCounter);
+                 TraceCookieCounter* traceCookieCounter, bool isBuffer);
     ~SurfaceFrame() = default;
 
     // Returns std::nullopt if the frame hasn't been classified yet.
@@ -181,6 +181,10 @@
     void setPresentState(PresentState presentState, nsecs_t lastLatchTime = 0);
     void setRenderRate(Fps renderRate);
 
+    // When a bufferless SurfaceFrame is promoted to a buffer SurfaceFrame, we also have to update
+    // isBuffer.
+    void promoteToBuffer();
+
     // Functions called by FrameTimeline
     // BaseTime is the smallest timestamp in this SurfaceFrame.
     // Used for dumping all timestamps relative to the oldest, making it easy to read.
@@ -192,6 +196,8 @@
                    nsecs_t displayDeadlineDelta, nsecs_t displayPresentDelta);
     // All the timestamps are dumped relative to the baseTime
     void dump(std::string& result, const std::string& indent, nsecs_t baseTime) const;
+    // Dumps only the layer, token, isBuffer, jank metadata, prediction and present states.
+    std::string miniDump() const;
     // Emits a packet for perfetto tracing. The function body will be executed only if tracing is
     // enabled. The displayFrameToken is needed to link the SurfaceFrame to the corresponding
     // DisplayFrame at the trace processor side.
@@ -206,6 +212,7 @@
     FrameReadyMetadata getFrameReadyMetadata() const;
     FramePresentMetadata getFramePresentMetadata() const;
     nsecs_t getDropTime() const;
+    bool getIsBuffer() const;
 
     // For prediction expired frames, this delta is subtracted from the actual end time to get a
     // start time decent enough to see in traces.
@@ -216,6 +223,8 @@
 private:
     void tracePredictions(int64_t displayFrameToken) const;
     void traceActuals(int64_t displayFrameToken) const;
+    void classifyJankLocked(int32_t displayFrameJankType, const Fps& refreshRate,
+                            nsecs_t& deadlineDelta) REQUIRES(mMutex);
 
     const int64_t mToken;
     const int32_t mInputEventId;
@@ -251,6 +260,9 @@
     // TraceCookieCounter is used to obtain the cookie for sending trace packets to perfetto. Using a
     // reference here because the counter is owned by FrameTimeline, which outlives SurfaceFrame.
     TraceCookieCounter& mTraceCookieCounter;
+    // Tells whether this SurfaceFrame represents a buffer or a transaction without a
+    // buffer (such as animations).
+    bool mIsBuffer;
 };
 
 /*
@@ -270,7 +282,7 @@
     // Debug name is the human-readable debugging string for dumpsys.
     virtual std::shared_ptr<SurfaceFrame> createSurfaceFrameForToken(
             const FrameTimelineInfo& frameTimelineInfo, pid_t ownerPid, uid_t ownerUid,
-            int32_t layerId, std::string layerName, std::string debugName) = 0;
+            int32_t layerId, std::string layerName, std::string debugName, bool isBuffer) = 0;
 
     // Adds a new SurfaceFrame to the current DisplayFrame. Frames from multiple layers can be
     // composited into one display frame.
@@ -355,7 +367,7 @@
         // Sets the token, vsyncPeriod, predictions and SF start time.
         void onSfWakeUp(int64_t token, Fps refreshRate, std::optional<TimelineItem> predictions,
                         nsecs_t wakeUpTime);
-        // Sets the appropriate metadata, classifies the jank and returns the classified jankType.
+        // Sets the appropriate metadata and classifies the jank.
         void onPresent(nsecs_t signalTime);
         // Adds the provided SurfaceFrame to the current display frame.
         void addSurfaceFrame(std::shared_ptr<SurfaceFrame> surfaceFrame);
@@ -383,6 +395,7 @@
         void dump(std::string& result, nsecs_t baseTime) const;
         void tracePredictions(pid_t surfaceFlingerPid) const;
         void traceActuals(pid_t surfaceFlingerPid) const;
+        void classifyJank(nsecs_t& deadlineDelta, nsecs_t& deltaToVsync);
 
         int64_t mToken = FrameTimelineInfo::INVALID_VSYNC_ID;
 
@@ -428,7 +441,7 @@
     frametimeline::TokenManager* getTokenManager() override { return &mTokenManager; }
     std::shared_ptr<SurfaceFrame> createSurfaceFrameForToken(
             const FrameTimelineInfo& frameTimelineInfo, pid_t ownerPid, uid_t ownerUid,
-            int32_t layerId, std::string layerName, std::string debugName) override;
+            int32_t layerId, std::string layerName, std::string debugName, bool isBuffer) override;
     void addSurfaceFrame(std::shared_ptr<frametimeline::SurfaceFrame> surfaceFrame) override;
     void setSfWakeUp(int64_t token, nsecs_t wakeupTime, Fps refreshRate) override;
     void setSfPresent(nsecs_t sfPresentTime,
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index d7d349a..44f1a70 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1058,6 +1058,15 @@
 }
 
 void Layer::commitTransaction(State& stateToCommit) {
+    if (auto& bufferSurfaceFrame = mDrawingState.bufferSurfaceFrameTX;
+        mDrawingState.buffer != stateToCommit.buffer && bufferSurfaceFrame != nullptr &&
+        bufferSurfaceFrame->getPresentState() != PresentState::Presented) {
+        // If the previous buffer was committed but never latched (refreshPending, which happens
+        // during back-to-back invalidates), it is silently dropped here. Mark the corresponding
+        // SurfaceFrame as dropped so it does not get stuck in the pending classification list.
+        addSurfaceFrameDroppedForBuffer(bufferSurfaceFrame);
+    }
     mDrawingState = stateToCommit;
 
     // Set the present state for all bufferlessSurfaceFramesTX to Presented. The
@@ -1537,6 +1546,7 @@
         // Promote the bufferlessSurfaceFrame to a bufferSurfaceFrameTX
         mCurrentState.bufferSurfaceFrameTX = it->second;
         mCurrentState.bufferlessSurfaceFramesTX.erase(it);
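+        // Update isBuffer on the promoted SurfaceFrame so it is reported as a buffer frame.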
+        mCurrentState.bufferSurfaceFrameTX->promoteToBuffer();
         mCurrentState.bufferSurfaceFrameTX->setActualQueueTime(postTime);
     } else {
         mCurrentState.bufferSurfaceFrameTX =
@@ -1596,7 +1606,8 @@
     auto surfaceFrame =
             mFlinger->mFrameTimeline->createSurfaceFrameForToken(info, mOwnerPid, mOwnerUid,
                                                                  getSequence(), mName,
-                                                                 mTransactionName);
+                                                                 mTransactionName,
+                                                                 /*isBuffer*/ false);
     // For Transactions, the post time is considered to be both queue and acquire fence time.
     surfaceFrame->setActualQueueTime(postTime);
     surfaceFrame->setAcquireFenceTime(postTime);
@@ -1612,7 +1623,8 @@
         const FrameTimelineInfo& info, nsecs_t queueTime, std::string debugName) {
     auto surfaceFrame =
             mFlinger->mFrameTimeline->createSurfaceFrameForToken(info, mOwnerPid, mOwnerUid,
-                                                                 getSequence(), mName, debugName);
+                                                                 getSequence(), mName, debugName,
+                                                                 /*isBuffer*/ true);
     // For buffers, acquire fence time will be set during latch.
     surfaceFrame->setActualQueueTime(queueTime);
     const auto fps = mFlinger->mScheduler->getFrameRateOverride(getOwnerUid());
diff --git a/services/surfaceflinger/tests/BufferGenerator.cpp b/services/surfaceflinger/tests/BufferGenerator.cpp
index 4868c12..03f8e1a 100644
--- a/services/surfaceflinger/tests/BufferGenerator.cpp
+++ b/services/surfaceflinger/tests/BufferGenerator.cpp
@@ -70,7 +70,7 @@
         consumer->setDefaultBufferSize(width, height);
         consumer->setDefaultBufferFormat(format);
 
-        mBufferItemConsumer = new BufferItemConsumer(consumer, 0);
+        mBufferItemConsumer = new BufferItemConsumer(consumer, GraphicBuffer::USAGE_HW_TEXTURE);
 
         mListener = new BufferListener(consumer, callback);
         mBufferItemConsumer->setFrameAvailableListener(mListener);
diff --git a/services/surfaceflinger/tests/EffectLayer_test.cpp b/services/surfaceflinger/tests/EffectLayer_test.cpp
index fafb49e..7a3c45d 100644
--- a/services/surfaceflinger/tests/EffectLayer_test.cpp
+++ b/services/surfaceflinger/tests/EffectLayer_test.cpp
@@ -111,6 +111,72 @@
     }
 }
 
+TEST_F(EffectLayerTest, BlurEffectLayerIsVisible) {
+    if (!deviceSupportsBlurs()) GTEST_SKIP();
+    if (!deviceUsesSkiaRenderEngine()) GTEST_SKIP();
+
+    const auto canvasSize = 256;
+
+    sp<SurfaceControl> leftLayer = createColorLayer("Left", Color::BLUE);
+    sp<SurfaceControl> rightLayer = createColorLayer("Right", Color::GREEN);
+    sp<SurfaceControl> blurLayer;
+    const auto leftRect = Rect(0, 0, canvasSize / 2, canvasSize);
+    const auto rightRect = Rect(canvasSize / 2, 0, canvasSize, canvasSize);
+    const auto blurRect = Rect(0, 0, canvasSize, canvasSize);
+
+    asTransaction([&](Transaction& t) {
+        t.setLayer(leftLayer, mLayerZBase + 1);
+        t.reparent(leftLayer, mParentLayer);
+        t.setCrop_legacy(leftLayer, leftRect);
+        t.setLayer(rightLayer, mLayerZBase + 2);
+        t.reparent(rightLayer, mParentLayer);
+        t.setCrop_legacy(rightLayer, rightRect);
+        t.show(leftLayer);
+        t.show(rightLayer);
+    });
+
+    {
+        auto shot = screenshot();
+        shot->expectColor(leftRect, Color::BLUE);
+        shot->expectColor(rightRect, Color::GREEN);
+    }
+
+    ASSERT_NO_FATAL_FAILURE(blurLayer = createColorLayer("BackgroundBlur", Color::TRANSPARENT));
+
+    const auto blurRadius = canvasSize / 2;
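+    // The blur layer is fully transparent (alpha 0), so only its background blur is visible.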
+    asTransaction([&](Transaction& t) {
+        t.setLayer(blurLayer, mLayerZBase + 3);
+        t.reparent(blurLayer, mParentLayer);
+        t.setBackgroundBlurRadius(blurLayer, blurRadius);
+        t.setCrop_legacy(blurLayer, blurRect);
+        t.setFrame(blurLayer, blurRect);
+        t.setAlpha(blurLayer, 0.0f);
+        t.show(blurLayer);
+    });
+
+    {
+        auto shot = screenshot();
+
+        const auto stepSize = 1;
+        const auto blurAreaOffset = blurRadius * 0.7f;
+        const auto blurAreaStartX = canvasSize / 2 - blurRadius + blurAreaOffset;
+        const auto blurAreaEndX = canvasSize / 2 + blurRadius - blurAreaOffset;
+        Color previousColor;
+        Color currentColor;
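+        // Moving right through the blurred band, green should increase and blue should decrease
+        // as the blur mixes the blue and green halves; red stays zero.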
+        for (int y = 0; y < canvasSize; y++) {
+            shot->checkPixel(0, y, /* r = */ 0, /* g = */ 0, /* b = */ 255);
+            previousColor = shot->getPixelColor(0, y);
+            for (int x = blurAreaStartX; x < blurAreaEndX; x += stepSize) {
+                currentColor = shot->getPixelColor(x, y);
+                ASSERT_GT(currentColor.g, previousColor.g);
+                ASSERT_LT(currentColor.b, previousColor.b);
+                ASSERT_EQ(0, currentColor.r);
+            }
+            shot->checkPixel(canvasSize - 1, y, 0, 255, 0);
+        }
+    }
+}
+
 } // namespace android
 
 // TODO(b/129481165): remove the #pragma below and fix conversion issues
diff --git a/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp b/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp
index 52e1a4d..b35eaa9 100644
--- a/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp
+++ b/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp
@@ -43,6 +43,9 @@
 
 protected:
     LayerRenderPathTestHarness mHarness;
+
+    static constexpr int64_t kUsageFlags = BufferUsage::CPU_READ_OFTEN |
+            BufferUsage::CPU_WRITE_OFTEN | BufferUsage::COMPOSER_OVERLAY | BufferUsage::GPU_TEXTURE;
 };
 
 INSTANTIATE_TEST_CASE_P(LayerRenderTypeTransactionTests, LayerRenderTypeTransactionTest,
@@ -377,10 +380,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
 
     ASSERT_NO_FATAL_FAILURE(
             TransactionUtils::fillGraphicBufferColor(buffer, top, Color::TRANSPARENT));
@@ -405,10 +405,7 @@
         shot->expectColor(bottom, Color::BLACK);
     }
 
-    buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                               BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                       BufferUsage::COMPOSER_OVERLAY,
-                               "test");
+    buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
 
     ASSERT_NO_FATAL_FAILURE(TransactionUtils::fillGraphicBufferColor(buffer, top, Color::RED));
     ASSERT_NO_FATAL_FAILURE(
@@ -1015,10 +1012,7 @@
     ASSERT_NO_FATAL_FAILURE(
             layer = createLayer("test", 32, 64, ISurfaceComposerClient::eFXSurfaceBufferState));
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 64, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 64, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 16), Color::BLUE);
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 16, 32, 64), Color::RED);
 
@@ -1341,10 +1335,7 @@
 
     size_t idx = 0;
     for (auto& buffer : buffers) {
-        buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                                   BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                           BufferUsage::COMPOSER_OVERLAY,
-                                   "test");
+        buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
         Color color = colors[idx % colors.size()];
         TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), color);
         idx++;
@@ -1377,10 +1368,7 @@
 
     size_t idx = 0;
     for (auto& buffer : buffers) {
-        buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                                   BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                           BufferUsage::COMPOSER_OVERLAY,
-                                   "test");
+        buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
         Color color = colors[idx % colors.size()];
         TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), color);
         idx++;
@@ -1413,10 +1401,7 @@
 
     size_t idx = 0;
     for (auto& buffer : buffers) {
-        buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                                   BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                           BufferUsage::COMPOSER_OVERLAY,
-                                   "test");
+        buffer = new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
         Color color = colors[idx % colors.size()];
         TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), color);
         idx++;
@@ -1499,10 +1484,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), Color::RED);
 
     sp<Fence> fence;
@@ -1528,10 +1510,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), Color::RED);
 
     sp<Fence> fence = Fence::NO_FENCE;
@@ -1549,10 +1528,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), Color::RED);
 
     Transaction().setBuffer(layer, buffer).setDataspace(layer, ui::Dataspace::UNKNOWN).apply();
@@ -1568,10 +1544,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), Color::RED);
 
     HdrMetadata hdrMetadata;
@@ -1589,10 +1562,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), Color::RED);
 
     Region region;
@@ -1610,10 +1580,7 @@
             layer = createLayer("test", 32, 32, ISurfaceComposerClient::eFXSurfaceBufferState));
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(32, 32, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, 32, 32), Color::RED);
 
     Transaction().setBuffer(layer, buffer).setApi(layer, NATIVE_WINDOW_API_CPU).apply();
diff --git a/services/surfaceflinger/tests/LayerTransactionTest.h b/services/surfaceflinger/tests/LayerTransactionTest.h
index eba2c25..be6665b 100644
--- a/services/surfaceflinger/tests/LayerTransactionTest.h
+++ b/services/surfaceflinger/tests/LayerTransactionTest.h
@@ -21,6 +21,7 @@
 #pragma clang diagnostic ignored "-Wconversion"
 #pragma clang diagnostic ignored "-Wextra"
 
+#include <cutils/properties.h>
 #include <gtest/gtest.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/SurfaceComposerClient.h>
@@ -138,7 +139,7 @@
         sp<GraphicBuffer> buffer =
                 new GraphicBuffer(bufferWidth, bufferHeight, PIXEL_FORMAT_RGBA_8888, 1,
                                   BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                          BufferUsage::COMPOSER_OVERLAY,
+                                          BufferUsage::COMPOSER_OVERLAY | BufferUsage::GPU_TEXTURE,
                                   "test");
         TransactionUtils::fillGraphicBufferColor(buffer, Rect(0, 0, bufferWidth, bufferHeight),
                                                  color);
@@ -207,7 +208,7 @@
         sp<GraphicBuffer> buffer =
                 new GraphicBuffer(bufferWidth, bufferHeight, PIXEL_FORMAT_RGBA_8888, 1,
                                   BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                          BufferUsage::COMPOSER_OVERLAY,
+                                          BufferUsage::COMPOSER_OVERLAY | BufferUsage::GPU_TEXTURE,
                                   "test");
 
         ASSERT_TRUE(bufferWidth % 2 == 0 && bufferHeight % 2 == 0);
@@ -245,6 +246,18 @@
 
     sp<SurfaceComposerClient> mClient;
 
+    bool deviceSupportsBlurs() {
+        char value[PROPERTY_VALUE_MAX];
+        property_get("ro.surface_flinger.supports_background_blur", value, "0");
+        return atoi(value);
+    }
+
+    bool deviceUsesSkiaRenderEngine() {
+        char value[PROPERTY_VALUE_MAX];
+        property_get("debug.renderengine.backend", value, "default");
+        return strstr(value, "skia") != nullptr;
+    }
+
     sp<IBinder> mDisplay;
     uint32_t mDisplayWidth;
     uint32_t mDisplayHeight;
@@ -307,4 +320,4 @@
 } // namespace android
 
 // TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic pop // ignored "-Wconversion -Wextra"
\ No newline at end of file
+#pragma clang diagnostic pop // ignored "-Wconversion -Wextra"
diff --git a/services/surfaceflinger/tests/LayerTypeAndRenderTypeTransaction_test.cpp b/services/surfaceflinger/tests/LayerTypeAndRenderTypeTransaction_test.cpp
index 782a364..67db717 100644
--- a/services/surfaceflinger/tests/LayerTypeAndRenderTypeTransaction_test.cpp
+++ b/services/surfaceflinger/tests/LayerTypeAndRenderTypeTransaction_test.cpp
@@ -18,7 +18,6 @@
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wconversion"
 
-#include <cutils/properties.h>
 #include <gui/BufferItemConsumer.h>
 #include "TransactionTestHarnesses.h"
 
@@ -40,6 +39,9 @@
 
 protected:
     LayerRenderPathTestHarness mRenderPathHarness;
+
+    static constexpr int64_t kUsageFlags = BufferUsage::CPU_READ_OFTEN |
+            BufferUsage::CPU_WRITE_OFTEN | BufferUsage::COMPOSER_OVERLAY | BufferUsage::GPU_TEXTURE;
 };
 
 ::testing::Environment* const binderEnv =
@@ -301,47 +303,86 @@
     }
 }
 
-TEST_P(LayerTypeAndRenderTypeTransactionTest, SetBackgroundBlurRadius) {
-    char value[PROPERTY_VALUE_MAX];
-    property_get("ro.surface_flinger.supports_background_blur", value, "0");
-    if (!atoi(value)) {
-        // This device doesn't support blurs, no-op.
-        return;
-    }
+TEST_P(LayerTypeAndRenderTypeTransactionTest, SetBackgroundBlurRadiusSimple) {
+    if (!deviceSupportsBlurs()) GTEST_SKIP();
+    if (!deviceUsesSkiaRenderEngine()) GTEST_SKIP();
 
-    auto size = 256;
-    auto center = size / 2;
-    auto blurRadius = 50;
-
-    sp<SurfaceControl> backgroundLayer;
-    ASSERT_NO_FATAL_FAILURE(backgroundLayer = createLayer("background", size, size));
-    ASSERT_NO_FATAL_FAILURE(fillLayerColor(backgroundLayer, Color::GREEN, size, size));
+    const auto canvasSize = 256;
 
     sp<SurfaceControl> leftLayer;
-    ASSERT_NO_FATAL_FAILURE(leftLayer = createLayer("left", size / 2, size));
-    ASSERT_NO_FATAL_FAILURE(fillLayerColor(leftLayer, Color::RED, size / 2, size));
-
+    sp<SurfaceControl> rightLayer;
+    sp<SurfaceControl> greenLayer;
     sp<SurfaceControl> blurLayer;
-    ASSERT_NO_FATAL_FAILURE(blurLayer = createLayer("blur", size, size));
-    ASSERT_NO_FATAL_FAILURE(fillLayerColor(blurLayer, Color::TRANSPARENT, size, size));
+    const auto leftRect = Rect(0, 0, canvasSize / 2, canvasSize);
+    const auto rightRect = Rect(canvasSize / 2, 0, canvasSize, canvasSize);
+    const auto blurRect = Rect(0, 0, canvasSize, canvasSize);
 
-    Transaction().setBackgroundBlurRadius(blurLayer, blurRadius).apply();
+    ASSERT_NO_FATAL_FAILURE(leftLayer =
+                                    createLayer("Left", leftRect.getWidth(), leftRect.getHeight()));
+    ASSERT_NO_FATAL_FAILURE(
+            fillLayerColor(leftLayer, Color::BLUE, leftRect.getWidth(), leftRect.getHeight()));
+    ASSERT_NO_FATAL_FAILURE(greenLayer = createLayer("Green", canvasSize * 2, canvasSize * 2));
+    ASSERT_NO_FATAL_FAILURE(
+            fillLayerColor(greenLayer, Color::GREEN, canvasSize * 2, canvasSize * 2));
+    ASSERT_NO_FATAL_FAILURE(
+            rightLayer = createLayer("Right", rightRect.getWidth(), rightRect.getHeight()));
+    ASSERT_NO_FATAL_FAILURE(
+            fillLayerColor(rightLayer, Color::RED, rightRect.getWidth(), rightRect.getHeight()));
 
-    auto shot = getScreenCapture();
-    // Edges are mixed
-    shot->expectColor(Rect(center - 1, center - 5, center, center + 5), Color{150, 150, 0, 255},
-                      50 /* tolerance */);
-    shot->expectColor(Rect(center, center - 5, center + 1, center + 5), Color{150, 150, 0, 255},
-                      50 /* tolerance */);
+    Transaction()
+            .setLayer(greenLayer, mLayerZBase)
+            .setFrame(greenLayer, {0, 0, canvasSize * 2, canvasSize * 2})
+            .setLayer(leftLayer, mLayerZBase + 1)
+            .setFrame(leftLayer, leftRect)
+            .setLayer(rightLayer, mLayerZBase + 2)
+            .setPosition(rightLayer, rightRect.left, rightRect.top)
+            .setFrame(rightLayer, rightRect)
+            .apply();
+
+    {
+        auto shot = getScreenCapture();
+        shot->expectColor(leftRect, Color::BLUE);
+        shot->expectColor(rightRect, Color::RED);
+    }
+
+    ASSERT_NO_FATAL_FAILURE(blurLayer = createColorLayer("BackgroundBlur", Color::TRANSPARENT));
+
+    const auto blurRadius = canvasSize / 2;
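+    // The blur layer is fully transparent (alpha 0), so only its background blur is visible.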
+    Transaction()
+            .setLayer(blurLayer, mLayerZBase + 3)
+            .setBackgroundBlurRadius(blurLayer, blurRadius)
+            .setCrop_legacy(blurLayer, blurRect)
+            .setFrame(blurLayer, blurRect)
+            .setSize(blurLayer, blurRect.getWidth(), blurRect.getHeight())
+            .setAlpha(blurLayer, 0.0f)
+            .apply();
+
+    {
+        auto shot = getScreenCapture();
+
+        const auto stepSize = 1;
+        const auto blurAreaOffset = blurRadius * 0.7f;
+        const auto blurAreaStartX = canvasSize / 2 - blurRadius + blurAreaOffset;
+        const auto blurAreaEndX = canvasSize / 2 + blurRadius - blurAreaOffset;
+        Color previousColor;
+        Color currentColor;
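+        // Moving right through the blurred band, red should increase and blue should decrease
+        // as the blur mixes the blue and red halves; green stays zero.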
+        for (int y = 0; y < canvasSize; y++) {
+            shot->checkPixel(0, y, /* r = */ 0, /* g = */ 0, /* b = */ 255);
+            previousColor = shot->getPixelColor(0, y);
+            for (int x = blurAreaStartX; x < blurAreaEndX; x += stepSize) {
+                currentColor = shot->getPixelColor(x, y);
+                ASSERT_GT(currentColor.r, previousColor.r);
+                ASSERT_LT(currentColor.b, previousColor.b);
+                ASSERT_EQ(0, currentColor.g);
+            }
+            shot->checkPixel(canvasSize - 1, y, 255, 0, 0);
+        }
+    }
 }
 
 TEST_P(LayerTypeAndRenderTypeTransactionTest, SetBackgroundBlurRadiusOnMultipleLayers) {
-    char value[PROPERTY_VALUE_MAX];
-    property_get("ro.surface_flinger.supports_background_blur", value, "0");
-    if (!atoi(value)) {
-        // This device doesn't support blurs, no-op.
-        return;
-    }
+    if (!deviceSupportsBlurs()) GTEST_SKIP();
+    if (!deviceUsesSkiaRenderEngine()) GTEST_SKIP();
 
     auto size = 256;
     auto center = size / 2;
@@ -378,25 +419,15 @@
 }
 
 TEST_P(LayerTypeAndRenderTypeTransactionTest, SetBackgroundBlurAffectedByParentAlpha) {
-    char value[PROPERTY_VALUE_MAX];
-    property_get("ro.surface_flinger.supports_background_blur", value, "0");
-    if (!atoi(value)) {
-        // This device doesn't support blurs, no-op.
-        return;
-    }
-
-    property_get("debug.renderengine.backend", value, "");
-    if (strcmp(value, "skiagl") != 0) {
-        // This device isn't using Skia render engine, no-op.
-        return;
-    }
+    if (!deviceSupportsBlurs()) GTEST_SKIP();
+    if (!deviceUsesSkiaRenderEngine()) GTEST_SKIP();
 
     sp<SurfaceControl> left;
     sp<SurfaceControl> right;
     sp<SurfaceControl> blur;
     sp<SurfaceControl> blurParent;
 
-    const auto size = 32;
+    const auto size = 256;
     ASSERT_NO_FATAL_FAILURE(left = createLayer("Left", size, size));
     ASSERT_NO_FATAL_FAILURE(fillLayerColor(left, Color::BLUE, size, size));
     ASSERT_NO_FATAL_FAILURE(right = createLayer("Right", size, size));
@@ -493,10 +524,7 @@
     sp<Surface> surface = layer->getSurface();
 
     sp<GraphicBuffer> buffer =
-            new GraphicBuffer(width, height, PIXEL_FORMAT_RGBX_8888, 1,
-                              BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                      BufferUsage::COMPOSER_OVERLAY,
-                              "test");
+            new GraphicBuffer(width, height, PIXEL_FORMAT_RGBX_8888, 1, kUsageFlags, "test");
     ASSERT_NO_FATAL_FAILURE(
             TransactionUtils::fillGraphicBufferColor(buffer, crop, Color::TRANSPARENT));
 
@@ -512,10 +540,7 @@
         shot->expectColor(crop, Color::BLACK);
     }
 
-    buffer = new GraphicBuffer(width, height, PIXEL_FORMAT_RGBA_8888, 1,
-                               BufferUsage::CPU_READ_OFTEN | BufferUsage::CPU_WRITE_OFTEN |
-                                       BufferUsage::COMPOSER_OVERLAY,
-                               "test");
+    buffer = new GraphicBuffer(width, height, PIXEL_FORMAT_RGBA_8888, 1, kUsageFlags, "test");
     ASSERT_NO_FATAL_FAILURE(
             TransactionUtils::fillGraphicBufferColor(buffer, crop, Color::TRANSPARENT));
 
diff --git a/services/surfaceflinger/tests/unittests/CachingTest.cpp b/services/surfaceflinger/tests/unittests/CachingTest.cpp
index 6bc2318..6a7ec9b 100644
--- a/services/surfaceflinger/tests/unittests/CachingTest.cpp
+++ b/services/surfaceflinger/tests/unittests/CachingTest.cpp
@@ -31,7 +31,8 @@
 
 class SlotGenerationTest : public testing::Test {
 protected:
-    BufferStateLayer::HwcSlotGenerator mHwcSlotGenerator;
+    sp<BufferStateLayer::HwcSlotGenerator> mHwcSlotGenerator =
+            sp<BufferStateLayer::HwcSlotGenerator>::make();
     sp<GraphicBuffer> mBuffer1{new GraphicBuffer(1, 1, HAL_PIXEL_FORMAT_RGBA_8888, 1, 0)};
     sp<GraphicBuffer> mBuffer2{new GraphicBuffer(1, 1, HAL_PIXEL_FORMAT_RGBA_8888, 1, 0)};
     sp<GraphicBuffer> mBuffer3{new GraphicBuffer(10, 10, HAL_PIXEL_FORMAT_RGBA_8888, 1, 0)};
@@ -41,7 +42,7 @@
     sp<IBinder> binder = new BBinder();
     // test getting invalid client_cache_id
     client_cache_t id;
-    uint32_t slot = mHwcSlotGenerator.getHwcCacheSlot(id);
+    uint32_t slot = mHwcSlotGenerator->getHwcCacheSlot(id);
     EXPECT_EQ(BufferQueue::INVALID_BUFFER_SLOT, slot);
 }
 
@@ -50,19 +51,19 @@
     client_cache_t id;
     id.token = binder;
     id.id = 0;
-    uint32_t slot = mHwcSlotGenerator.getHwcCacheSlot(id);
+    uint32_t slot = mHwcSlotGenerator->getHwcCacheSlot(id);
     EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - 1, slot);
 
     client_cache_t idB;
     idB.token = binder;
     idB.id = 1;
-    slot = mHwcSlotGenerator.getHwcCacheSlot(idB);
+    slot = mHwcSlotGenerator->getHwcCacheSlot(idB);
     EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - 2, slot);
 
-    slot = mHwcSlotGenerator.getHwcCacheSlot(idB);
+    slot = mHwcSlotGenerator->getHwcCacheSlot(idB);
     EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - 2, slot);
 
-    slot = mHwcSlotGenerator.getHwcCacheSlot(id);
+    slot = mHwcSlotGenerator->getHwcCacheSlot(id);
     EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - 1, slot);
 }
 
@@ -77,12 +78,12 @@
         id.id = cacheId;
         ids.push_back(id);
 
-        uint32_t slot = mHwcSlotGenerator.getHwcCacheSlot(id);
+        uint32_t slot = mHwcSlotGenerator->getHwcCacheSlot(id);
         EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - (i + 1), slot);
         cacheId++;
     }
     for (uint32_t i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
-        uint32_t slot = mHwcSlotGenerator.getHwcCacheSlot(ids[i]);
+        uint32_t slot = mHwcSlotGenerator->getHwcCacheSlot(ids[i]);
         EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - (i + 1), slot);
     }
 
@@ -90,7 +91,7 @@
         client_cache_t id;
         id.token = binder;
         id.id = cacheId;
-        uint32_t slot = mHwcSlotGenerator.getHwcCacheSlot(id);
+        uint32_t slot = mHwcSlotGenerator->getHwcCacheSlot(id);
         EXPECT_EQ(BufferQueue::NUM_BUFFER_SLOTS - (i + 1), slot);
         cacheId++;
     }
diff --git a/services/surfaceflinger/tests/unittests/FrameTimelineTest.cpp b/services/surfaceflinger/tests/unittests/FrameTimelineTest.cpp
index f2cb951..d1385c0 100644
--- a/services/surfaceflinger/tests/unittests/FrameTimelineTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameTimelineTest.cpp
@@ -193,10 +193,12 @@
 TEST_F(FrameTimelineTest, createSurfaceFrameForToken_getOwnerPidReturnsCorrectPid) {
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, sUidOne, sLayerIdOne,
-                                                       sLayerNameOne, sLayerNameOne);
+                                                       sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({}, sPidTwo, sUidOne, sLayerIdOne,
-                                                       sLayerNameOne, sLayerNameOne);
+                                                       sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     EXPECT_EQ(surfaceFrame1->getOwnerPid(), sPidOne);
     EXPECT_EQ(surfaceFrame2->getOwnerPid(), sPidTwo);
 }
@@ -204,7 +206,8 @@
 TEST_F(FrameTimelineTest, createSurfaceFrameForToken_noToken) {
     auto surfaceFrame =
             mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, sUidOne, sLayerIdOne,
-                                                       sLayerNameOne, sLayerNameOne);
+                                                       sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     EXPECT_EQ(surfaceFrame->getPredictionState(), PredictionState::None);
 }
 
@@ -213,7 +216,8 @@
     flushTokens(systemTime() + maxTokenRetentionTime);
     auto surfaceFrame =
             mFrameTimeline->createSurfaceFrameForToken({token1, sInputEventId}, sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     EXPECT_EQ(surfaceFrame->getPredictionState(), PredictionState::Expired);
 }
@@ -222,7 +226,8 @@
     int64_t token1 = mTokenManager->generateTokenForPredictions({10, 20, 30});
     auto surfaceFrame =
             mFrameTimeline->createSurfaceFrameForToken({token1, sInputEventId}, sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     EXPECT_EQ(surfaceFrame->getPredictionState(), PredictionState::Valid);
     EXPECT_EQ(compareTimelineItems(surfaceFrame->getPredictions(), TimelineItem(10, 20, 30)), true);
@@ -233,7 +238,8 @@
     constexpr int32_t inputEventId = 1;
     auto surfaceFrame =
             mFrameTimeline->createSurfaceFrameForToken({token1, inputEventId}, sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     EXPECT_EQ(inputEventId, surfaceFrame->getInputEventId());
 }
@@ -243,7 +249,8 @@
     int64_t token1 = mTokenManager->generateTokenForPredictions({10, 20, 30});
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({token1, sInputEventId}, sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     // Set up the display frame
     mFrameTimeline->setSfWakeUp(token1, 20, Fps::fromPeriodNsecs(11));
@@ -271,11 +278,11 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdTwo, sLayerNameTwo,
-                                                       sLayerNameTwo);
+                                                       sLayerNameTwo, /*isBuffer*/ true);
     mFrameTimeline->setSfWakeUp(sfToken1, 22, Fps::fromPeriodNsecs(11));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame1);
@@ -317,7 +324,8 @@
         auto surfaceFrame =
                 mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken, sInputEventId},
                                                            sPidOne, sUidOne, sLayerIdOne,
-                                                           sLayerNameOne, sLayerNameOne);
+                                                           sLayerNameOne, sLayerNameOne,
+                                                           /*isBuffer*/ true);
         mFrameTimeline->setSfWakeUp(sfToken, 22 + frameTimeFactor, Fps::fromPeriodNsecs(11));
         surfaceFrame->setPresentState(SurfaceFrame::PresentState::Presented);
         mFrameTimeline->addSurfaceFrame(surfaceFrame);
@@ -339,7 +347,7 @@
     auto surfaceFrame =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     mFrameTimeline->setSfWakeUp(sfToken, 22 + frameTimeFactor, Fps::fromPeriodNsecs(11));
     surfaceFrame->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame);
@@ -353,18 +361,20 @@
 }
 
 TEST_F(FrameTimelineTest, surfaceFrameEndTimeAcquireFenceAfterQueue) {
-    auto surfaceFrame = mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, 0, sLayerIdOne,
-                                                                   "acquireFenceAfterQueue",
-                                                                   "acquireFenceAfterQueue");
+    auto surfaceFrame =
+            mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, 0, sLayerIdOne,
+                                                       "acquireFenceAfterQueue",
+                                                       "acquireFenceAfterQueue", /*isBuffer*/ true);
     surfaceFrame->setActualQueueTime(123);
     surfaceFrame->setAcquireFenceTime(456);
     EXPECT_EQ(surfaceFrame->getActuals().endTime, 456);
 }
 
 TEST_F(FrameTimelineTest, surfaceFrameEndTimeAcquireFenceBeforeQueue) {
-    auto surfaceFrame = mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, 0, sLayerIdOne,
-                                                                   "acquireFenceAfterQueue",
-                                                                   "acquireFenceAfterQueue");
+    auto surfaceFrame =
+            mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, 0, sLayerIdOne,
+                                                       "acquireFenceAfterQueue",
+                                                       "acquireFenceAfterQueue", /*isBuffer*/ true);
     surfaceFrame->setActualQueueTime(456);
     surfaceFrame->setAcquireFenceTime(123);
     EXPECT_EQ(surfaceFrame->getActuals().endTime, 456);
@@ -378,7 +388,8 @@
     for (size_t i = 0; i < *maxDisplayFrames + 10; i++) {
         auto surfaceFrame =
                 mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, sUidOne, sLayerIdOne,
-                                                           sLayerNameOne, sLayerNameOne);
+                                                           sLayerNameOne, sLayerNameOne,
+                                                           /*isBuffer*/ true);
         int64_t sfToken = mTokenManager->generateTokenForPredictions({22, 26, 30});
         mFrameTimeline->setSfWakeUp(sfToken, 22, Fps::fromPeriodNsecs(11));
         surfaceFrame->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -394,7 +405,8 @@
     for (size_t i = 0; i < *maxDisplayFrames + 10; i++) {
         auto surfaceFrame =
                 mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, sUidOne, sLayerIdOne,
-                                                           sLayerNameOne, sLayerNameOne);
+                                                           sLayerNameOne, sLayerNameOne,
+                                                           /*isBuffer*/ true);
         int64_t sfToken = mTokenManager->generateTokenForPredictions({22, 26, 30});
         mFrameTimeline->setSfWakeUp(sfToken, 22, Fps::fromPeriodNsecs(11));
         surfaceFrame->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -410,7 +422,8 @@
     for (size_t i = 0; i < *maxDisplayFrames + 10; i++) {
         auto surfaceFrame =
                 mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, sUidOne, sLayerIdOne,
-                                                           sLayerNameOne, sLayerNameOne);
+                                                           sLayerNameOne, sLayerNameOne,
+                                                           /*isBuffer*/ true);
         int64_t sfToken = mTokenManager->generateTokenForPredictions({22, 26, 30});
         mFrameTimeline->setSfWakeUp(sfToken, 22, Fps::fromPeriodNsecs(11));
         surfaceFrame->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -420,7 +433,54 @@
     EXPECT_EQ(getNumberOfDisplayFrames(), *maxDisplayFrames);
 }
 
+TEST_F(FrameTimelineTest, presentFenceSignaled_invalidSignalTime) {
+    Fps refreshRate = Fps::fromPeriodNsecs(11);
+
+    auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
+    int64_t surfaceFrameToken1 = mTokenManager->generateTokenForPredictions({10, 20, 60});
+    int64_t sfToken1 = mTokenManager->generateTokenForPredictions({52, 60, 60});
+
+    auto surfaceFrame1 =
+            mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
+                                                       sUidOne, sLayerIdOne, sLayerNameOne,
+                                                       sLayerNameOne, /*isBuffer*/ true);
+    mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
+    surfaceFrame1->setAcquireFenceTime(20);
+    surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
+    mFrameTimeline->addSurfaceFrame(surfaceFrame1);
+
+    mFrameTimeline->setSfPresent(59, presentFence1);
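+    // Signal with an invalid (-1) present time; both frames should be classified as Unknown.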
+    presentFence1->signalForTest(-1);
+    addEmptyDisplayFrame();
+
+    auto displayFrame0 = getDisplayFrame(0);
+    EXPECT_EQ(displayFrame0->getActuals().presentTime, -1);
+    EXPECT_EQ(displayFrame0->getJankType(), JankType::Unknown);
+    EXPECT_EQ(surfaceFrame1->getActuals().presentTime, -1);
+    EXPECT_EQ(surfaceFrame1->getJankType(), JankType::Unknown);
+}
+
 // Tests related to TimeStats
+TEST_F(FrameTimelineTest, presentFenceSignaled_doesNotReportForInvalidTokens) {
+    Fps refreshRate = Fps::fromPeriodNsecs(11);
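+    // No janky frames should be reported for frames created with invalid (-1) tokens.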
+    EXPECT_CALL(*mTimeStats, incrementJankyFrames(_)).Times(0);
+    auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
+    int64_t surfaceFrameToken1 = -1;
+    int64_t sfToken1 = -1;
+
+    auto surfaceFrame1 =
+            mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
+                                                       sUidOne, sLayerIdOne, sLayerNameOne,
+                                                       sLayerNameOne, /*isBuffer*/ true);
+    mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
+    surfaceFrame1->setAcquireFenceTime(20);
+    surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
+    mFrameTimeline->addSurfaceFrame(surfaceFrame1);
+    presentFence1->signalForTest(70);
+
+    mFrameTimeline->setSfPresent(59, presentFence1);
+}
+
 TEST_F(FrameTimelineTest, presentFenceSignaled_reportsLongSfCpu) {
     Fps refreshRate = Fps::fromPeriodNsecs(11);
     // Deadline delta is 2ms because, sf's adjusted deadline is 60 - composerTime(3) = 57ms.
@@ -437,7 +497,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
     surfaceFrame1->setAcquireFenceTime(20);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -461,7 +521,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     surfaceFrame1->setAcquireFenceTime(20);
@@ -485,7 +545,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(45);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
 
@@ -511,7 +571,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(50);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
 
@@ -537,7 +597,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(40);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
 
@@ -563,7 +623,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(40);
     mFrameTimeline->setSfWakeUp(sfToken1, 82, refreshRate);
 
@@ -590,7 +650,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(45);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
 
@@ -603,6 +663,43 @@
     EXPECT_EQ(surfaceFrame1->getJankType(), JankType::AppDeadlineMissed);
 }
 
+TEST_F(FrameTimelineTest, presentFenceSignaled_displayFramePredictionExpiredPresentsSurfaceFrame) {
+    Fps refreshRate = Fps::fromPeriodNsecs(11);
+    Fps renderRate = Fps::fromPeriodNsecs(30);
+
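+    // The expired display frame reports unknown deltas (-1), while the surface frame's own
+    // deadline delta (acquire 45 - predicted end 20 = 25) is still reported.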
+    EXPECT_CALL(*mTimeStats,
+                incrementJankyFrames(TimeStats::JankyFramesInfo{refreshRate, renderRate, sUidOne,
+                                                                sLayerNameOne, JankType::Unknown,
+                                                                -1, -1, 25}));
+    auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
+    int64_t surfaceFrameToken1 = mTokenManager->generateTokenForPredictions({10, 20, 60});
+    int64_t sfToken1 = mTokenManager->generateTokenForPredictions({82, 90, 90});
+
+    auto surfaceFrame1 =
+            mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
+                                                       sUidOne, sLayerIdOne, sLayerNameOne,
+                                                       sLayerNameOne, /*isBuffer*/ true);
+    surfaceFrame1->setAcquireFenceTime(45);
+    // Trigger a prediction expiry
+    flushTokens(systemTime() + maxTokenRetentionTime);
+    mFrameTimeline->setSfWakeUp(sfToken1, 52, refreshRate);
+
+    surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
+    surfaceFrame1->setRenderRate(renderRate);
+    mFrameTimeline->addSurfaceFrame(surfaceFrame1);
+    presentFence1->signalForTest(90);
+    mFrameTimeline->setSfPresent(86, presentFence1);
+
+    auto displayFrame = getDisplayFrame(0);
+    EXPECT_EQ(displayFrame->getJankType(), JankType::Unknown);
+    EXPECT_EQ(displayFrame->getFrameStartMetadata(), FrameStartMetadata::UnknownStart);
+    EXPECT_EQ(displayFrame->getFrameReadyMetadata(), FrameReadyMetadata::UnknownFinish);
+    EXPECT_EQ(displayFrame->getFramePresentMetadata(), FramePresentMetadata::UnknownPresent);
+
+    EXPECT_EQ(surfaceFrame1->getActuals().presentTime, 90);
+    EXPECT_EQ(surfaceFrame1->getJankType(), JankType::Unknown);
+}
+
 /*
  * Tracing Tests
  *
@@ -617,7 +714,8 @@
     int64_t token1 = mTokenManager->generateTokenForPredictions({10, 20, 30});
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({token1, sInputEventId}, sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     // Set up the display frame
     mFrameTimeline->setSfWakeUp(token1, 20, Fps::fromPeriodNsecs(11));
@@ -643,7 +741,8 @@
     int64_t token2 = mTokenManager->generateTokenForPredictions({40, 50, 60});
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({token1, sInputEventId}, sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     // Set up the display frame
     mFrameTimeline->setSfWakeUp(token2, 20, Fps::fromPeriodNsecs(11));
@@ -688,7 +787,8 @@
     int64_t token1 = mTokenManager->generateTokenForPredictions({10, 20, 30});
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({}, sPidOne, sUidOne, sLayerIdOne,
-                                                       sLayerNameOne, sLayerNameOne);
+                                                       sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
 
     // Set up the display frame
     mFrameTimeline->setSfWakeUp(token1, 20, Fps::fromPeriodNsecs(11));
@@ -1001,11 +1101,11 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setActualQueueTime(10);
     surfaceFrame1->setDropTime(15);
 
@@ -1161,7 +1261,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken, /*inputEventId*/ 0},
                                                        sPidOne, sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setActualQueueTime(appEndTime);
     surfaceFrame1->setAcquireFenceTime(appEndTime);
 
@@ -1231,7 +1331,7 @@
     auto surfaceFrame =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     mFrameTimeline->setSfWakeUp(sfToken1, 22, Fps::fromPeriodNsecs(11));
     surfaceFrame->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame);
@@ -1409,7 +1509,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(16);
     mFrameTimeline->setSfWakeUp(sfToken1, 22, Fps::fromPeriodNsecs(11));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1429,7 +1529,7 @@
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken2, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame2->setAcquireFenceTime(36);
     mFrameTimeline->setSfWakeUp(sfToken2, 52, Fps::fromPeriodNsecs(11));
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1489,7 +1589,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(16);
     mFrameTimeline->setSfWakeUp(sfToken1, 22, Fps::fromPeriodNsecs(11));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1509,7 +1609,7 @@
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken2, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame2->setAcquireFenceTime(36);
     mFrameTimeline->setSfWakeUp(sfToken2, 52, Fps::fromPeriodNsecs(11));
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1568,7 +1668,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(40);
     mFrameTimeline->setSfWakeUp(sfToken1, 42, Fps::fromPeriodNsecs(11));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1612,7 +1712,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(26);
     mFrameTimeline->setSfWakeUp(sfToken1, 32, Fps::fromPeriodNsecs(11));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1632,7 +1732,7 @@
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken2, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame2->setAcquireFenceTime(40);
     mFrameTimeline->setSfWakeUp(sfToken2, 43, Fps::fromPeriodNsecs(11));
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1686,7 +1786,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(50);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, Fps::fromPeriodNsecs(30));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1706,7 +1806,7 @@
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken2, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame2->setAcquireFenceTime(84);
     mFrameTimeline->setSfWakeUp(sfToken2, 112, Fps::fromPeriodNsecs(30));
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented, 54);
@@ -1764,7 +1864,7 @@
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken1, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame1->setAcquireFenceTime(50);
     mFrameTimeline->setSfWakeUp(sfToken1, 52, Fps::fromPeriodNsecs(30));
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
@@ -1784,7 +1884,7 @@
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken({surfaceFrameToken2, sInputEventId}, sPidOne,
                                                        sUidOne, sLayerIdOne, sLayerNameOne,
-                                                       sLayerNameOne);
+                                                       sLayerNameOne, /*isBuffer*/ true);
     surfaceFrame2->setAcquireFenceTime(80);
     mFrameTimeline->setSfWakeUp(sfToken2, 82, Fps::fromPeriodNsecs(30));
     // Setting previous latch time to 54, adjusted deadline will be 54 + vsyncTime(30) = 84
@@ -1839,7 +1939,8 @@
 
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame1);
@@ -1854,7 +1955,8 @@
     const auto twoHundredMs = std::chrono::nanoseconds(200ms).count();
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame1);
@@ -1863,7 +1965,8 @@
 
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame2);
@@ -1878,7 +1981,8 @@
     const auto twoHundredMs = std::chrono::nanoseconds(200ms).count();
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame1);
@@ -1887,7 +1991,8 @@
 
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdTwo, sLayerNameTwo, sLayerNameTwo);
+                                                       sLayerIdTwo, sLayerNameTwo, sLayerNameTwo,
+                                                       /*isBuffer*/ true);
     auto presentFence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame2);
@@ -1902,7 +2007,8 @@
     const auto twoHundredMs = std::chrono::nanoseconds(200ms).count();
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame1);
@@ -1911,7 +2017,8 @@
 
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdTwo, sLayerNameTwo, sLayerNameTwo);
+                                                       sLayerIdTwo, sLayerNameTwo, sLayerNameTwo,
+                                                       /*isBuffer*/ true);
     auto presentFence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame2);
@@ -1929,7 +2036,8 @@
     const auto sixHundredMs = std::chrono::nanoseconds(600ms).count();
     auto surfaceFrame1 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame1->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame1);
@@ -1938,7 +2046,8 @@
 
     auto surfaceFrame2 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame2->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame2);
@@ -1947,7 +2056,8 @@
 
     auto surfaceFrame3 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdTwo, sLayerNameTwo, sLayerNameTwo);
+                                                       sLayerIdTwo, sLayerNameTwo, sLayerNameTwo,
+                                                       /*isBuffer*/ true);
     auto presentFence3 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame3->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame3);
@@ -1956,7 +2066,8 @@
 
     auto surfaceFrame4 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence4 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     surfaceFrame4->setPresentState(SurfaceFrame::PresentState::Presented);
     mFrameTimeline->addSurfaceFrame(surfaceFrame4);
@@ -1965,7 +2076,8 @@
 
     auto surfaceFrame5 =
             mFrameTimeline->createSurfaceFrameForToken(FrameTimelineInfo(), sPidOne, sUidOne,
-                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne);
+                                                       sLayerIdOne, sLayerNameOne, sLayerNameOne,
+                                                       /*isBuffer*/ true);
     auto presentFence5 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
     // Dropped frames will be excluded from fps computation
     surfaceFrame5->setPresentState(SurfaceFrame::PresentState::Dropped);
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index e46a270..38e503f 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -64,7 +64,7 @@
         }
     } mExpectDisableVsync{mSchedulerCallback};
 
-    TestableScheduler mScheduler{mConfigs, mSchedulerCallback};
+    TestableScheduler* mScheduler = new TestableScheduler{mConfigs, mSchedulerCallback};
 
     Scheduler::ConnectionHandle mConnectionHandle;
     mock::EventThread* mEventThread;
@@ -85,8 +85,10 @@
     EXPECT_CALL(*mEventThread, createEventConnection(_, _))
             .WillRepeatedly(Return(mEventThreadConnection));
 
-    mConnectionHandle = mScheduler.createConnection(std::move(eventThread));
+    mConnectionHandle = mScheduler->createConnection(std::move(eventThread));
     EXPECT_TRUE(mConnectionHandle);
+
+    mFlinger.resetScheduler(mScheduler);
 }
 
 } // namespace
@@ -94,85 +96,84 @@
 TEST_F(SchedulerTest, invalidConnectionHandle) {
     Scheduler::ConnectionHandle handle;
 
-    const sp<IDisplayEventConnection> connection = mScheduler.createDisplayEventConnection(handle);
+    const sp<IDisplayEventConnection> connection = mScheduler->createDisplayEventConnection(handle);
 
     EXPECT_FALSE(connection);
-    EXPECT_FALSE(mScheduler.getEventConnection(handle));
+    EXPECT_FALSE(mScheduler->getEventConnection(handle));
 
     // The EXPECT_CALLS make sure we don't call the functions on the subsequent event threads.
     EXPECT_CALL(*mEventThread, onHotplugReceived(_, _)).Times(0);
-    mScheduler.onHotplugReceived(handle, PHYSICAL_DISPLAY_ID, false);
+    mScheduler->onHotplugReceived(handle, PHYSICAL_DISPLAY_ID, false);
 
     EXPECT_CALL(*mEventThread, onScreenAcquired()).Times(0);
-    mScheduler.onScreenAcquired(handle);
+    mScheduler->onScreenAcquired(handle);
 
     EXPECT_CALL(*mEventThread, onScreenReleased()).Times(0);
-    mScheduler.onScreenReleased(handle);
+    mScheduler->onScreenReleased(handle);
 
     std::string output;
     EXPECT_CALL(*mEventThread, dump(_)).Times(0);
-    mScheduler.dump(handle, output);
+    mScheduler->dump(handle, output);
     EXPECT_TRUE(output.empty());
 
     EXPECT_CALL(*mEventThread, setDuration(10ns, 20ns)).Times(0);
-    mScheduler.setDuration(handle, 10ns, 20ns);
+    mScheduler->setDuration(handle, 10ns, 20ns);
 }
 
 TEST_F(SchedulerTest, validConnectionHandle) {
     const sp<IDisplayEventConnection> connection =
-            mScheduler.createDisplayEventConnection(mConnectionHandle);
+            mScheduler->createDisplayEventConnection(mConnectionHandle);
 
     ASSERT_EQ(mEventThreadConnection, connection);
-    EXPECT_TRUE(mScheduler.getEventConnection(mConnectionHandle));
+    EXPECT_TRUE(mScheduler->getEventConnection(mConnectionHandle));
 
     EXPECT_CALL(*mEventThread, onHotplugReceived(PHYSICAL_DISPLAY_ID, false)).Times(1);
-    mScheduler.onHotplugReceived(mConnectionHandle, PHYSICAL_DISPLAY_ID, false);
+    mScheduler->onHotplugReceived(mConnectionHandle, PHYSICAL_DISPLAY_ID, false);
 
     EXPECT_CALL(*mEventThread, onScreenAcquired()).Times(1);
-    mScheduler.onScreenAcquired(mConnectionHandle);
+    mScheduler->onScreenAcquired(mConnectionHandle);
 
     EXPECT_CALL(*mEventThread, onScreenReleased()).Times(1);
-    mScheduler.onScreenReleased(mConnectionHandle);
+    mScheduler->onScreenReleased(mConnectionHandle);
 
     std::string output("dump");
     EXPECT_CALL(*mEventThread, dump(output)).Times(1);
-    mScheduler.dump(mConnectionHandle, output);
+    mScheduler->dump(mConnectionHandle, output);
     EXPECT_FALSE(output.empty());
 
     EXPECT_CALL(*mEventThread, setDuration(10ns, 20ns)).Times(1);
-    mScheduler.setDuration(mConnectionHandle, 10ns, 20ns);
+    mScheduler->setDuration(mConnectionHandle, 10ns, 20ns);
 
     static constexpr size_t kEventConnections = 5;
     EXPECT_CALL(*mEventThread, getEventThreadConnectionCount()).WillOnce(Return(kEventConnections));
-    EXPECT_EQ(kEventConnections, mScheduler.getEventThreadConnectionCount(mConnectionHandle));
+    EXPECT_EQ(kEventConnections, mScheduler->getEventThreadConnectionCount(mConnectionHandle));
 }
 
 TEST_F(SchedulerTest, noLayerHistory) {
     // Layer history should not be created if there is a single config.
-    ASSERT_FALSE(mScheduler.hasLayerHistory());
+    ASSERT_FALSE(mScheduler->hasLayerHistory());
 
-    TestableSurfaceFlinger flinger;
-    mock::MockLayer layer(flinger.flinger());
+    sp<mock::MockLayer> layer = sp<mock::MockLayer>::make(mFlinger.flinger());
 
     // Content detection should be no-op.
-    mScheduler.registerLayer(&layer);
-    mScheduler.recordLayerHistory(&layer, 0, LayerHistory::LayerUpdateType::Buffer);
+    mScheduler->registerLayer(layer.get());
+    mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
 
     constexpr bool kPowerStateNormal = true;
-    mScheduler.setDisplayPowerState(kPowerStateNormal);
+    mScheduler->setDisplayPowerState(kPowerStateNormal);
 
     constexpr uint32_t kDisplayArea = 999'999;
-    mScheduler.onPrimaryDisplayAreaChanged(kDisplayArea);
+    mScheduler->onPrimaryDisplayAreaChanged(kDisplayArea);
 
     EXPECT_CALL(mSchedulerCallback, changeRefreshRate(_, _)).Times(0);
-    mScheduler.chooseRefreshRateForContent();
+    mScheduler->chooseRefreshRateForContent();
 }
 
 TEST_F(SchedulerTest, testDispatchCachedReportedMode) {
     // If the optional fields are cleared, the function should return before
     // onModeChange is called.
-    mScheduler.clearOptionalFieldsInFeatures();
-    EXPECT_NO_FATAL_FAILURE(mScheduler.dispatchCachedReportedMode());
+    mScheduler->clearOptionalFieldsInFeatures();
+    EXPECT_NO_FATAL_FAILURE(mScheduler->dispatchCachedReportedMode());
     EXPECT_CALL(*mEventThread, onModeChanged(_, _, _)).Times(0);
 }
 
@@ -183,9 +184,9 @@
     // If the handle is incorrect, the function should return before
     // onModeChange is called.
     Scheduler::ConnectionHandle invalidHandle = {.id = 123};
-    EXPECT_NO_FATAL_FAILURE(mScheduler.onNonPrimaryDisplayModeChanged(invalidHandle,
-                                                                      PHYSICAL_DISPLAY_ID, modeId,
-                                                                      vsyncPeriod));
+    EXPECT_NO_FATAL_FAILURE(mScheduler->onNonPrimaryDisplayModeChanged(invalidHandle,
+                                                                       PHYSICAL_DISPLAY_ID, modeId,
+                                                                       vsyncPeriod));
     EXPECT_CALL(*mEventThread, onModeChanged(_, _, _)).Times(0);
 }
 
diff --git a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
index 363bd80..623a5e0 100644
--- a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
@@ -116,6 +116,7 @@
         const auto surfaceFrame = layer->mCurrentState.bufferlessSurfaceFramesTX.at(/*token*/ 1);
         commitTransaction(layer.get());
         EXPECT_EQ(1, surfaceFrame->getToken());
+        EXPECT_EQ(false, surfaceFrame->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, surfaceFrame->getPresentState());
     }
 
@@ -139,6 +140,7 @@
         layer->updateTexImage(computeVisisbleRegions, 15, 0);
 
         EXPECT_EQ(1, surfaceFrame->getToken());
+        EXPECT_EQ(true, surfaceFrame->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, surfaceFrame->getPresentState());
     }
 
@@ -172,12 +174,14 @@
         layer->updateTexImage(computeVisisbleRegions, 15, 0);
 
         EXPECT_EQ(1, droppedSurfaceFrame->getToken());
+        EXPECT_EQ(true, droppedSurfaceFrame->getIsBuffer());
         EXPECT_EQ(PresentState::Dropped, droppedSurfaceFrame->getPresentState());
         EXPECT_EQ(0u, droppedSurfaceFrame->getActuals().endTime);
         auto dropTime = droppedSurfaceFrame->getDropTime();
         EXPECT_TRUE(dropTime > start && dropTime < end);
 
         EXPECT_EQ(1, presentedSurfaceFrame->getToken());
+        EXPECT_EQ(true, presentedSurfaceFrame->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, presentedSurfaceFrame->getPresentState());
     }
 
@@ -204,6 +208,7 @@
 
         commitTransaction(layer.get());
         EXPECT_EQ(1, surfaceFrame->getToken());
+        EXPECT_EQ(true, surfaceFrame->getIsBuffer());
         // Buffers are presented only at latch time.
         EXPECT_EQ(PresentState::Unknown, surfaceFrame->getPresentState());
 
@@ -260,12 +265,15 @@
         commitTransaction(layer.get());
 
         EXPECT_EQ(1, bufferlessSurfaceFrame1->getToken());
+        EXPECT_EQ(false, bufferlessSurfaceFrame1->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, bufferlessSurfaceFrame1->getPresentState());
 
         EXPECT_EQ(4, bufferlessSurfaceFrame2->getToken());
+        EXPECT_EQ(false, bufferlessSurfaceFrame2->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, bufferlessSurfaceFrame2->getPresentState());
 
         EXPECT_EQ(3, bufferSurfaceFrameTX->getToken());
+        EXPECT_EQ(true, bufferSurfaceFrameTX->getIsBuffer());
         // Buffers are presented only at latch time.
         EXPECT_EQ(PresentState::Unknown, bufferSurfaceFrameTX->getPresentState());
 
@@ -297,10 +305,12 @@
         commitTransaction(layer.get());
 
         EXPECT_EQ(1, bufferlessSurfaceFrame1->getToken());
+        EXPECT_EQ(false, bufferlessSurfaceFrame1->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, bufferlessSurfaceFrame1->getPresentState());
         EXPECT_EQ(10, bufferlessSurfaceFrame1->getActuals().endTime);
 
         EXPECT_EQ(2, bufferlessSurfaceFrame2->getToken());
+        EXPECT_EQ(false, bufferlessSurfaceFrame2->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, bufferlessSurfaceFrame2->getPresentState());
         EXPECT_EQ(12, bufferlessSurfaceFrame2->getActuals().endTime);
     }
@@ -327,9 +337,11 @@
         commitTransaction(layer.get());
 
         EXPECT_EQ(1, bufferlessSurfaceFrame1->getToken());
+        EXPECT_EQ(false, bufferlessSurfaceFrame1->getIsBuffer());
         EXPECT_EQ(PresentState::Unknown, bufferlessSurfaceFrame1->getPresentState());
 
         EXPECT_EQ(1, bufferlessSurfaceFrame2->getToken());
+        EXPECT_EQ(false, bufferlessSurfaceFrame2->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, bufferlessSurfaceFrame2->getPresentState());
         EXPECT_EQ(12, bufferlessSurfaceFrame2->getActuals().endTime);
     }
@@ -410,20 +422,85 @@
         layer->updateTexImage(computeVisisbleRegions, 15, 0);
 
         EXPECT_EQ(1, droppedSurfaceFrame1->getToken());
+        EXPECT_EQ(true, droppedSurfaceFrame1->getIsBuffer());
         EXPECT_EQ(PresentState::Dropped, droppedSurfaceFrame1->getPresentState());
         EXPECT_EQ(0u, droppedSurfaceFrame1->getActuals().endTime);
         auto dropTime1 = droppedSurfaceFrame1->getDropTime();
         EXPECT_TRUE(dropTime1 > dropStartTime1 && dropTime1 < dropEndTime1);
 
         EXPECT_EQ(FrameTimelineInfo::INVALID_VSYNC_ID, droppedSurfaceFrame2->getToken());
+        EXPECT_EQ(true, droppedSurfaceFrame2->getIsBuffer());
         EXPECT_EQ(PresentState::Dropped, droppedSurfaceFrame2->getPresentState());
         EXPECT_EQ(0u, droppedSurfaceFrame2->getActuals().endTime);
         auto dropTime2 = droppedSurfaceFrame2->getDropTime();
         EXPECT_TRUE(dropTime2 > dropStartTime2 && dropTime2 < dropEndTime2);
 
         EXPECT_EQ(2, presentedSurfaceFrame->getToken());
+        EXPECT_EQ(true, presentedSurfaceFrame->getIsBuffer());
         EXPECT_EQ(PresentState::Presented, presentedSurfaceFrame->getPresentState());
     }
+
+    void MultipleCommitsBeforeLatch() {
+        sp<BufferStateLayer> layer = createBufferStateLayer();
+        uint32_t surfaceFramesPendingClassification = 0;
+        std::vector<std::shared_ptr<frametimeline::SurfaceFrame>> bufferlessSurfaceFrames;
+        for (int i = 0; i < 10; i += 2) {
+            sp<Fence> fence1(new Fence());
+            sp<GraphicBuffer> buffer1{new GraphicBuffer(1, 1, HAL_PIXEL_FORMAT_RGBA_8888, 1, 0)};
+            layer->setBuffer(buffer1, fence1, 10, 20, false, mClientCache, 1, std::nullopt,
+                             {/*vsyncId*/ 1, /*inputEventId*/ 0});
+            layer->setFrameTimelineVsyncForBufferlessTransaction({/*vsyncId*/ 2,
+                                                                  /*inputEventId*/ 0},
+                                                                 10);
+            ASSERT_NE(nullptr, layer->mCurrentState.bufferSurfaceFrameTX);
+            EXPECT_EQ(1u, layer->mCurrentState.bufferlessSurfaceFramesTX.size());
+            auto& bufferlessSurfaceFrame =
+                    layer->mCurrentState.bufferlessSurfaceFramesTX.at(/*vsyncId*/ 2);
+            bufferlessSurfaceFrames.push_back(bufferlessSurfaceFrame);
+
+            commitTransaction(layer.get());
+            surfaceFramesPendingClassification += 2;
+            EXPECT_EQ(surfaceFramesPendingClassification,
+                      layer->mPendingJankClassifications.size());
+        }
+
+        auto presentedBufferSurfaceFrame = layer->mDrawingState.bufferSurfaceFrameTX;
+        bool computeVisisbleRegions;
+        layer->updateTexImage(computeVisisbleRegions, 15, 0);
+        // BufferlessSurfaceFrames are immediately set to presented and added to the DisplayFrame.
+        // Since we don't have access to DisplayFrame here, trigger an onPresent directly.
+        for (auto& surfaceFrame : bufferlessSurfaceFrames) {
+            surfaceFrame->onPresent(20, JankType::None, Fps::fromPeriodNsecs(11),
+                                    /*displayDeadlineDelta*/ 0, /*displayPresentDelta*/ 0);
+        }
+        presentedBufferSurfaceFrame->onPresent(20, JankType::None, Fps::fromPeriodNsecs(11),
+                                               /*displayDeadlineDelta*/ 0,
+                                               /*displayPresentDelta*/ 0);
+
+        // There should be 10 bufferlessSurfaceFrames and 1 bufferSurfaceFrame
+        ASSERT_EQ(10u, surfaceFramesPendingClassification);
+        ASSERT_EQ(surfaceFramesPendingClassification, layer->mPendingJankClassifications.size());
+
+        // For the frames up to 8, the bufferSurfaceFrame should have been dropped while the
+        // bufferlessSurfaceFrame was presented.
+        for (uint32_t i = 0; i < 8; i += 2) {
+            auto& bufferSurfaceFrame = layer->mPendingJankClassifications[i];
+            auto& bufferlessSurfaceFrame = layer->mPendingJankClassifications[i + 1];
+            EXPECT_EQ(bufferSurfaceFrame->getPresentState(), PresentState::Dropped);
+            EXPECT_EQ(bufferlessSurfaceFrame->getPresentState(), PresentState::Presented);
+        }
+        {
+            auto& bufferSurfaceFrame = layer->mPendingJankClassifications[8u];
+            auto& bufferlessSurfaceFrame = layer->mPendingJankClassifications[9u];
+            EXPECT_EQ(bufferSurfaceFrame->getPresentState(), PresentState::Presented);
+            EXPECT_EQ(bufferlessSurfaceFrame->getPresentState(), PresentState::Presented);
+        }
+
+        layer->releasePendingBuffer(25);
+
+        // There shouldn't be any pending classifications. Everything should have been cleared.
+        EXPECT_EQ(0u, layer->mPendingJankClassifications.size());
+    }
 };
 
 TEST_F(TransactionSurfaceFrameTest, PresentedBufferlessSurfaceFrame) {
@@ -469,4 +546,8 @@
     BufferSurfaceFrame_ReplaceValidTokenBufferWithInvalidTokenBuffer();
 }
 
+TEST_F(TransactionSurfaceFrameTest, MultipleCommitsBeforeLatch) {
+    MultipleCommitsBeforeLatch();
+}
+
 } // namespace android
\ No newline at end of file
diff --git a/services/surfaceflinger/tests/utils/ScreenshotUtils.h b/services/surfaceflinger/tests/utils/ScreenshotUtils.h
index 2fefa45..7efd730 100644
--- a/services/surfaceflinger/tests/utils/ScreenshotUtils.h
+++ b/services/surfaceflinger/tests/utils/ScreenshotUtils.h
@@ -159,6 +159,15 @@
         }
     }
 
+    Color getPixelColor(uint32_t x, uint32_t y) {
+        if (!mOutBuffer || mOutBuffer->getPixelFormat() != HAL_PIXEL_FORMAT_RGBA_8888) {
+            return {0, 0, 0, 0};
+        }
+
+        const uint8_t* pixel = mPixels + (4 * (y * mOutBuffer->getStride() + x));
+        return {pixel[0], pixel[1], pixel[2], pixel[3]};
+    }
+
     void expectFGColor(uint32_t x, uint32_t y) { checkPixel(x, y, 195, 63, 63); }
 
     void expectBGColor(uint32_t x, uint32_t y) { checkPixel(x, y, 63, 63, 195); }
diff --git a/vulkan/vkjson/vkjson_info.cc b/vulkan/vkjson/vkjson_info.cc
deleted file mode 100644
index 3c4b08b..0000000
--- a/vulkan/vkjson/vkjson_info.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-///////////////////////////////////////////////////////////////////////////////
-//
-// Copyright (c) 2015-2016 The Khronos Group Inc.
-// Copyright (c) 2015-2016 Valve Corporation
-// Copyright (c) 2015-2016 LunarG, Inc.
-// Copyright (c) 2015-2016 Google, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef VK_PROTOTYPES
-#define VK_PROTOTYPES
-#endif
-
-#include "vkjson.h"
-
-#include <assert.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <iostream>
-#include <vector>
-
-const uint32_t unsignedNegOne = (uint32_t)(-1);
-
-struct Options {
-  bool instance = false;
-  uint32_t device_index = unsignedNegOne;
-  std::string device_name;
-  std::string output_file;
-};
-
-bool ParseOptions(int argc, char* argv[], Options* options) {
-  for (int i = 1; i < argc; ++i) {
-    std::string arg(argv[i]);
-    if (arg == "--instance" || arg == "-i") {
-      options->instance = true;
-    } else if (arg == "--first" || arg == "-f") {
-      options->device_index = 0;
-    } else {
-      ++i;
-      if (i >= argc) {
-        std::cerr << "Missing parameter after: " << arg << std::endl;
-        return false;
-      }
-      std::string arg2(argv[i]);
-      if (arg == "--device-index" || arg == "-d") {
-        int result = sscanf(arg2.c_str(), "%u", &options->device_index);
-        if (result != 1) {
-          options->device_index = static_cast<uint32_t>(-1);
-          std::cerr << "Unable to parse index: " << arg2 << std::endl;
-          return false;
-        }
-      } else if (arg == "--device-name" || arg == "-n") {
-        options->device_name = arg2;
-      } else if (arg == "--output" || arg == "-o") {
-        options->output_file = arg2;
-      } else {
-        std::cerr << "Unknown argument: " << arg << std::endl;
-        return false;
-      }
-    }
-  }
-  if (options->instance && (options->device_index != unsignedNegOne ||
-                            !options->device_name.empty())) {
-    std::cerr << "Specifying a specific device is incompatible with dumping "
-                 "the whole instance." << std::endl;
-    return false;
-  }
-  if (options->device_index != unsignedNegOne && !options->device_name.empty()) {
-    std::cerr << "Must specify only one of device index and device name."
-              << std::endl;
-    return false;
-  }
-  if (options->instance && options->output_file.empty()) {
-    std::cerr << "Must specify an output file when dumping the whole instance."
-              << std::endl;
-    return false;
-  }
-  if (!options->output_file.empty() && !options->instance &&
-      options->device_index == unsignedNegOne && options->device_name.empty()) {
-    std::cerr << "Must specify instance, device index, or device name when "
-                 "specifying "
-                 "output file." << std::endl;
-    return false;
-  }
-  return true;
-}
-
-bool Dump(const VkJsonInstance& instance, const Options& options) {
-  const VkJsonDevice* out_device = nullptr;
-  if (options.device_index != unsignedNegOne) {
-    if (static_cast<uint32_t>(options.device_index) >=
-        instance.devices.size()) {
-      std::cerr << "Error: device " << options.device_index
-                << " requested but only " << instance.devices.size()
-                << " devices found." << std::endl;
-      return false;
-    }
-    out_device = &instance.devices[options.device_index];
-  } else if (!options.device_name.empty()) {
-    for (const auto& device : instance.devices) {
-      if (device.properties.deviceName == options.device_name) {
-        out_device = &device;
-      }
-    }
-    if (!out_device) {
-      std::cerr << "Error: device '" << options.device_name
-                << "' requested but not found." << std::endl;
-      return false;
-    }
-  }
-
-  std::string output_file;
-  if (options.output_file.empty()) {
-    assert(out_device);
-#if defined(ANDROID)
-    output_file.assign("/sdcard/Android/" + std::string(out_device->properties.deviceName));
-#else
-    output_file.assign(out_device->properties.deviceName);
-#endif
-    output_file.append(".json");
-  } else {
-    output_file = options.output_file;
-  }
-  FILE* file = nullptr;
-  if (output_file == "-") {
-    file = stdout;
-  } else {
-    file = fopen(output_file.c_str(), "w");
-    if (!file) {
-      std::cerr << "Unable to open file " << output_file << "." << std::endl;
-      return false;
-    }
-  }
-
-  std::string json = out_device ? VkJsonDeviceToJson(*out_device)
-                                : VkJsonInstanceToJson(instance);
-  fwrite(json.data(), 1, json.size(), file);
-  fputc('\n', file);
-
-  if (output_file != "-") {
-    fclose(file);
-    std::cout << "Wrote file " << output_file;
-    if (out_device)
-      std::cout << " for device " << out_device->properties.deviceName;
-    std::cout << "." << std::endl;
-  }
-  return true;
-}
-
-int main(int argc, char* argv[]) {
-#if defined(ANDROID)
-  int vulkanSupport = InitVulkan();
-  if (vulkanSupport == 0)
-    return 1;
-#endif
-  Options options;
-  if (!ParseOptions(argc, argv, &options))
-    return 1;
-
-  VkJsonInstance instance = VkJsonGetInstance();
-  if (options.instance || options.device_index != unsignedNegOne ||
-      !options.device_name.empty()) {
-    Dump(instance, options);
-  } else {
-    for (uint32_t i = 0, n = static_cast<uint32_t>(instance.devices.size()); i < n; i++) {
-      options.device_index = i;
-      Dump(instance, options);
-    }
-  }
-
-  return 0;
-}
diff --git a/vulkan/vkjson/vkjson_unittest.cc b/vulkan/vkjson/vkjson_unittest.cc
deleted file mode 100644
index de765cd..0000000
--- a/vulkan/vkjson/vkjson_unittest.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-///////////////////////////////////////////////////////////////////////////////
-//
-// Copyright (c) 2015-2016 The Khronos Group Inc.
-// Copyright (c) 2015-2016 Valve Corporation
-// Copyright (c) 2015-2016 LunarG, Inc.
-// Copyright (c) 2015-2016 Google, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-///////////////////////////////////////////////////////////////////////////////
-
-#include "vkjson.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <iostream>
-
-#define EXPECT(X) if (!(X)) \
-  ReportFailure(__FILE__, __LINE__, #X);
-
-#define ASSERT(X) if (!(X)) { \
-  ReportFailure(__FILE__, __LINE__, #X); \
-  return 2; \
-}
-
-int g_failures;
-
-void ReportFailure(const char* file, int line, const char* assertion) {
-  std::cout << file << ":" << line << ": \"" << assertion << "\" failed."
-            << std::endl;
-  ++g_failures;
-}
-
-int main(int argc, char* argv[]) {
-  std::string errors;
-  bool result = false;
-
-  VkJsonInstance instance;
-  instance.devices.resize(1);
-  VkJsonDevice& device = instance.devices[0];
-
-  const char name[] = "Test device";
-  memcpy(device.properties.deviceName, name, sizeof(name));
-  device.properties.limits.maxImageDimension1D = 3;
-  device.properties.limits.maxSamplerLodBias = 3.5f;
-  device.properties.limits.bufferImageGranularity = 0x1ffffffffull;
-  device.properties.limits.maxViewportDimensions[0] = 1;
-  device.properties.limits.maxViewportDimensions[1] = 2;
-  VkFormatProperties format_props = {
-      VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT,
-      VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT,
-      VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT};
-  device.formats.insert(std::make_pair(VK_FORMAT_R8_UNORM, format_props));
-  device.formats.insert(std::make_pair(VK_FORMAT_R8G8_UNORM, format_props));
-
-  std::string json = VkJsonInstanceToJson(instance);
-  std::cout << json << std::endl;
-
-  VkJsonInstance instance2;
-  result = VkJsonInstanceFromJson(json, &instance2, &errors);
-  EXPECT(result);
-  if (!result)
-    std::cout << "Error: " << errors << std::endl;
-  const VkJsonDevice& device2 = instance2.devices.at(0);
-
-  EXPECT(!memcmp(&device.properties, &device2.properties,
-                 sizeof(device.properties)));
-  for (auto& kv : device.formats) {
-    auto it = device2.formats.find(kv.first);
-    EXPECT(it != device2.formats.end());
-    EXPECT(!memcmp(&kv.second, &it->second, sizeof(kv.second)));
-  }
-
-  VkImageFormatProperties props = {};
-  json = VkJsonImageFormatPropertiesToJson(props);
-  VkImageFormatProperties props2 = {};
-  result = VkJsonImageFormatPropertiesFromJson(json, &props2, &errors);
-  EXPECT(result);
-  if (!result)
-    std::cout << "Error: " << errors << std::endl;
-
-  EXPECT(!memcmp(&props, &props2, sizeof(props)));
-
-  if (g_failures) {
-    std::cout << g_failures << " failures." << std::endl;
-    return 1;
-  } else {
-    std::cout << "Success." << std::endl;
-    return 0;
-  }
-}