Merge "Camera API1: Ignore the video size change during recording" into lmp-dev
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
new file mode 100644
index 0000000..97847a0
--- /dev/null
+++ b/include/media/AudioResamplerPublic.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_PUBLIC_H
+#define ANDROID_AUDIO_RESAMPLER_PUBLIC_H
+
+// AUDIO_RESAMPLER_DOWN_RATIO_MAX is the maximum ratio between the original
+// audio sample rate and the target rate when downsampling,
+// as permitted in the audio framework, e.g. AudioTrack and AudioFlinger.
+// In practice, it is not recommended to downsample more than 6:1
+// for best audio quality, even though the audio framework permits a larger
+// downsampling ratio.
+// TODO: replace with an API
+#define AUDIO_RESAMPLER_DOWN_RATIO_MAX 256
+
+#endif // ANDROID_AUDIO_RESAMPLER_PUBLIC_H
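
Editor's note: this constant caps how far a track may be downsampled relative to the sink rate; the AudioTrack.cpp and Threads.cpp hunks below apply it as "rate > sinkRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX". A minimal sketch of that validation, using a hypothetical helper name:

    #include <stdint.h>
    #include <media/AudioResamplerPublic.h>

    // Hypothetical helper mirroring the checks added below in
    // AudioTrack::setSampleRate() and the AudioFlinger track-creation path:
    // a rate of 0 is invalid, and anything above sinkRate * 256 cannot be
    // handled by the framework's resamplers.
    static bool sampleRateWithinResamplerLimit(uint32_t rate, uint32_t sinkRate) {
        return rate != 0 && rate <= sinkRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
    }
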
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index 29315ce..ab7a4b8 100644
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -45,7 +45,7 @@
         void getSupportedProfileLevels(Vector<ProfileLevel> *profileLevels) const;
         void getSupportedColorFormats(Vector<uint32_t> *colorFormats) const;
         uint32_t getFlags() const;
-        const sp<AMessage> &getDetails() const;
+        const sp<AMessage> getDetails() const;
 
     private:
         Vector<ProfileLevel> mProfileLevels;
@@ -67,7 +67,7 @@
     bool isEncoder() const;
     bool hasQuirk(const char *name) const;
     void getSupportedMimes(Vector<AString> *mimes) const;
-    const sp<Capabilities> &getCapabilitiesFor(const char *mime) const;
+    const sp<Capabilities> getCapabilitiesFor(const char *mime) const;
     const char *getCodecName() const;
 
     /**
@@ -107,6 +107,7 @@
     status_t initializeCapabilities(const CodecCapabilities &caps);
     void addDetail(const AString &key, const AString &value);
     void addFeature(const AString &key, int32_t value);
+    void removeMime(const char *mime);
     void complete();
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecInfo);
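
Editor's note: returning sp<Capabilities> (and sp<AMessage>) by value rather than by const reference costs one refcount bump but avoids handing callers a reference into mCaps, which the new removeMime() can now shrink. A hypothetical caller, for illustration only:

    // Illustration only, not part of this change. With the old by-reference
    // signature, 'caps' could alias an element of mCaps and dangle if the
    // vector were mutated (e.g. by removeMime()) before the last use; the
    // by-value sp keeps the Capabilities object alive for the caller.
    sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor("video/avc");
    if (caps != NULL) {
        Vector<MediaCodecInfo::ProfileLevel> levels;
        caps->getSupportedProfileLevels(&levels);
    }
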
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index b5c9125..d87e6f5 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -28,6 +28,7 @@
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
 #include <media/IAudioFlinger.h>
+#include <media/AudioResamplerPublic.h>
 
 #define WAIT_PERIOD_MS                  10
 #define WAIT_STREAM_END_TIMEOUT_SEC     120
@@ -82,7 +83,7 @@
     }
 
     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
-            afFrameCount * minBufCount * sampleRate / afSampleRate;
+            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
     // The formula above should always produce a non-zero value, but return an error
     // in the unlikely event that it does not, as that's part of the API contract.
     if (*frameCount == 0) {
@@ -646,8 +647,7 @@
     if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
         return NO_INIT;
     }
-    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
-    if (rate == 0 || rate > afSamplingRate*2 ) {
+    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
         return BAD_VALUE;
     }
 
@@ -1002,7 +1002,7 @@
             minBufCount = nBuffering;
         }
 
-        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
+        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
         ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                 ", afLatency=%d",
                 minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
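
Editor's note: the uint64_t promotion matters because the frame-count product is otherwise evaluated in 32-bit arithmetic and can wrap for large hardware buffers. With hypothetical values afFrameCount = 40960, minBufCount = 3 and sampleRate = afSampleRate = 48000, the intermediate product is 40960 * 3 * 48000 = 5,898,240,000, which exceeds UINT32_MAX (4,294,967,295); the widened form keeps it exact before the division. A self-contained sketch of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
        // Hypothetical values chosen so the 32-bit product wraps.
        uint32_t afFrameCount = 40960, minBufCount = 3;
        uint32_t sampleRate = 48000, afSampleRate = 48000;
        uint32_t narrow = afFrameCount * minBufCount * sampleRate / afSampleRate;         // wraps
        uint64_t wide = afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;  // exact
        printf("32-bit: %u frames, 64-bit: %llu frames\n", narrow, (unsigned long long)wide);
        return 0;
    }
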
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 7900eae..446c582 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -46,7 +46,7 @@
     return mFlags;
 }
 
-const sp<AMessage> &MediaCodecInfo::Capabilities::getDetails() const {
+const sp<AMessage> MediaCodecInfo::Capabilities::getDetails() const {
     return mDetails;
 }
 
@@ -121,7 +121,7 @@
     }
 }
 
-const sp<MediaCodecInfo::Capabilities> &
+const sp<MediaCodecInfo::Capabilities>
 MediaCodecInfo::getCapabilitiesFor(const char *mime) const {
     ssize_t ix = getCapabilityIndex(mime);
     if (ix >= 0) {
@@ -206,6 +206,14 @@
     return OK;
 }
 
+void MediaCodecInfo::removeMime(const char *mime) {
+    ssize_t ix = getCapabilityIndex(mime);
+    if (ix >= 0) {
+        mCaps.removeItemsAt(ix);
+        // mCurrentCaps will be removed when completed
+    }
+}
+
 status_t MediaCodecInfo::initializeCapabilities(const CodecCapabilities &caps) {
     mCurrentCaps->mProfileLevels.clear();
     mCurrentCaps->mColorFormats.clear();
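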
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 55fd708..9a4e811 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -901,7 +901,11 @@
                 ALOGE("Received error from %s decoder, aborting playback.",
                      audio ? "audio" : "video");
 
-                mRenderer->queueEOS(audio, UNKNOWN_ERROR);
+                status_t err;
+                if (!msg->findInt32("err", &err)) {
+                    err = UNKNOWN_ERROR;
+                }
+                mRenderer->queueEOS(audio, err);
             } else if (what == Decoder::kWhatDrainThisBuffer) {
                 renderBuffer(audio, msg);
             } else {
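
Editor's note: instead of always reporting UNKNOWN_ERROR, the renderer now receives the decoder's actual status when one is attached to the notification. This assumes the decoder posts the code on its error message, roughly as in the sketch below (producer side is not part of this patch; names are assumed):

    // Assumed producer side in the decoder, for context only:
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatError);
    notify->setInt32("err", err);   // concrete status_t, e.g. from MediaCodec
    notify->post();

If the "err" field is absent, the consumer above still falls back to UNKNOWN_ERROR, so decoders that do not attach a code keep working.
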
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 7f8b7f5..2f2a0b3 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -482,11 +482,21 @@
     }
 
     mCurrentInfo = new MediaCodecInfo(name, encoder, type);
-    mCodecInfos.push_back(mCurrentInfo);
-    return initializeCapabilities(type);
+    // The next step involves trying to load the codec, which may
+    // fail.  Only list the codec if this succeeds.
+    // However, keep the mCurrentInfo object around until parsing
+    // of the full codec info is completed.
+    if (initializeCapabilities(type) == OK) {
+        mCodecInfos.push_back(mCurrentInfo);
+    }
+    return OK;
 }
 
 status_t MediaCodecList::initializeCapabilities(const char *type) {
+    if (type == NULL) {
+        return OK;
+    }
+
     ALOGV("initializeCapabilities %s:%s",
             mCurrentInfo->mName.c_str(), type);
 
@@ -553,10 +563,16 @@
     }
 
     status_t ret = mCurrentInfo->addMime(name);
-    if (ret == OK) {
-        ret = initializeCapabilities(name);
+    if (ret != OK) {
+        return ret;
     }
-    return ret;
+
+    // The next step involves trying to load the codec, which may
+    // fail.  Handle this gracefully (by not reporting such a MIME type).
+    if (initializeCapabilities(name) != OK) {
+        mCurrentInfo->removeMime(name);
+    }
+    return OK;
 }
 
 // legacy method for non-advanced codecs
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index 159ab70..0eeb201 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -393,7 +393,7 @@
     mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction)
             * phaseWrapLimit / oldPhaseWrapLimit;
     mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case.
-    mPhaseIncrement = static_cast<uint32_t>(static_cast<double>(phaseWrapLimit)
+    mPhaseIncrement = static_cast<uint32_t>(static_cast<uint64_t>(phaseWrapLimit)
             * inSampleRate / mSampleRate);
 
     // determine which resampler to use
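
Editor's note: staying in integer arithmetic here is exact, since the product of two 32-bit values always fits in a uint64_t before the division truncates. For example, with a hypothetical phaseWrapLimit of 1 << 30, inSampleRate = 44100 and mSampleRate = 48000, the increment is (1073741824 * 44100) / 48000 = 47,352,014,438,400 / 48,000 = 986,500,300 (truncated), i.e. 44100/48000 = 0.91875 of the wrap limit, as expected for a 44.1 kHz track on a 48 kHz sink.
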
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index ec3d731..365f271 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -44,6 +44,8 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
 namespace android {
 
 // ----------------------------------------------------------------------------
@@ -1391,7 +1393,8 @@
     // and sample format changes for effects.
     // Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT
     // (4 bytes frame size)
-    const size_t frameSize = audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * FCC_2;
+    const size_t frameSize =
+            audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * min(FCC_2, thread->channelCount());
     memset(mInBuffer, 0, thread->frameCount() * frameSize);
 }
 
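
Editor's note: the min() guard matters for threads with fewer than two channels. audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) is 2, so a stereo thread still gets frameSize = 2 * min(2, 2) = 4 bytes as before, while a mono thread now gets 2 * min(2, 1) = 2 bytes, so the memset no longer assumes a stereo-sized input buffer on a mono thread.
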
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7d583bb5..30cebf4 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -26,6 +26,7 @@
 #include <sys/stat.h>
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
+#include <media/AudioResamplerPublic.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
@@ -1479,8 +1480,7 @@
                 lStatus = BAD_VALUE;
                 goto Exit;
         }
-        // Resampler implementation limits input sampling rate to 2 x output sampling rate.
-        if (sampleRate > mSampleRate*2) {
+        if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
             ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
             lStatus = BAD_VALUE;
             goto Exit;
@@ -3500,7 +3500,7 @@
                 AudioMixer::TRACK,
                 AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask);
-            // limit track sample rate to 2 x output sample rate, which changes at re-configuration
+            // limit track sample rate to AUDIO_RESAMPLER_DOWN_RATIO_MAX x the output sample rate, which changes at re-configuration
-            uint32_t maxSampleRate = mSampleRate * 2;
+            uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
             uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
             if (reqSampleRate == 0) {
                 reqSampleRate = mSampleRate;
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 850fe86..9e59488 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -254,7 +254,7 @@
                 return NO_MEMORY;
             }
 
-            if (checkOutputsForDevice(device, state, outputs, address) != NO_ERROR) {
+            if (checkOutputsForDevice(devDesc, state, outputs, address) != NO_ERROR) {
                 mAvailableOutputDevices.remove(devDesc);
                 return INVALID_OPERATION;
             }
@@ -275,7 +275,7 @@
             // remove device from available output devices
             mAvailableOutputDevices.remove(devDesc);
 
-            checkOutputsForDevice(device, state, outputs, address);
+            checkOutputsForDevice(devDesc, state, outputs, address);
             } break;
 
         default:
@@ -2983,7 +2983,7 @@
                         patchDesc->mPatch.sinks[j].ext.device.address;
                 if (strncmp(patchAddr,
                         address.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
-                    ALOGV("checkOutputsForDevice(): adding opened output %d on same address %s",
+                    ALOGV("findIoHandlesByAddress(): adding opened output %d on same address %s",
                             desc->mIoHandle,  patchDesc->mPatch.sinks[j].ext.device.address);
                     outputs.add(desc->mIoHandle);
                     break;
@@ -2993,12 +2993,15 @@
     }
 }
 
-status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device,
+status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
                                                        audio_policy_dev_state_t state,
                                                        SortedVector<audio_io_handle_t>& outputs,
                                                        const String8 address)
 {
+    audio_devices_t device = devDesc->mDeviceType;
     sp<AudioOutputDescriptor> desc;
+    // erase all current sample rates, formats and channel masks
+    devDesc->clearCapabilities();
 
     if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
         // first list already open outputs that can be routed to this device
@@ -3047,6 +3050,9 @@
             for (j = 0; j < outputs.size(); j++) {
                 desc = mOutputs.valueFor(outputs.itemAt(j));
                 if (!desc->isDuplicated() && desc->mProfile == profile) {
+                    // matching profile: save the sample rates, formats and channel masks supported
+                    // by the profile in our device descriptor
+                    devDesc->importAudioPort(profile);
                     break;
                 }
             }
@@ -3196,6 +3202,8 @@
                 profile_index--;
             } else {
                 outputs.add(output);
+                devDesc->importAudioPort(profile);
+
                 if (deviceDistinguishesOnAddress(device)) {
                     ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)",
                             device, address.string());
@@ -5575,15 +5583,21 @@
     port->type = mType;
     unsigned int i;
     for (i = 0; i < mSamplingRates.size() && i < AUDIO_PORT_MAX_SAMPLING_RATES; i++) {
-        port->sample_rates[i] = mSamplingRates[i];
+        if (mSamplingRates[i] != 0) {
+            port->sample_rates[i] = mSamplingRates[i];
+        }
     }
     port->num_sample_rates = i;
     for (i = 0; i < mChannelMasks.size() && i < AUDIO_PORT_MAX_CHANNEL_MASKS; i++) {
-        port->channel_masks[i] = mChannelMasks[i];
+        if (mChannelMasks[i] != 0) {
+            port->channel_masks[i] = mChannelMasks[i];
+        }
     }
     port->num_channel_masks = i;
     for (i = 0; i < mFormats.size() && i < AUDIO_PORT_MAX_FORMATS; i++) {
-        port->formats[i] = mFormats[i];
+        if (mFormats[i] != 0) {
+            port->formats[i] = mFormats[i];
+        }
     }
     port->num_formats = i;
 
@@ -5595,6 +5609,59 @@
     port->num_gains = i;
 }
 
+void AudioPolicyManager::AudioPort::importAudioPort(const sp<AudioPort> port) {
+    for (size_t k = 0 ; k < port->mSamplingRates.size() ; k++) {
+        const uint32_t rate = port->mSamplingRates.itemAt(k);
+        if (rate != 0) { // skip "dynamic" rates
+            bool hasRate = false;
+            for (size_t l = 0 ; l < mSamplingRates.size() ; l++) {
+                if (rate == mSamplingRates.itemAt(l)) {
+                    hasRate = true;
+                    break;
+                }
+            }
+            if (!hasRate) { // never import a sampling rate twice
+                mSamplingRates.add(rate);
+            }
+        }
+    }
+    for (size_t k = 0 ; k < port->mChannelMasks.size() ; k++) {
+        const audio_channel_mask_t mask = port->mChannelMasks.itemAt(k);
+        if (mask != 0) { // skip "dynamic" masks
+            bool hasMask = false;
+            for (size_t l = 0 ; l < mChannelMasks.size() ; l++) {
+                if (mask == mChannelMasks.itemAt(l)) {
+                    hasMask = true;
+                    break;
+                }
+            }
+            if (!hasMask) { // never import a channel mask twice
+                mChannelMasks.add(mask);
+            }
+        }
+    }
+    for (size_t k = 0 ; k < port->mFormats.size() ; k++) {
+        const audio_format_t format = port->mFormats.itemAt(k);
+        if (format != 0) { // skip "dynamic" formats
+            bool hasFormat = false;
+            for (size_t l = 0 ; l < mFormats.size() ; l++) {
+                if (format == mFormats.itemAt(l)) {
+                    hasFormat = true;
+                    break;
+                }
+            }
+            if (!hasFormat) { // never import a format twice
+                mFormats.add(format);
+            }
+        }
+    }
+}
+
+void AudioPolicyManager::AudioPort::clearCapabilities() {
+    mChannelMasks.clear();
+    mFormats.clear();
+    mSamplingRates.clear();
+}
 
 void AudioPolicyManager::AudioPort::loadSamplingRates(char *name)
 {
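
Editor's note: checkOutputsForDevice() now receives the whole device descriptor so it can rebuild the device's capability lists from the profiles that can actually route to it: clear first, then union in each matching profile, with importAudioPort() skipping zero ("dynamic") entries and duplicates. A simplified sketch of that flow (the matchingProfiles collection is hypothetical; the real function also opens and closes outputs):

    // Illustration only.
    devDesc->clearCapabilities();                 // erase stale rates, formats, masks
    for (size_t i = 0; i < matchingProfiles.size(); i++) {
        // Unions in the profile's non-zero sample rates, channel masks and
        // formats, never adding the same value twice.
        devDesc->importAudioPort(matchingProfiles[i]);
    }
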
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 95aab65..f071675 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -238,6 +238,9 @@
 
             virtual void toAudioPort(struct audio_port *port) const;
 
+            void importAudioPort(const sp<AudioPort> port);
+            void clearCapabilities();
+
             void loadSamplingRates(char *name);
             void loadFormats(char *name);
             void loadOutChannels(char *name);
@@ -628,7 +631,7 @@
         // when a device is disconnected, checks if an output is not used any more and
         // returns its handle if any.
         // transfers the audio tracks and effects from one output thread to another accordingly.
-        status_t checkOutputsForDevice(audio_devices_t device,
+        status_t checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
                                        audio_policy_dev_state_t state,
                                        SortedVector<audio_io_handle_t>& outputs,
                                        const String8 address);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 046988e..5eb5181 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1368,7 +1368,8 @@
     ATRACE_CALL();
     ALOGV("%s: Camera %d", __FUNCTION__, mCameraId);
     Mutex::Autolock icl(mBinderSerializationLock);
-    if ( checkPid(__FUNCTION__) != OK) return String8();
+    // The camera service itself is always allowed to read the parameters
+    if (getCallingPid() != mServicePid && checkPid(__FUNCTION__) != OK) return String8();
 
     SharedParameters::ReadLock l(mParameters);
 
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 517226d..fb6b678 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -556,7 +556,8 @@
 // get preview/capture parameters - key/value pairs
 String8 CameraClient::getParameters() const {
     Mutex::Autolock lock(mLock);
-    if (checkPidAndHardware() != NO_ERROR) return String8();
+    // The camera service itself is always allowed to read the parameters
+    if (getCallingPid() != mServicePid && checkPidAndHardware() != NO_ERROR) return String8();
 
     String8 params(mHardware->getParameters().flatten());
     LOG1("getParameters (pid %d) (%s)", getCallingPid(), params.string());
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index c266213..bf3318e 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -155,7 +155,7 @@
                 callbackFormat, params.previewFormat);
         res = device->createStream(mCallbackWindow,
                 params.previewWidth, params.previewHeight,
-                callbackFormat, 0, &mCallbackStreamId);
+                callbackFormat, &mCallbackStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index 964d278..cda98be 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -73,11 +73,10 @@
     }
 
     // Find out buffer size for JPEG
-    camera_metadata_ro_entry_t maxJpegSize =
-            params.staticInfo(ANDROID_JPEG_MAX_SIZE);
-    if (maxJpegSize.count == 0) {
-        ALOGE("%s: Camera %d: Can't find ANDROID_JPEG_MAX_SIZE!",
-                __FUNCTION__, mId);
+    ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight);
+    if (maxJpegSize <= 0) {
+        ALOGE("%s: Camera %d: Jpeg buffer size (%zd) is invalid",
+                __FUNCTION__, mId, maxJpegSize);
         return INVALID_OPERATION;
     }
 
@@ -91,8 +90,7 @@
         mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
         mCaptureWindow = new Surface(producer);
         // Create memory for API consumption
-        mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
-                                       "Camera2Client::CaptureHeap");
+        mCaptureHeap = new MemoryHeapBase(maxJpegSize, 0, "Camera2Client::CaptureHeap");
         if (mCaptureHeap->getSize() == 0) {
             ALOGE("%s: Camera %d: Unable to allocate memory for capture",
                     __FUNCTION__, mId);
@@ -134,8 +132,7 @@
         // Create stream for HAL production
         res = device->createStream(mCaptureWindow,
                 params.pictureWidth, params.pictureHeight,
-                HAL_PIXEL_FORMAT_BLOB, maxJpegSize.data.i32[0],
-                &mCaptureStreamId);
+                HAL_PIXEL_FORMAT_BLOB, &mCaptureStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for capture: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 911f55a..ab0af0d 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -181,8 +181,7 @@
     if (mPreviewStreamId == NO_STREAM) {
         res = device->createStream(mPreviewWindow,
                 params.previewWidth, params.previewHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0,
-                &mPreviewStreamId);
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, &mPreviewStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
                     __FUNCTION__, mId, strerror(-res), res);
@@ -385,7 +384,7 @@
         mRecordingFrameCount = 0;
         res = device->createStream(mRecordingWindow,
                 params.videoWidth, params.videoHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, 0, &mRecordingStreamId);
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, &mRecordingStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for recording: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 10463c1..8fb876e 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -183,8 +183,7 @@
                 (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
         res = device->createStream(mZslWindow,
                 params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
-                streamType, 0,
-                &mZslStreamId);
+                streamType, &mZslStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index b8611f8..86f82a3 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -384,23 +384,7 @@
     // after each call, but only once we are done with all.
 
     int streamId = -1;
-    if (format == HAL_PIXEL_FORMAT_BLOB) {
-        // JPEG buffers need to be sized for maximum possible compressed size
-        CameraMetadata staticInfo = mDevice->info();
-        camera_metadata_entry_t entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
-        if (entry.count == 0) {
-            ALOGE("%s: Camera %d: Can't find maximum JPEG size in "
-                    "static metadata!", __FUNCTION__, mCameraId);
-            return INVALID_OPERATION;
-        }
-        int32_t maxJpegSize = entry.data.i32[0];
-        res = mDevice->createStream(anw, width, height, format, maxJpegSize,
-                &streamId);
-    } else {
-        // All other streams are a known size
-        res = mDevice->createStream(anw, width, height, format, /*size*/0,
-                &streamId);
-    }
+    res = mDevice->createStream(anw, width, height, format, &streamId);
 
     if (res == OK) {
         mStreamMap.add(bufferProducer->asBinder(), streamId);
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
index 0f6d278..f8823a3 100644
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
@@ -280,7 +280,7 @@
         window = new Surface(bufferProducer);
     }
 
-    return mDevice->createStream(window, width, height, format, /*size*/1,
+    return mDevice->createStream(window, width, height, format,
                                  streamId);
 }
 
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 037695d..9e124b0 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -109,8 +109,7 @@
      * other formats, the size parameter is ignored.
      */
     virtual status_t createStream(sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, int format, size_t size,
-            int *id) = 0;
+            uint32_t width, uint32_t height, int format, int *id) = 0;
 
     /**
      * Create an input reprocess stream that uses buffers from an existing
@@ -156,6 +155,12 @@
     virtual status_t waitUntilDrained() = 0;
 
     /**
+     * Get Jpeg buffer size for a given jpeg resolution.
+     * Negative values are error codes.
+     */
+    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const = 0;
+
+    /**
      * Abstract class for HAL notification listeners
      */
     class NotificationListener {
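
Editor's note: with the size argument gone from createStream(), callers that need the JPEG heap size query it from the device instead, as the JpegProcessor change above does. A condensed sketch of the new caller pattern (error handling trimmed, variable names illustrative):

    ssize_t maxJpegSize = device->getJpegBufferSize(width, height);
    if (maxJpegSize <= 0) return INVALID_OPERATION;

    sp<MemoryHeapBase> heap = new MemoryHeapBase(maxJpegSize, 0, "CaptureHeap");

    int streamId = -1;
    status_t res = device->createStream(window, width, height,
            HAL_PIXEL_FORMAT_BLOB, &streamId);    // no size parameter anymore
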
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index 8c2520e..d473a76 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -242,13 +242,16 @@
 }
 
 status_t Camera2Device::createStream(sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, int format, size_t size, int *id) {
+        uint32_t width, uint32_t height, int format, int *id) {
     ATRACE_CALL();
     status_t res;
     ALOGV("%s: E", __FUNCTION__);
 
     sp<StreamAdapter> stream = new StreamAdapter(mHal2Device);
-
+    size_t size = 0;
+    if (format == HAL_PIXEL_FORMAT_BLOB) {
+        size = getJpegBufferSize(width, height);
+    }
     res = stream->connectToDevice(consumer, width, height, format, size);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to create stream (%d x %d, format %x):"
@@ -263,6 +266,17 @@
     return OK;
 }
 
+ssize_t Camera2Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
+    // Always give the max jpeg buffer size regardless of the actual jpeg resolution.
+    camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+    if (jpegBufMaxSize.count == 0) {
+        ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+
+    return jpegBufMaxSize.data.i32[0];
+}
+
 status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
     ATRACE_CALL();
     status_t res;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 46182f8..d0ca46e 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -57,8 +57,7 @@
     virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
     virtual status_t createStream(sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, int format, size_t size,
-            int *id);
+            uint32_t width, uint32_t height, int format, int *id);
     virtual status_t createReprocessStreamFromStream(int outputId, int *id);
     virtual status_t getStreamInfo(int id,
             uint32_t *width, uint32_t *height, uint32_t *format);
@@ -79,6 +78,7 @@
     // Flush implemented as just a wait
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
     virtual uint32_t getDeviceVersion();
+    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
 
   private:
     const int mId;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index a6214cc..ed350c1 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -793,12 +793,12 @@
 }
 
 status_t Camera3Device::createStream(sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, int format, size_t size, int *id) {
+        uint32_t width, uint32_t height, int format, int *id) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, size %zu",
-            mId, mNextStreamId, width, height, format, size);
+    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d",
+            mId, mNextStreamId, width, height, format);
 
     status_t res;
     bool wasActive = false;
@@ -832,10 +832,7 @@
     sp<Camera3OutputStream> newStream;
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ssize_t jpegBufferSize = getJpegBufferSize(width, height);
-        if (jpegBufferSize > 0) {
-            ALOGV("%s: Overwrite Jpeg output buffer size from %zu to %zu",
-                    __FUNCTION__, size, jpegBufferSize);
-        } else {
+        if (jpegBufferSize <= 0) {
             SET_ERR_L("Invalid jpeg buffer size %zd", jpegBufferSize);
             return BAD_VALUE;
         }
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index b1b0033..7656237 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -93,11 +93,9 @@
 
     // Actual stream creation/deletion is delayed until first request is submitted
     // If adding streams while actively capturing, will pause device before adding
-    // stream, reconfiguring device, and unpausing. Note that, for JPEG stream, the
-    // buffer size may be overwritten by an more accurate value calculated by Camera3Device.
+    // stream, reconfiguring device, and unpausing.
     virtual status_t createStream(sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, int format, size_t size,
-            int *id);
+            uint32_t width, uint32_t height, int format, int *id);
     virtual status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id);
@@ -137,6 +135,8 @@
 
     virtual uint32_t getDeviceVersion();
 
+    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
+
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
 
@@ -316,12 +316,6 @@
      */
     Size getMaxJpegResolution() const;
 
-    /**
-     * Get Jpeg buffer size for a given jpeg resolution.
-     * Negative values are error codes.
-     */
-    ssize_t             getJpegBufferSize(uint32_t width, uint32_t height) const;
-
     struct RequestTrigger {
         // Metadata tag number, e.g. android.control.aePrecaptureTrigger
         uint32_t metadataTag;