Fix data corruption when decompressing large JPEG images and/or using buffered I/O. Specifically, decode_mcu_fast() can process more than one MCU per call, so make sure the input buffer holds enough data to cover that case. Otherwise, the buffer pointer goes negative and data is corrupted. Also, the fast decoder's handling of unread markers conflicts with libjpeg's restart handler, so disable the fast decode path whenever restart markers are in use.
git-svn-id: svn://svn.code.sf.net/p/tigervnc/code/trunk@4012 3789f03b-4d11-0410-bbf8-ca57d06f2519
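For context, the guard that the patch below adds to decode_mcu() boils down to the following check. This is a minimal standalone sketch, not the libjpeg code itself: the BUFSIZE value (512 here) is a placeholder for the per-block worst-case byte count defined in jdhuff.c, and the decode_state struct and can_use_fast_path() helper are hypothetical stand-ins for the fields the real check reads from j_decompress_struct.

#include <stdio.h>
#include <stddef.h>

/* Placeholder for the BUFSIZE constant in jdhuff.c; illustration only. */
#define BUFSIZE 512

/* Stand-in for the relevant fields of libjpeg's decompress state. */
struct decode_state {
  size_t bytes_in_buffer;      /* cinfo->src->bytes_in_buffer */
  int blocks_in_MCU;           /* cinfo->blocks_in_MCU */
  unsigned restart_interval;   /* cinfo->restart_interval */
};

/*
 * Return nonzero when the fast Huffman path may be used: the input buffer
 * must hold enough bytes for a whole MCU (the fast path may consume more
 * than one MCU's worth of input before re-checking bounds), and restart
 * markers must not be in use, since the fast path's marker handling
 * conflicts with libjpeg's restart processing.
 */
static int can_use_fast_path(const struct decode_state *s)
{
  if (s->restart_interval != 0)
    return 0;
  if (s->bytes_in_buffer < (size_t)BUFSIZE * (size_t)s->blocks_in_MCU)
    return 0;
  return 1;
}

int main(void)
{
  struct decode_state s = { 4096, 6, 0 };   /* e.g. a 4:2:0 MCU: 6 blocks */
  printf("fast path allowed: %d\n", can_use_fast_path(&s));
  s.restart_interval = 16;                  /* restart markers force the slow path */
  printf("fast path allowed: %d\n", can_use_fast_path(&s));
  return 0;
}

In the actual patch the same decision is tracked with a local usefast flag, cleared inside the restart-interval block and again when the buffer cannot hold BUFSIZE bytes per block of the MCU.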
diff --git a/common/jpeg/jdhuff.c b/common/jpeg/jdhuff.c
index 18a0c7e..9710142 100644
--- a/common/jpeg/jdhuff.c
+++ b/common/jpeg/jdhuff.c
@@ -765,20 +765,25 @@
 decode_mcu (j_decompress_ptr cinfo, JBLOCKROW *MCU_data)
 {
   huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy;
+  int usefast = 1;
 
   /* Process restart marker if needed; may have to suspend */
   if (cinfo->restart_interval) {
     if (entropy->restarts_to_go == 0)
       if (! process_restart(cinfo))
         return FALSE;
+    usefast = 0;
   }
 
+  if (cinfo->src->bytes_in_buffer < BUFSIZE * cinfo->blocks_in_MCU)
+    usefast = 0;
+
   /* If we've run out of data, just leave the MCU set to zeroes.
    * This way, we return uniform gray for the remainder of the segment.
    */
   if (! entropy->pub.insufficient_data) {
 
-    if (cinfo->src->bytes_in_buffer >= BUFSIZE) {
+    if (usefast) {
       if (!decode_mcu_fast(cinfo, MCU_data)) return FALSE;
     }
     else {