Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 1 | /* |
| 2 | * jmemmgr.c |
| 3 | * |
| 4 | * Copyright (C) 1991-1997, Thomas G. Lane. |
| 5 | * This file is part of the Independent JPEG Group's software. |
| 6 | * For conditions of distribution and use, see the accompanying README file. |
| 7 | * |
| 8 | * This file contains the JPEG system-independent memory management |
| 9 | * routines. This code is usable across a wide variety of machines; most |
| 10 | * of the system dependencies have been isolated in a separate file. |
| 11 | * The major functions provided here are: |
| 12 | * * pool-based allocation and freeing of memory; |
| 13 | * * policy decisions about how to divide available memory among the |
| 14 | * virtual arrays; |
| 15 | * * control logic for swapping virtual arrays between main memory and |
| 16 | * backing storage. |
| 17 | * The separate system-dependent file provides the actual backing-storage |
| 18 | * access code, and it contains the policy decision about how much total |
| 19 | * main memory to use. |
| 20 | * This file is system-dependent in the sense that some of its functions |
| 21 | * are unnecessary in some systems. For example, if there is enough virtual |
| 22 | * memory so that backing storage will never be used, much of the virtual |
| 23 | * array control logic could be removed. (Of course, if you have that much |
| 24 | * memory then you shouldn't care about a little bit of unused code...) |
| 25 | */ |
| 26 | |
| 27 | #define JPEG_INTERNALS |
| 28 | #define AM_MEMORY_MANAGER /* we define jvirt_Xarray_control structs */ |
| 29 | #include "jinclude.h" |
| 30 | #include "jpeglib.h" |
| 31 | #include "jmemsys.h" /* import the system-dependent declarations */ |
| 32 | |
| 33 | #ifndef NO_GETENV |
| 34 | #ifndef HAVE_STDLIB_H /* <stdlib.h> should declare getenv() */ |
| 35 | extern char * getenv JPP((const char * name)); |
| 36 | #endif |
| 37 | #endif |
| 38 | |
| 39 | |
| 40 | /* |
| 41 | * Some important notes: |
| 42 | * The allocation routines provided here must never return NULL. |
| 43 | * They should exit to error_exit if unsuccessful. |
| 44 | * |
| 45 | * It's not a good idea to try to merge the sarray and barray routines, |
| 46 | * even though they are textually almost the same, because samples are |
| 47 | * usually stored as bytes while coefficients are shorts or ints. Thus, |
| 48 | * in machines where byte pointers have a different representation from |
| 49 | * word pointers, the resulting machine code could not be the same. |
| 50 | */ |
| 51 | |
| 52 | |
| 53 | /* |
| 54 | * Many machines require storage alignment: longs must start on 4-byte |
| 55 | * boundaries, doubles on 8-byte boundaries, etc. On such machines, malloc() |
| 56 | * always returns pointers that are multiples of the worst-case alignment |
| 57 | * requirement, and we had better do so too. |
 * There isn't any really portable way to determine the worst-case alignment
 * requirement.  This module assumes that the alignment requirement is
 * a multiple of ALIGN_SIZE.
| 61 | * By default, we define ALIGN_SIZE as sizeof(double). This is necessary on some |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 62 | * workstations (where doubles really do need 8-byte alignment) and will work |
| 63 | * fine on nearly everything. If your machine has lesser alignment needs, |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 64 | * you can save a few bytes by making ALIGN_SIZE smaller. |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 65 | * The only place I know of where this will NOT work is certain Macintosh |
| 66 | * 680x0 compilers that define double as a 10-byte IEEE extended float. |
| 67 | * Doing 10-byte alignment is counterproductive because longwords won't be |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 68 | * aligned well. Put "#define ALIGN_SIZE 4" in jconfig.h if you have |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 69 | * such a compiler. |
| 70 | */ |
| 71 | |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 72 | #ifndef ALIGN_SIZE /* so can override from jconfig.h */ |
| 73 | #define ALIGN_SIZE SIZEOF(double) |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 74 | #endif |
| 75 | |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 76 | /* |
| 77 | * We allocate objects from "pools", where each pool is gotten with a single |
| 78 | * request to jpeg_get_small() or jpeg_get_large(). There is no per-object |
| 79 | * overhead within a pool, except for alignment padding. Each pool has a |
| 80 | * header with a link to the next pool of the same class. |
| 81 | * Small and large pool headers are identical except that the latter's |
| 82 | * link pointer must be FAR on 80x86 machines. |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 83 | */ |
| 84 | |
/* Header prepended to every "small" pool.  Objects are carved sequentially
 * out of the space that immediately follows this header (plus alignment
 * padding); see alloc_small below.
 */
typedef struct small_pool_struct * small_pool_ptr;

typedef struct small_pool_struct {
  small_pool_ptr next;	/* next pool in same lifetime class (NULL = last) */
  size_t bytes_used;		/* how many bytes already used within pool */
  size_t bytes_left;		/* bytes still available in this pool */
} small_pool_hdr;
| 92 | |
/* Header prepended to every "large" pool.  Identical to small_pool_hdr
 * except that the link pointer must be FAR on 80x86 machines, so the two
 * structs may differ in size there (see alloc_large).
 */
typedef struct large_pool_struct FAR * large_pool_ptr;

typedef struct large_pool_struct {
  large_pool_ptr next;	/* next pool in same lifetime class (NULL = last) */
  size_t bytes_used;		/* how many bytes already used within pool */
  size_t bytes_left;		/* bytes still available in this pool */
} large_pool_hdr;
| 100 | |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 101 | /* |
| 102 | * Here is the full definition of a memory manager object. |
| 103 | */ |
| 104 | |
typedef struct {
  struct jpeg_memory_mgr pub;	/* public fields; must be first so that a
				 * jpeg_memory_mgr* can be cast to my_mem_ptr */

  /* Each pool identifier (lifetime class) names a linked list of pools. */
  small_pool_ptr small_list[JPOOL_NUMPOOLS];
  large_pool_ptr large_list[JPOOL_NUMPOOLS];

  /* Since we only have one lifetime class of virtual arrays, only one
   * linked list is necessary (for each datatype).  Note that the virtual
   * array control blocks being linked together are actually stored somewhere
   * in the small-pool list.
   */
  jvirt_sarray_ptr virt_sarray_list;
  jvirt_barray_ptr virt_barray_list;

  /* This counts total space obtained from jpeg_get_small/large,
   * including pool headers and alignment slack.
   */
  long total_space_allocated;

  /* alloc_sarray and alloc_barray set this value for use by virtual
   * array routines.
   */
  JDIMENSION last_rowsperchunk;	/* from most recent alloc_sarray/barray */
} my_memory_mgr;

typedef my_memory_mgr * my_mem_ptr;
| 130 | |
| 131 | |
| 132 | /* |
| 133 | * The control blocks for virtual arrays. |
| 134 | * Note that these blocks are allocated in the "small" pool area. |
| 135 | * System-dependent info for the associated backing store (if any) is hidden |
| 136 | * inside the backing_store_info struct. |
| 137 | */ |
| 138 | |
struct jvirt_sarray_control {
  JSAMPARRAY mem_buffer;	/* => the in-memory buffer; NULL until the
				 * array is realized (see request_virt_sarray) */
  JDIMENSION rows_in_array;	/* total virtual array height */
  JDIMENSION samplesperrow;	/* width of array (and of memory buffer) */
  JDIMENSION maxaccess;		/* max rows accessed by access_virt_sarray */
  JDIMENSION rows_in_mem;	/* height of memory buffer */
  JDIMENSION rowsperchunk;	/* allocation chunk size in mem_buffer */
  JDIMENSION cur_start_row;	/* first logical row # in the buffer */
  JDIMENSION first_undef_row;	/* row # of first uninitialized row */
  boolean pre_zero;		/* pre-zero mode requested? */
  boolean dirty;		/* do current buffer contents need to be
				 * written back to backing store? */
  boolean b_s_open;		/* is backing-store data valid? */
  jvirt_sarray_ptr next;	/* link to next virtual sarray control block */
  backing_store_info b_s_info;	/* System-dependent control info */
};
| 154 | |
struct jvirt_barray_control {
  JBLOCKARRAY mem_buffer;	/* => the in-memory buffer; NULL until the
				 * array is realized (see request_virt_barray) */
  JDIMENSION rows_in_array;	/* total virtual array height */
  JDIMENSION blocksperrow;	/* width of array (and of memory buffer) */
  JDIMENSION maxaccess;		/* max rows accessed by access_virt_barray */
  JDIMENSION rows_in_mem;	/* height of memory buffer */
  JDIMENSION rowsperchunk;	/* allocation chunk size in mem_buffer */
  JDIMENSION cur_start_row;	/* first logical row # in the buffer */
  JDIMENSION first_undef_row;	/* row # of first uninitialized row */
  boolean pre_zero;		/* pre-zero mode requested? */
  boolean dirty;		/* do current buffer contents need to be
				 * written back to backing store? */
  boolean b_s_open;		/* is backing-store data valid? */
  jvirt_barray_ptr next;	/* link to next virtual barray control block */
  backing_store_info b_s_info;	/* System-dependent control info */
};
| 170 | |
| 171 | |
| 172 | #ifdef MEM_STATS /* optional extra stuff for statistics */ |
| 173 | |
| 174 | LOCAL(void) |
| 175 | print_mem_stats (j_common_ptr cinfo, int pool_id) |
| 176 | { |
| 177 | my_mem_ptr mem = (my_mem_ptr) cinfo->mem; |
| 178 | small_pool_ptr shdr_ptr; |
| 179 | large_pool_ptr lhdr_ptr; |
| 180 | |
| 181 | /* Since this is only a debugging stub, we can cheat a little by using |
| 182 | * fprintf directly rather than going through the trace message code. |
| 183 | * This is helpful because message parm array can't handle longs. |
| 184 | */ |
| 185 | fprintf(stderr, "Freeing pool %d, total space = %ld\n", |
| 186 | pool_id, mem->total_space_allocated); |
| 187 | |
| 188 | for (lhdr_ptr = mem->large_list[pool_id]; lhdr_ptr != NULL; |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 189 | lhdr_ptr = lhdr_ptr->next) { |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 190 | fprintf(stderr, " Large chunk used %ld\n", |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 191 | (long) lhdr_ptr->bytes_used); |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 192 | } |
| 193 | |
| 194 | for (shdr_ptr = mem->small_list[pool_id]; shdr_ptr != NULL; |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 195 | shdr_ptr = shdr_ptr->next) { |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 196 | fprintf(stderr, " Small chunk used %ld free %ld\n", |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 197 | (long) shdr_ptr->bytes_used, |
| 198 | (long) shdr_ptr->bytes_left); |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 199 | } |
| 200 | } |
| 201 | |
| 202 | #endif /* MEM_STATS */ |
| 203 | |
| 204 | |
LOCAL(void)
out_of_memory (j_common_ptr cinfo, int which)
/* Report an out-of-memory error and stop execution (exits via error_exit;
 * does not return).  'which' identifies the failing allocation site and is
 * passed through to the JERR_OUT_OF_MEMORY message.
 * If we compiled MEM_STATS support, report alloc requests before dying.
 */
{
#ifdef MEM_STATS
  cinfo->err->trace_level = 2;	/* force self_destruct to report stats */
#endif
  ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, which);
}
| 215 | |
| 216 | |
| 217 | /* |
| 218 | * Allocation of "small" objects. |
| 219 | * |
| 220 | * For these, we use pooled storage. When a new pool must be created, |
| 221 | * we try to get enough space for the current request plus a "slop" factor, |
| 222 | * where the slop will be the amount of leftover space in the new pool. |
| 223 | * The speed vs. space tradeoff is largely determined by the slop values. |
| 224 | * A different slop value is provided for each pool class (lifetime), |
| 225 | * and we also distinguish the first pool of a class from later ones. |
| 226 | * NOTE: the values given work fairly well on both 16- and 32-bit-int |
| 227 | * machines, but may be too small if longs are 64 bits or more. |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 228 | * |
| 229 | * Since we do not know what alignment malloc() gives us, we have to |
| 230 | * allocate ALIGN_SIZE-1 extra space per pool to have room for alignment |
| 231 | * adjustment. |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 232 | */ |
| 233 | |
/* Slop tables: extra space requested beyond the immediate need when a new
 * small pool is created.  The slop becomes the pool's free space, so later
 * small requests can be satisfied without another jpeg_get_small call.
 * Indexed by pool class (lifetime): [0] = PERMANENT, [1] = IMAGE.
 */
static const size_t first_pool_slop[JPOOL_NUMPOOLS] =
{
	1600,			/* first PERMANENT pool */
	16000			/* first IMAGE pool */
};

static const size_t extra_pool_slop[JPOOL_NUMPOOLS] =
{
	0,			/* additional PERMANENT pools */
	5000			/* additional IMAGE pools */
};

#define MIN_SLOP  50		/* greater than 0 to avoid futile looping */
| 247 | |
| 248 | |
METHODDEF(void *)
alloc_small (j_common_ptr cinfo, int pool_id, size_t sizeofobject)
/* Allocate a "small" object from the given pool class.
 * Returns a pointer aligned to ALIGN_SIZE; never returns NULL (failure
 * exits through out_of_memory/error_exit instead).
 */
{
  my_mem_ptr mem = (my_mem_ptr) cinfo->mem;
  small_pool_ptr hdr_ptr, prev_hdr_ptr;
  char * data_ptr;
  size_t min_request, slop;

  /*
   * Round up the requested size to a multiple of ALIGN_SIZE in order
   * to assure alignment for the next object allocated in the same pool
   * and so that algorithms can straddle outside the proper area up
   * to the next alignment.
   */
  sizeofobject = jround_up(sizeofobject, ALIGN_SIZE);

  /* Check for unsatisfiable request (do now to ensure no overflow below) */
  if ((SIZEOF(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) > MAX_ALLOC_CHUNK)
    out_of_memory(cinfo, 1);	/* request exceeds malloc's ability */

  /* See if space is available in any existing pool of this class */
  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id);	/* safety check */
  prev_hdr_ptr = NULL;
  hdr_ptr = mem->small_list[pool_id];
  while (hdr_ptr != NULL) {
    if (hdr_ptr->bytes_left >= sizeofobject)
      break;			/* found pool with enough space */
    prev_hdr_ptr = hdr_ptr;	/* remember list tail for appending below */
    hdr_ptr = hdr_ptr->next;
  }

  /* Time to make a new pool? */
  if (hdr_ptr == NULL) {
    /* min_request is what we need now, slop is what will be leftover.
     * ALIGN_SIZE-1 extra covers the worst-case alignment adjustment,
     * since we don't know what alignment jpeg_get_small gives us.
     */
    min_request = SIZEOF(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1;
    if (prev_hdr_ptr == NULL)	/* first pool in class? */
      slop = first_pool_slop[pool_id];
    else
      slop = extra_pool_slop[pool_id];
    /* Don't ask for more than MAX_ALLOC_CHUNK */
    if (slop > (size_t) (MAX_ALLOC_CHUNK-min_request))
      slop = (size_t) (MAX_ALLOC_CHUNK-min_request);
    /* Try to get space; if it fails, halve the slop and try again */
    for (;;) {
      hdr_ptr = (small_pool_ptr) jpeg_get_small(cinfo, min_request + slop);
      if (hdr_ptr != NULL)
	break;
      slop /= 2;
      if (slop < MIN_SLOP)	/* give up when it gets real small */
	out_of_memory(cinfo, 2);	/* jpeg_get_small failed */
    }
    mem->total_space_allocated += min_request + slop;
    /* Success, initialize the new pool header and add to end of list */
    hdr_ptr->next = NULL;
    hdr_ptr->bytes_used = 0;
    hdr_ptr->bytes_left = sizeofobject + slop;
    if (prev_hdr_ptr == NULL)	/* first pool in class? */
      mem->small_list[pool_id] = hdr_ptr;
    else
      prev_hdr_ptr->next = hdr_ptr;
  }

  /* OK, allocate the object from the current pool */
  data_ptr = (char *) hdr_ptr;	/* point to first data byte in pool... */
  data_ptr += SIZEOF(small_pool_hdr); /* ...by skipping the header... */
  if ((unsigned long)data_ptr % ALIGN_SIZE) /* ...and adjust for alignment */
    data_ptr += ALIGN_SIZE - (unsigned long)data_ptr % ALIGN_SIZE;
  data_ptr += hdr_ptr->bytes_used; /* point to place for object */
  hdr_ptr->bytes_used += sizeofobject;
  hdr_ptr->bytes_left -= sizeofobject;

  return (void *) data_ptr;
}
| 324 | |
| 325 | |
| 326 | /* |
| 327 | * Allocation of "large" objects. |
| 328 | * |
| 329 | * The external semantics of these are the same as "small" objects, |
| 330 | * except that FAR pointers are used on 80x86. However the pool |
| 331 | * management heuristics are quite different. We assume that each |
| 332 | * request is large enough that it may as well be passed directly to |
| 333 | * jpeg_get_large; the pool management just links everything together |
| 334 | * so that we can free it all on demand. |
| 335 | * Note: the major use of "large" objects is in JSAMPARRAY and JBLOCKARRAY |
| 336 | * structures. The routines that create these structures (see below) |
| 337 | * deliberately bunch rows together to ensure a large request size. |
| 338 | */ |
| 339 | |
| 340 | METHODDEF(void FAR *) |
| 341 | alloc_large (j_common_ptr cinfo, int pool_id, size_t sizeofobject) |
| 342 | /* Allocate a "large" object */ |
| 343 | { |
| 344 | my_mem_ptr mem = (my_mem_ptr) cinfo->mem; |
| 345 | large_pool_ptr hdr_ptr; |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 346 | char FAR * data_ptr; |
| 347 | |
| 348 | /* |
| 349 | * Round up the requested size to a multiple of ALIGN_SIZE so that |
| 350 | * algorithms can straddle outside the proper area up to the next |
| 351 | * alignment. |
| 352 | */ |
| 353 | sizeofobject = jround_up(sizeofobject, ALIGN_SIZE); |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 354 | |
| 355 | /* Check for unsatisfiable request (do now to ensure no overflow below) */ |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 356 | if ((SIZEOF(large_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) > MAX_ALLOC_CHUNK) |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 357 | out_of_memory(cinfo, 3); /* request exceeds malloc's ability */ |
| 358 | |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 359 | /* Always make a new pool */ |
| 360 | if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS) |
| 361 | ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ |
| 362 | |
| 363 | hdr_ptr = (large_pool_ptr) jpeg_get_large(cinfo, sizeofobject + |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 364 | SIZEOF(large_pool_hdr) + |
| 365 | ALIGN_SIZE - 1); |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 366 | if (hdr_ptr == NULL) |
| 367 | out_of_memory(cinfo, 4); /* jpeg_get_large failed */ |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 368 | mem->total_space_allocated += sizeofobject + SIZEOF(large_pool_hdr) + ALIGN_SIZE - 1; |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 369 | |
| 370 | /* Success, initialize the new pool header and add to list */ |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 371 | hdr_ptr->next = mem->large_list[pool_id]; |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 372 | /* We maintain space counts in each pool header for statistical purposes, |
| 373 | * even though they are not needed for allocation. |
| 374 | */ |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 375 | hdr_ptr->bytes_used = sizeofobject; |
| 376 | hdr_ptr->bytes_left = 0; |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 377 | mem->large_list[pool_id] = hdr_ptr; |
| 378 | |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 379 | data_ptr = (char *) hdr_ptr; /* point to first data byte in pool... */ |
| 380 | data_ptr += SIZEOF(small_pool_hdr); /* ...by skipping the header... */ |
| 381 | if ((unsigned long)data_ptr % ALIGN_SIZE) /* ...and adjust for alignment */ |
| 382 | data_ptr += ALIGN_SIZE - (unsigned long)data_ptr % ALIGN_SIZE; |
| 383 | |
| 384 | return (void FAR *) data_ptr; |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 385 | } |
| 386 | |
| 387 | |
| 388 | /* |
| 389 | * Creation of 2-D sample arrays. |
| 390 | * The pointers are in near heap, the samples themselves in FAR heap. |
| 391 | * |
| 392 | * To minimize allocation overhead and to allow I/O of large contiguous |
| 393 | * blocks, we allocate the sample rows in groups of as many rows as possible |
| 394 | * without exceeding MAX_ALLOC_CHUNK total bytes per allocation request. |
| 395 | * NB: the virtual array control routines, later in this file, know about |
| 396 | * this chunking of rows. The rowsperchunk value is left in the mem manager |
| 397 | * object so that it can be saved away if this sarray is the workspace for |
| 398 | * a virtual array. |
Pierre Ossman | 0d04355 | 2009-03-09 10:34:53 +0000 | [diff] [blame] | 399 | * |
| 400 | * Since we are often upsampling with a factor 2, we align the size (not |
| 401 | * the start) to 2 * ALIGN_SIZE so that the upsampling routines don't have |
| 402 | * to be as careful about size. |
Constantin Kaplinsky | a2adc8d | 2006-05-25 05:01:55 +0000 | [diff] [blame] | 403 | */ |
| 404 | |
METHODDEF(JSAMPARRAY)
alloc_sarray (j_common_ptr cinfo, int pool_id,
	      JDIMENSION samplesperrow, JDIMENSION numrows)
/* Allocate a 2-D sample array: an array of row pointers (small object)
 * whose rows are carved out of a few big "large" chunks, so that I/O of
 * large contiguous blocks is possible.  Row width is padded up to a
 * multiple of (2 * ALIGN_SIZE) / SIZEOF(JSAMPLE) samples.
 * The rowsperchunk value is saved in mem->last_rowsperchunk for the
 * virtual array routines.
 */
{
  my_mem_ptr mem = (my_mem_ptr) cinfo->mem;
  JSAMPARRAY result;
  JSAMPROW workspace;
  JDIMENSION rowsperchunk, currow, i;
  long ltemp;

  /* Make sure each row is properly aligned */
  if ((ALIGN_SIZE % SIZEOF(JSAMPLE)) != 0)
    out_of_memory(cinfo, 5);	/* safety check */
  /* Pad width so upsampling-by-2 code need not worry about row size */
  samplesperrow = jround_up(samplesperrow, (2 * ALIGN_SIZE) / SIZEOF(JSAMPLE));

  /* Calculate max # of rows allowed in one allocation chunk */
  ltemp = (MAX_ALLOC_CHUNK-SIZEOF(large_pool_hdr)) /
	  ((long) samplesperrow * SIZEOF(JSAMPLE));
  if (ltemp <= 0)
    ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);	/* one row alone exceeds the limit */
  if (ltemp < (long) numrows)
    rowsperchunk = (JDIMENSION) ltemp;
  else
    rowsperchunk = numrows;
  mem->last_rowsperchunk = rowsperchunk;

  /* Get space for row pointers (small object) */
  result = (JSAMPARRAY) alloc_small(cinfo, pool_id,
				    (size_t) (numrows * SIZEOF(JSAMPROW)));

  /* Get the rows themselves (large objects) */
  currow = 0;
  while (currow < numrows) {
    rowsperchunk = MIN(rowsperchunk, numrows - currow); /* last chunk may be short */
    workspace = (JSAMPROW) alloc_large(cinfo, pool_id,
	(size_t) ((size_t) rowsperchunk * (size_t) samplesperrow
		  * SIZEOF(JSAMPLE)));
    for (i = rowsperchunk; i > 0; i--) {
      result[currow++] = workspace;
      workspace += samplesperrow;
    }
  }

  return result;
}
| 451 | |
| 452 | |
| 453 | /* |
| 454 | * Creation of 2-D coefficient-block arrays. |
| 455 | * This is essentially the same as the code for sample arrays, above. |
| 456 | */ |
| 457 | |
METHODDEF(JBLOCKARRAY)
alloc_barray (j_common_ptr cinfo, int pool_id,
	      JDIMENSION blocksperrow, JDIMENSION numrows)
/* Allocate a 2-D coefficient-block array.
 * Essentially the same scheme as alloc_sarray above: a small array of row
 * pointers plus rows bunched into large chunks.  The rowsperchunk value is
 * saved in mem->last_rowsperchunk for the virtual array routines.
 */
{
  my_mem_ptr mem = (my_mem_ptr) cinfo->mem;
  JBLOCKARRAY result;
  JBLOCKROW workspace;
  JDIMENSION rowsperchunk, currow, i;
  long ltemp;

  /* Make sure each row is properly aligned */
  if ((SIZEOF(JBLOCK) % ALIGN_SIZE) != 0)
    out_of_memory(cinfo, 6);	/* safety check */

  /* Calculate max # of rows allowed in one allocation chunk */
  ltemp = (MAX_ALLOC_CHUNK-SIZEOF(large_pool_hdr)) /
	  ((long) blocksperrow * SIZEOF(JBLOCK));
  if (ltemp <= 0)
    ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);	/* one row alone exceeds the limit */
  if (ltemp < (long) numrows)
    rowsperchunk = (JDIMENSION) ltemp;
  else
    rowsperchunk = numrows;
  mem->last_rowsperchunk = rowsperchunk;

  /* Get space for row pointers (small object) */
  result = (JBLOCKARRAY) alloc_small(cinfo, pool_id,
				     (size_t) (numrows * SIZEOF(JBLOCKROW)));

  /* Get the rows themselves (large objects) */
  currow = 0;
  while (currow < numrows) {
    rowsperchunk = MIN(rowsperchunk, numrows - currow); /* last chunk may be short */
    workspace = (JBLOCKROW) alloc_large(cinfo, pool_id,
	(size_t) ((size_t) rowsperchunk * (size_t) blocksperrow
		  * SIZEOF(JBLOCK)));
    for (i = rowsperchunk; i > 0; i--) {
      result[currow++] = workspace;
      workspace += blocksperrow;
    }
  }

  return result;
}
| 503 | |
| 504 | |
| 505 | /* |
| 506 | * About virtual array management: |
| 507 | * |
| 508 | * The above "normal" array routines are only used to allocate strip buffers |
| 509 | * (as wide as the image, but just a few rows high). Full-image-sized buffers |
| 510 | * are handled as "virtual" arrays. The array is still accessed a strip at a |
| 511 | * time, but the memory manager must save the whole array for repeated |
| 512 | * accesses. The intended implementation is that there is a strip buffer in |
| 513 | * memory (as high as is possible given the desired memory limit), plus a |
| 514 | * backing file that holds the rest of the array. |
| 515 | * |
| 516 | * The request_virt_array routines are told the total size of the image and |
| 517 | * the maximum number of rows that will be accessed at once. The in-memory |
| 518 | * buffer must be at least as large as the maxaccess value. |
| 519 | * |
| 520 | * The request routines create control blocks but not the in-memory buffers. |
| 521 | * That is postponed until realize_virt_arrays is called. At that time the |
| 522 | * total amount of space needed is known (approximately, anyway), so free |
| 523 | * memory can be divided up fairly. |
| 524 | * |
| 525 | * The access_virt_array routines are responsible for making a specific strip |
| 526 | * area accessible (after reading or writing the backing file, if necessary). |
| 527 | * Note that the access routines are told whether the caller intends to modify |
| 528 | * the accessed strip; during a read-only pass this saves having to rewrite |
| 529 | * data to disk. The access routines are also responsible for pre-zeroing |
| 530 | * any newly accessed rows, if pre-zeroing was requested. |
| 531 | * |
| 532 | * In current usage, the access requests are usually for nonoverlapping |
| 533 | * strips; that is, successive access start_row numbers differ by exactly |
| 534 | * num_rows = maxaccess. This means we can get good performance with simple |
| 535 | * buffer dump/reload logic, by making the in-memory buffer be a multiple |
| 536 | * of the access height; then there will never be accesses across bufferload |
| 537 | * boundaries. The code will still work with overlapping access requests, |
| 538 | * but it doesn't handle bufferload overlaps very efficiently. |
| 539 | */ |
| 540 | |
| 541 | |
| 542 | METHODDEF(jvirt_sarray_ptr) |
| 543 | request_virt_sarray (j_common_ptr cinfo, int pool_id, boolean pre_zero, |
| 544 | JDIMENSION samplesperrow, JDIMENSION numrows, |
| 545 | JDIMENSION maxaccess) |
| 546 | /* Request a virtual 2-D sample array */ |
| 547 | { |
| 548 | my_mem_ptr mem = (my_mem_ptr) cinfo->mem; |
| 549 | jvirt_sarray_ptr result; |
| 550 | |
| 551 | /* Only IMAGE-lifetime virtual arrays are currently supported */ |
| 552 | if (pool_id != JPOOL_IMAGE) |
| 553 | ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ |
| 554 | |
| 555 | /* get control block */ |
| 556 | result = (jvirt_sarray_ptr) alloc_small(cinfo, pool_id, |
| 557 | SIZEOF(struct jvirt_sarray_control)); |
| 558 | |
| 559 | result->mem_buffer = NULL; /* marks array not yet realized */ |
| 560 | result->rows_in_array = numrows; |
| 561 | result->samplesperrow = samplesperrow; |
| 562 | result->maxaccess = maxaccess; |
| 563 | result->pre_zero = pre_zero; |
| 564 | result->b_s_open = FALSE; /* no associated backing-store object */ |
| 565 | result->next = mem->virt_sarray_list; /* add to list of virtual arrays */ |
| 566 | mem->virt_sarray_list = result; |
| 567 | |
| 568 | return result; |
| 569 | } |
| 570 | |
| 571 | |
| 572 | METHODDEF(jvirt_barray_ptr) |
| 573 | request_virt_barray (j_common_ptr cinfo, int pool_id, boolean pre_zero, |
| 574 | JDIMENSION blocksperrow, JDIMENSION numrows, |
| 575 | JDIMENSION maxaccess) |
| 576 | /* Request a virtual 2-D coefficient-block array */ |
| 577 | { |
| 578 | my_mem_ptr mem = (my_mem_ptr) cinfo->mem; |
| 579 | jvirt_barray_ptr result; |
| 580 | |
| 581 | /* Only IMAGE-lifetime virtual arrays are currently supported */ |
| 582 | if (pool_id != JPOOL_IMAGE) |
| 583 | ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ |
| 584 | |
| 585 | /* get control block */ |
| 586 | result = (jvirt_barray_ptr) alloc_small(cinfo, pool_id, |
| 587 | SIZEOF(struct jvirt_barray_control)); |
| 588 | |
| 589 | result->mem_buffer = NULL; /* marks array not yet realized */ |
| 590 | result->rows_in_array = numrows; |
| 591 | result->blocksperrow = blocksperrow; |
| 592 | result->maxaccess = maxaccess; |
| 593 | result->pre_zero = pre_zero; |
| 594 | result->b_s_open = FALSE; /* no associated backing-store object */ |
| 595 | result->next = mem->virt_barray_list; /* add to list of virtual arrays */ |
| 596 | mem->virt_barray_list = result; |
| 597 | |
| 598 | return result; |
| 599 | } |
| 600 | |
| 601 | |
METHODDEF(void)
realize_virt_arrays (j_common_ptr cinfo)
/* Allocate the in-memory buffers for any unrealized virtual arrays.
 * Policy: ask the system layer how much memory may be used, then either
 * give every array a full-height buffer (if everything fits) or give each
 * array the same number of "minheights" (multiples of its maxaccess) and
 * open a backing-store file for the part that does not fit.
 */
{
  my_mem_ptr mem = (my_mem_ptr) cinfo->mem;
  long space_per_minheight, maximum_space, avail_mem;
  long minheights, max_minheights;
  jvirt_sarray_ptr sptr;
  jvirt_barray_ptr bptr;

  /* Compute the minimum space needed (maxaccess rows in each buffer)
   * and the maximum space needed (full image height in each buffer).
   * These may be of use to the system-dependent jpeg_mem_available routine.
   */
  space_per_minheight = 0;
  maximum_space = 0;
  for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
    if (sptr->mem_buffer == NULL) { /* if not realized yet */
      space_per_minheight += (long) sptr->maxaccess *
			     (long) sptr->samplesperrow * SIZEOF(JSAMPLE);
      maximum_space += (long) sptr->rows_in_array *
		       (long) sptr->samplesperrow * SIZEOF(JSAMPLE);
    }
  }
  for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
    if (bptr->mem_buffer == NULL) { /* if not realized yet */
      space_per_minheight += (long) bptr->maxaccess *
			     (long) bptr->blocksperrow * SIZEOF(JBLOCK);
      maximum_space += (long) bptr->rows_in_array *
		       (long) bptr->blocksperrow * SIZEOF(JBLOCK);
    }
  }

  if (space_per_minheight <= 0)
    return;			/* no unrealized arrays, no work */

  /* Determine amount of memory to actually use; this is system-dependent. */
  avail_mem = jpeg_mem_available(cinfo, space_per_minheight, maximum_space,
				 mem->total_space_allocated);

  /* If the maximum space needed is available, make all the buffers full
   * height; otherwise parcel it out with the same number of minheights
   * in each buffer.
   */
  if (avail_mem >= maximum_space)
    max_minheights = 1000000000L;  /* effectively unlimited: every array
				    * will satisfy minheights <= this */
  else {
    max_minheights = avail_mem / space_per_minheight;
    /* If there doesn't seem to be enough space, try to get the minimum
     * anyway.  This allows a "stub" implementation of jpeg_mem_available().
     */
    if (max_minheights <= 0)
      max_minheights = 1;
  }

  /* Allocate the in-memory buffers and initialize backing store as needed. */

  for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
    if (sptr->mem_buffer == NULL) { /* if not realized yet */
      /* minheights = ceil(rows_in_array / maxaccess): the number of
       * maxaccess-row units needed to cover the whole array.
       */
      minheights = ((long) sptr->rows_in_array - 1L) / sptr->maxaccess + 1L;
      if (minheights <= max_minheights) {
	/* This buffer fits in memory */
	sptr->rows_in_mem = sptr->rows_in_array;
      } else {
	/* It doesn't fit in memory, create backing store.
	 * rows_in_mem is kept a multiple of maxaccess so that any
	 * maxaccess-row access can be satisfied by one window position.
	 */
	sptr->rows_in_mem = (JDIMENSION) (max_minheights * sptr->maxaccess);
	jpeg_open_backing_store(cinfo, & sptr->b_s_info,
				(long) sptr->rows_in_array *
				(long) sptr->samplesperrow *
				(long) SIZEOF(JSAMPLE));
	sptr->b_s_open = TRUE;
      }
      sptr->mem_buffer = alloc_sarray(cinfo, JPOOL_IMAGE,
				      sptr->samplesperrow, sptr->rows_in_mem);
      sptr->rowsperchunk = mem->last_rowsperchunk;  /* recorded by alloc_sarray;
						     * needed by do_sarray_io */
      sptr->cur_start_row = 0;
      sptr->first_undef_row = 0;
      sptr->dirty = FALSE;
    }
  }

  for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
    if (bptr->mem_buffer == NULL) { /* if not realized yet */
      /* Same policy as above, applied to the coefficient-block arrays */
      minheights = ((long) bptr->rows_in_array - 1L) / bptr->maxaccess + 1L;
      if (minheights <= max_minheights) {
	/* This buffer fits in memory */
	bptr->rows_in_mem = bptr->rows_in_array;
      } else {
	/* It doesn't fit in memory, create backing store. */
	bptr->rows_in_mem = (JDIMENSION) (max_minheights * bptr->maxaccess);
	jpeg_open_backing_store(cinfo, & bptr->b_s_info,
				(long) bptr->rows_in_array *
				(long) bptr->blocksperrow *
				(long) SIZEOF(JBLOCK));
	bptr->b_s_open = TRUE;
      }
      bptr->mem_buffer = alloc_barray(cinfo, JPOOL_IMAGE,
				      bptr->blocksperrow, bptr->rows_in_mem);
      bptr->rowsperchunk = mem->last_rowsperchunk;
      bptr->cur_start_row = 0;
      bptr->first_undef_row = 0;
      bptr->dirty = FALSE;
    }
  }
}
| 707 | |
| 708 | |
| 709 | LOCAL(void) |
| 710 | do_sarray_io (j_common_ptr cinfo, jvirt_sarray_ptr ptr, boolean writing) |
| 711 | /* Do backing store read or write of a virtual sample array */ |
| 712 | { |
| 713 | long bytesperrow, file_offset, byte_count, rows, thisrow, i; |
| 714 | |
| 715 | bytesperrow = (long) ptr->samplesperrow * SIZEOF(JSAMPLE); |
| 716 | file_offset = ptr->cur_start_row * bytesperrow; |
| 717 | /* Loop to read or write each allocation chunk in mem_buffer */ |
| 718 | for (i = 0; i < (long) ptr->rows_in_mem; i += ptr->rowsperchunk) { |
| 719 | /* One chunk, but check for short chunk at end of buffer */ |
| 720 | rows = MIN((long) ptr->rowsperchunk, (long) ptr->rows_in_mem - i); |
| 721 | /* Transfer no more than is currently defined */ |
| 722 | thisrow = (long) ptr->cur_start_row + i; |
| 723 | rows = MIN(rows, (long) ptr->first_undef_row - thisrow); |
| 724 | /* Transfer no more than fits in file */ |
| 725 | rows = MIN(rows, (long) ptr->rows_in_array - thisrow); |
| 726 | if (rows <= 0) /* this chunk might be past end of file! */ |
| 727 | break; |
| 728 | byte_count = rows * bytesperrow; |
| 729 | if (writing) |
| 730 | (*ptr->b_s_info.write_backing_store) (cinfo, & ptr->b_s_info, |
| 731 | (void FAR *) ptr->mem_buffer[i], |
| 732 | file_offset, byte_count); |
| 733 | else |
| 734 | (*ptr->b_s_info.read_backing_store) (cinfo, & ptr->b_s_info, |
| 735 | (void FAR *) ptr->mem_buffer[i], |
| 736 | file_offset, byte_count); |
| 737 | file_offset += byte_count; |
| 738 | } |
| 739 | } |
| 740 | |
| 741 | |
| 742 | LOCAL(void) |
| 743 | do_barray_io (j_common_ptr cinfo, jvirt_barray_ptr ptr, boolean writing) |
| 744 | /* Do backing store read or write of a virtual coefficient-block array */ |
| 745 | { |
| 746 | long bytesperrow, file_offset, byte_count, rows, thisrow, i; |
| 747 | |
| 748 | bytesperrow = (long) ptr->blocksperrow * SIZEOF(JBLOCK); |
| 749 | file_offset = ptr->cur_start_row * bytesperrow; |
| 750 | /* Loop to read or write each allocation chunk in mem_buffer */ |
| 751 | for (i = 0; i < (long) ptr->rows_in_mem; i += ptr->rowsperchunk) { |
| 752 | /* One chunk, but check for short chunk at end of buffer */ |
| 753 | rows = MIN((long) ptr->rowsperchunk, (long) ptr->rows_in_mem - i); |
| 754 | /* Transfer no more than is currently defined */ |
| 755 | thisrow = (long) ptr->cur_start_row + i; |
| 756 | rows = MIN(rows, (long) ptr->first_undef_row - thisrow); |
| 757 | /* Transfer no more than fits in file */ |
| 758 | rows = MIN(rows, (long) ptr->rows_in_array - thisrow); |
| 759 | if (rows <= 0) /* this chunk might be past end of file! */ |
| 760 | break; |
| 761 | byte_count = rows * bytesperrow; |
| 762 | if (writing) |
| 763 | (*ptr->b_s_info.write_backing_store) (cinfo, & ptr->b_s_info, |
| 764 | (void FAR *) ptr->mem_buffer[i], |
| 765 | file_offset, byte_count); |
| 766 | else |
| 767 | (*ptr->b_s_info.read_backing_store) (cinfo, & ptr->b_s_info, |
| 768 | (void FAR *) ptr->mem_buffer[i], |
| 769 | file_offset, byte_count); |
| 770 | file_offset += byte_count; |
| 771 | } |
| 772 | } |
| 773 | |
| 774 | |
METHODDEF(JSAMPARRAY)
access_virt_sarray (j_common_ptr cinfo, jvirt_sarray_ptr ptr,
		    JDIMENSION start_row, JDIMENSION num_rows,
		    boolean writable)
/* Access the part of a virtual sample array starting at start_row */
/* and extending for num_rows rows.  writable is true if */
/* caller intends to modify the accessed area. */
/* Returns a row-pointer array positioned so that result[0] is row */
/* start_row of the virtual array. */
{
  JDIMENSION end_row = start_row + num_rows;	/* one past the last row wanted */
  JDIMENSION undef_row;

  /* debugging check */
  if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||
      ptr->mem_buffer == NULL)
    ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);

  /* Make the desired part of the virtual array accessible */
  if (start_row < ptr->cur_start_row ||
      end_row > ptr->cur_start_row+ptr->rows_in_mem) {
    /* Requested rows lie outside the current in-memory window; we must
     * reposition it, which requires a backing store (a full-height
     * buffer can never take this branch).
     */
    if (! ptr->b_s_open)
      ERREXIT(cinfo, JERR_VIRTUAL_BUG);
    /* Flush old buffer contents if necessary */
    if (ptr->dirty) {
      do_sarray_io(cinfo, ptr, TRUE);
      ptr->dirty = FALSE;
    }
    /* Decide what part of virtual array to access.
     * Algorithm: if target address > current window, assume forward scan,
     * load starting at target address.  If target address < current window,
     * assume backward scan, load so that target area is top of window.
     * Note that when switching from forward write to forward read, will have
     * start_row = 0, so the limiting case applies and we load from 0 anyway.
     */
    if (start_row > ptr->cur_start_row) {
      ptr->cur_start_row = start_row;
    } else {
      /* use long arithmetic here to avoid overflow & unsigned problems */
      long ltemp;

      ltemp = (long) end_row - (long) ptr->rows_in_mem;
      if (ltemp < 0)
	ltemp = 0;		/* don't fall off front end of file */
      ptr->cur_start_row = (JDIMENSION) ltemp;
    }
    /* Read in the selected part of the array.
     * During the initial write pass, we will do no actual read
     * because the selected part is all undefined.
     */
    do_sarray_io(cinfo, ptr, FALSE);
  }
  /* Ensure the accessed part of the array is defined; prezero if needed.
   * To improve locality of access, we only prezero the part of the array
   * that the caller is about to access, not the entire in-memory array.
   */
  if (ptr->first_undef_row < end_row) {
    if (ptr->first_undef_row < start_row) {
      if (writable)		/* writer skipped over a section of array */
	ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
      undef_row = start_row;	/* but reader is allowed to read ahead */
    } else {
      undef_row = ptr->first_undef_row;
    }
    if (writable)
      ptr->first_undef_row = end_row;
    if (ptr->pre_zero) {
      size_t bytesperrow = (size_t) ptr->samplesperrow * SIZEOF(JSAMPLE);
      /* NOTE: end_row is reused as a buffer-relative index from here on */
      undef_row -= ptr->cur_start_row;	/* make indexes relative to buffer */
      end_row -= ptr->cur_start_row;
      while (undef_row < end_row) {
	jzero_far((void FAR *) ptr->mem_buffer[undef_row], bytesperrow);
	undef_row++;
      }
    } else {
      if (! writable)		/* reader looking at undefined data */
	ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
    }
  }
  /* Flag the buffer dirty if caller will write in it */
  if (writable)
    ptr->dirty = TRUE;
  /* Return address of proper part of the buffer */
  return ptr->mem_buffer + (start_row - ptr->cur_start_row);
}
| 858 | |
| 859 | |
METHODDEF(JBLOCKARRAY)
access_virt_barray (j_common_ptr cinfo, jvirt_barray_ptr ptr,
		    JDIMENSION start_row, JDIMENSION num_rows,
		    boolean writable)
/* Access the part of a virtual block array starting at start_row */
/* and extending for num_rows rows.  writable is true if */
/* caller intends to modify the accessed area. */
/* Returns a row-pointer array positioned so that result[0] is row */
/* start_row of the virtual array. */
{
  JDIMENSION end_row = start_row + num_rows;	/* one past the last row wanted */
  JDIMENSION undef_row;

  /* debugging check */
  if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||
      ptr->mem_buffer == NULL)
    ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);

  /* Make the desired part of the virtual array accessible */
  if (start_row < ptr->cur_start_row ||
      end_row > ptr->cur_start_row+ptr->rows_in_mem) {
    /* Requested rows lie outside the current in-memory window; we must
     * reposition it, which requires a backing store (a full-height
     * buffer can never take this branch).
     */
    if (! ptr->b_s_open)
      ERREXIT(cinfo, JERR_VIRTUAL_BUG);
    /* Flush old buffer contents if necessary */
    if (ptr->dirty) {
      do_barray_io(cinfo, ptr, TRUE);
      ptr->dirty = FALSE;
    }
    /* Decide what part of virtual array to access.
     * Algorithm: if target address > current window, assume forward scan,
     * load starting at target address.  If target address < current window,
     * assume backward scan, load so that target area is top of window.
     * Note that when switching from forward write to forward read, will have
     * start_row = 0, so the limiting case applies and we load from 0 anyway.
     */
    if (start_row > ptr->cur_start_row) {
      ptr->cur_start_row = start_row;
    } else {
      /* use long arithmetic here to avoid overflow & unsigned problems */
      long ltemp;

      ltemp = (long) end_row - (long) ptr->rows_in_mem;
      if (ltemp < 0)
	ltemp = 0;		/* don't fall off front end of file */
      ptr->cur_start_row = (JDIMENSION) ltemp;
    }
    /* Read in the selected part of the array.
     * During the initial write pass, we will do no actual read
     * because the selected part is all undefined.
     */
    do_barray_io(cinfo, ptr, FALSE);
  }
  /* Ensure the accessed part of the array is defined; prezero if needed.
   * To improve locality of access, we only prezero the part of the array
   * that the caller is about to access, not the entire in-memory array.
   */
  if (ptr->first_undef_row < end_row) {
    if (ptr->first_undef_row < start_row) {
      if (writable)		/* writer skipped over a section of array */
	ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
      undef_row = start_row;	/* but reader is allowed to read ahead */
    } else {
      undef_row = ptr->first_undef_row;
    }
    if (writable)
      ptr->first_undef_row = end_row;
    if (ptr->pre_zero) {
      size_t bytesperrow = (size_t) ptr->blocksperrow * SIZEOF(JBLOCK);
      /* NOTE: end_row is reused as a buffer-relative index from here on */
      undef_row -= ptr->cur_start_row;	/* make indexes relative to buffer */
      end_row -= ptr->cur_start_row;
      while (undef_row < end_row) {
	jzero_far((void FAR *) ptr->mem_buffer[undef_row], bytesperrow);
	undef_row++;
      }
    } else {
      if (! writable)		/* reader looking at undefined data */
	ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
    }
  }
  /* Flag the buffer dirty if caller will write in it */
  if (writable)
    ptr->dirty = TRUE;
  /* Return address of proper part of the buffer */
  return ptr->mem_buffer + (start_row - ptr->cur_start_row);
}
| 943 | |
| 944 | |
| 945 | /* |
| 946 | * Release all objects belonging to a specified pool. |
| 947 | */ |
| 948 | |
METHODDEF(void)
free_pool (j_common_ptr cinfo, int pool_id)
/* Release all objects belonging to pool pool_id.  For the IMAGE pool this
 * also closes any backing-store files attached to virtual arrays (the
 * array control blocks themselves are small-pool objects and are freed
 * with the rest of the pool).
 */
{
  my_mem_ptr mem = (my_mem_ptr) cinfo->mem;
  small_pool_ptr shdr_ptr;
  large_pool_ptr lhdr_ptr;
  size_t space_freed;

  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id);	/* safety check */

#ifdef MEM_STATS
  if (cinfo->err->trace_level > 1)
    print_mem_stats(cinfo, pool_id); /* print pool's memory usage statistics */
#endif

  /* If freeing IMAGE pool, close any virtual arrays first */
  if (pool_id == JPOOL_IMAGE) {
    jvirt_sarray_ptr sptr;
    jvirt_barray_ptr bptr;

    for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
      if (sptr->b_s_open) {	/* there may be no backing store */
	/* Clear the flag BEFORE closing: if close_backing_store signals an
	 * error and the error handler re-enters free_pool, we must not try
	 * to close the same backing store again.
	 */
	sptr->b_s_open = FALSE;	/* prevent recursive close if error */
	(*sptr->b_s_info.close_backing_store) (cinfo, & sptr->b_s_info);
      }
    }
    mem->virt_sarray_list = NULL;
    for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
      if (bptr->b_s_open) {	/* there may be no backing store */
	bptr->b_s_open = FALSE;	/* prevent recursive close if error */
	(*bptr->b_s_info.close_backing_store) (cinfo, & bptr->b_s_info);
      }
    }
    mem->virt_barray_list = NULL;
  }

  /* Release large objects */
  lhdr_ptr = mem->large_list[pool_id];
  mem->large_list[pool_id] = NULL;	/* detach list first, for safety if
					 * an error exit occurs mid-walk */

  while (lhdr_ptr != NULL) {
    /* Save the link before freeing the node it lives in */
    large_pool_ptr next_lhdr_ptr = lhdr_ptr->next;
    space_freed = lhdr_ptr->bytes_used +
		  lhdr_ptr->bytes_left +
		  SIZEOF(large_pool_hdr);
    jpeg_free_large(cinfo, (void FAR *) lhdr_ptr, space_freed);
    mem->total_space_allocated -= space_freed;
    lhdr_ptr = next_lhdr_ptr;
  }

  /* Release small objects */
  shdr_ptr = mem->small_list[pool_id];
  mem->small_list[pool_id] = NULL;

  while (shdr_ptr != NULL) {
    /* Save the link before freeing the node it lives in */
    small_pool_ptr next_shdr_ptr = shdr_ptr->next;
    space_freed = shdr_ptr->bytes_used +
		  shdr_ptr->bytes_left +
		  SIZEOF(small_pool_hdr);
    jpeg_free_small(cinfo, (void *) shdr_ptr, space_freed);
    mem->total_space_allocated -= space_freed;
    shdr_ptr = next_shdr_ptr;
  }
}
| 1014 | |
| 1015 | |
| 1016 | /* |
| 1017 | * Close up shop entirely. |
| 1018 | * Note that this cannot be called unless cinfo->mem is non-NULL. |
| 1019 | */ |
| 1020 | |
| 1021 | METHODDEF(void) |
| 1022 | self_destruct (j_common_ptr cinfo) |
| 1023 | { |
| 1024 | int pool; |
| 1025 | |
| 1026 | /* Close all backing store, release all memory. |
| 1027 | * Releasing pools in reverse order might help avoid fragmentation |
| 1028 | * with some (brain-damaged) malloc libraries. |
| 1029 | */ |
| 1030 | for (pool = JPOOL_NUMPOOLS-1; pool >= JPOOL_PERMANENT; pool--) { |
| 1031 | free_pool(cinfo, pool); |
| 1032 | } |
| 1033 | |
| 1034 | /* Release the memory manager control block too. */ |
| 1035 | jpeg_free_small(cinfo, (void *) cinfo->mem, SIZEOF(my_memory_mgr)); |
| 1036 | cinfo->mem = NULL; /* ensures I will be called only once */ |
| 1037 | |
| 1038 | jpeg_mem_term(cinfo); /* system-dependent cleanup */ |
| 1039 | } |
| 1040 | |
| 1041 | |
| 1042 | /* |
| 1043 | * Memory manager initialization. |
| 1044 | * When this is called, only the error manager pointer is valid in cinfo! |
| 1045 | */ |
| 1046 | |
GLOBAL(void)
jinit_memory_mgr (j_common_ptr cinfo)
/* Memory manager initialization.
 * When this is called, only the error manager pointer is valid in cinfo!
 * Verifies build-time configuration, allocates and fills in the manager
 * control block, and installs it as cinfo->mem.
 */
{
  my_mem_ptr mem;
  long max_to_use;
  int pool;
  size_t test_mac;

  cinfo->mem = NULL;		/* for safety if init fails */

  /* Check for configuration errors.
   * SIZEOF(ALIGN_TYPE) should be a power of 2; otherwise, it probably
   * doesn't reflect any real hardware alignment requirement.
   * The test is a little tricky: for X>0, X and X-1 have no one-bits
   * in common if and only if X is a power of 2, ie has only one one-bit.
   * Some compilers may give an "unreachable code" warning here; ignore it.
   */
  if ((ALIGN_SIZE & (ALIGN_SIZE-1)) != 0)
    ERREXIT(cinfo, JERR_BAD_ALIGN_TYPE);
  /* MAX_ALLOC_CHUNK must be representable as type size_t, and must be
   * a multiple of ALIGN_SIZE.
   * Again, an "unreachable code" warning may be ignored here.
   * But a "constant too large" warning means you need to fix MAX_ALLOC_CHUNK.
   */
  test_mac = (size_t) MAX_ALLOC_CHUNK;
  if ((long) test_mac != MAX_ALLOC_CHUNK ||	/* round-trip check: value fits in size_t */
      (MAX_ALLOC_CHUNK % ALIGN_SIZE) != 0)
    ERREXIT(cinfo, JERR_BAD_ALLOC_CHUNK);

  max_to_use = jpeg_mem_init(cinfo); /* system-dependent initialization */

  /* Attempt to allocate memory manager's control block */
  mem = (my_mem_ptr) jpeg_get_small(cinfo, SIZEOF(my_memory_mgr));

  if (mem == NULL) {
    /* Can't use the normal error path fully: no manager exists yet, so
     * undo the system-dependent init before bailing out.
     */
    jpeg_mem_term(cinfo);	/* system-dependent cleanup */
    ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 0);
  }

  /* OK, fill in the method pointers */
  mem->pub.alloc_small = alloc_small;
  mem->pub.alloc_large = alloc_large;
  mem->pub.alloc_sarray = alloc_sarray;
  mem->pub.alloc_barray = alloc_barray;
  mem->pub.request_virt_sarray = request_virt_sarray;
  mem->pub.request_virt_barray = request_virt_barray;
  mem->pub.realize_virt_arrays = realize_virt_arrays;
  mem->pub.access_virt_sarray = access_virt_sarray;
  mem->pub.access_virt_barray = access_virt_barray;
  mem->pub.free_pool = free_pool;
  mem->pub.self_destruct = self_destruct;

  /* Make MAX_ALLOC_CHUNK accessible to other modules */
  mem->pub.max_alloc_chunk = MAX_ALLOC_CHUNK;

  /* Initialize working state */
  mem->pub.max_memory_to_use = max_to_use;

  /* All pool lists start empty */
  for (pool = JPOOL_NUMPOOLS-1; pool >= JPOOL_PERMANENT; pool--) {
    mem->small_list[pool] = NULL;
    mem->large_list[pool] = NULL;
  }
  mem->virt_sarray_list = NULL;
  mem->virt_barray_list = NULL;

  /* Account for the control block we just allocated */
  mem->total_space_allocated = SIZEOF(my_memory_mgr);

  /* Declare ourselves open for business */
  cinfo->mem = & mem->pub;

  /* Check for an environment variable JPEGMEM; if found, override the
   * default max_memory setting from jpeg_mem_init.  Note that the
   * surrounding application may again override this value.
   * If your system doesn't support getenv(), define NO_GETENV to disable
   * this feature.
   */
#ifndef NO_GETENV
  { char * memenv;

    if ((memenv = getenv("JPEGMEM")) != NULL) {
      char ch = 'x';		/* 'x' = no suffix parsed */

      /* Value is in units of 1000 bytes; an 'm'/'M' suffix scales by a
       * further factor of 1000 (i.e. units of millions of bytes).
       */
      if (sscanf(memenv, "%ld%c", &max_to_use, &ch) > 0) {
	if (ch == 'm' || ch == 'M')
	  max_to_use *= 1000L;
	mem->pub.max_memory_to_use = max_to_use * 1000L;
      }
    }
  }
#endif

}