Skip to content

Commit 5f28067

Browse files
committed
Fix overflows in avifImageRGBToYUV()
Fix overflows when multiplying with rowBytes in avifImageRGBToYUV(), by storing the various uint32_t rowBytes fields in local variables of the size_t type. Multiplications with the size_t rowBytes local variables are then done in size_t arithmetic. Part of the fix for #2271.
1 parent 39cf485 commit 5f28067

File tree

1 file changed

+66
-62
lines changed

1 file changed

+66
-62
lines changed

src/reformat.c

Lines changed: 66 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -271,51 +271,55 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
271271
struct YUVBlock yuvBlock[2][2];
272272
float rgbPixel[3];
273273
const float rgbMaxChannelF = state.rgb.maxChannelF;
274-
uint8_t ** yuvPlanes = image->yuvPlanes;
275-
uint32_t * yuvRowBytes = image->yuvRowBytes;
274+
const size_t offsetBytesR = state.rgb.offsetBytesR;
275+
const size_t offsetBytesG = state.rgb.offsetBytesG;
276+
const size_t offsetBytesB = state.rgb.offsetBytesB;
277+
const size_t offsetBytesA = state.rgb.offsetBytesA;
278+
const size_t rgbPixelBytes = state.rgb.pixelBytes;
279+
const size_t rgbRowBytes = rgb->rowBytes;
280+
uint8_t * yPlane = image->yuvPlanes[AVIF_CHAN_Y];
281+
uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
282+
uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
283+
const size_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
284+
const size_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
285+
const size_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
276286
for (uint32_t outerJ = 0; outerJ < image->height; outerJ += 2) {
287+
uint32_t blockH = 2;
288+
if ((outerJ + 1) >= image->height) {
289+
blockH = 1;
290+
}
277291
for (uint32_t outerI = 0; outerI < image->width; outerI += 2) {
278-
int blockW = 2, blockH = 2;
292+
uint32_t blockW = 2;
279293
if ((outerI + 1) >= image->width) {
280294
blockW = 1;
281295
}
282-
if ((outerJ + 1) >= image->height) {
283-
blockH = 1;
284-
}
285296

286297
// Convert an entire 2x2 block to YUV, and populate any fully sampled channels as we go
287-
for (int bJ = 0; bJ < blockH; ++bJ) {
288-
for (int bI = 0; bI < blockW; ++bI) {
289-
int i = outerI + bI;
290-
int j = outerJ + bJ;
298+
for (uint32_t bJ = 0; bJ < blockH; ++bJ) {
299+
for (uint32_t bI = 0; bI < blockW; ++bI) {
300+
uint32_t i = outerI + bI;
301+
uint32_t j = outerJ + bJ;
291302

292303
// Unpack RGB into normalized float
293304
if (state.rgb.channelBytes > 1) {
294-
rgbPixel[0] =
295-
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesR + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
296-
rgbMaxChannelF;
297-
rgbPixel[1] =
298-
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesG + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
299-
rgbMaxChannelF;
300-
rgbPixel[2] =
301-
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesB + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
302-
rgbMaxChannelF;
303-
} else {
304-
rgbPixel[0] = rgb->pixels[state.rgb.offsetBytesR + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] /
305+
rgbPixel[0] = *((uint16_t *)(&rgb->pixels[offsetBytesR + (i * rgbPixelBytes) + (j * rgbRowBytes)])) /
305306
rgbMaxChannelF;
306-
rgbPixel[1] = rgb->pixels[state.rgb.offsetBytesG + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] /
307+
rgbPixel[1] = *((uint16_t *)(&rgb->pixels[offsetBytesG + (i * rgbPixelBytes) + (j * rgbRowBytes)])) /
307308
rgbMaxChannelF;
308-
rgbPixel[2] = rgb->pixels[state.rgb.offsetBytesB + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] /
309+
rgbPixel[2] = *((uint16_t *)(&rgb->pixels[offsetBytesB + (i * rgbPixelBytes) + (j * rgbRowBytes)])) /
309310
rgbMaxChannelF;
311+
} else {
312+
rgbPixel[0] = rgb->pixels[offsetBytesR + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
313+
rgbPixel[1] = rgb->pixels[offsetBytesG + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
314+
rgbPixel[2] = rgb->pixels[offsetBytesB + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
310315
}
311316

312317
if (alphaMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
313318
float a;
314319
if (state.rgb.channelBytes > 1) {
315-
a = *((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesA + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
316-
rgbMaxChannelF;
320+
a = *((uint16_t *)(&rgb->pixels[offsetBytesA + (i * rgbPixelBytes) + (j * rgbRowBytes)])) / rgbMaxChannelF;
317321
} else {
318-
a = rgb->pixels[state.rgb.offsetBytesA + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] / rgbMaxChannelF;
322+
a = rgb->pixels[offsetBytesA + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
319323
}
320324

321325
if (alphaMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
@@ -377,24 +381,24 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
377381
}
378382

379383
if (state.yuv.channelBytes > 1) {
380-
uint16_t * pY = (uint16_t *)&yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_Y])];
381-
*pY = (uint16_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
384+
uint16_t * yRow = (uint16_t *)&yPlane[j * yRowBytes];
385+
yRow[i] = (uint16_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
382386
if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
383387
// YUV444, full chroma
384-
uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_U])];
385-
*pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
386-
uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_V])];
387-
*pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
388+
uint16_t * uRow = (uint16_t *)&uPlane[j * uRowBytes];
389+
uint16_t * vRow = (uint16_t *)&vPlane[j * vRowBytes];
390+
uRow[i] = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
391+
vRow[i] = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
388392
}
389393
} else {
390-
yuvPlanes[AVIF_CHAN_Y][i + (j * yuvRowBytes[AVIF_CHAN_Y])] =
391-
(uint8_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
394+
uint8_t * yRow = &yPlane[j * yRowBytes];
395+
yRow[i] = (uint8_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
392396
if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
393397
// YUV444, full chroma
394-
yuvPlanes[AVIF_CHAN_U][i + (j * yuvRowBytes[AVIF_CHAN_U])] =
395-
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
396-
yuvPlanes[AVIF_CHAN_V][i + (j * yuvRowBytes[AVIF_CHAN_V])] =
397-
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
398+
uint8_t * uRow = &uPlane[j * uRowBytes];
399+
uint8_t * vRow = &vPlane[j * vRowBytes];
400+
uRow[i] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
401+
vRow[i] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
398402
}
399403
}
400404
}
@@ -408,8 +412,8 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
408412

409413
float sumU = 0.0f;
410414
float sumV = 0.0f;
411-
for (int bJ = 0; bJ < blockH; ++bJ) {
412-
for (int bI = 0; bI < blockW; ++bI) {
415+
for (uint32_t bJ = 0; bJ < blockH; ++bJ) {
416+
for (uint32_t bI = 0; bI < blockW; ++bI) {
413417
sumU += yuvBlock[bI][bJ].u;
414418
sumV += yuvBlock[bI][bJ].v;
415419
}
@@ -420,26 +424,26 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
420424

421425
const int chromaShiftX = 1;
422426
const int chromaShiftY = 1;
423-
int uvI = outerI >> chromaShiftX;
424-
int uvJ = outerJ >> chromaShiftY;
427+
uint32_t uvI = outerI >> chromaShiftX;
428+
uint32_t uvJ = outerJ >> chromaShiftY;
425429
if (state.yuv.channelBytes > 1) {
426-
uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
427-
*pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
428-
uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
429-
*pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
430+
uint16_t * uRow = (uint16_t *)&uPlane[uvJ * uRowBytes];
431+
uint16_t * vRow = (uint16_t *)&vPlane[uvJ * vRowBytes];
432+
uRow[uvI] = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
433+
vRow[uvI] = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
430434
} else {
431-
yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
432-
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
433-
yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
434-
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
435+
uint8_t * uRow = &uPlane[uvJ * uRowBytes];
436+
uint8_t * vRow = &vPlane[uvJ * vRowBytes];
437+
uRow[uvI] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
438+
vRow[uvI] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
435439
}
436440
} else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV422) {
437441
// YUV422, average 2 samples (1x2), twice
438442

439-
for (int bJ = 0; bJ < blockH; ++bJ) {
443+
for (uint32_t bJ = 0; bJ < blockH; ++bJ) {
440444
float sumU = 0.0f;
441445
float sumV = 0.0f;
442-
for (int bI = 0; bI < blockW; ++bI) {
446+
for (uint32_t bI = 0; bI < blockW; ++bI) {
443447
sumU += yuvBlock[bI][bJ].u;
444448
sumV += yuvBlock[bI][bJ].v;
445449
}
@@ -448,18 +452,18 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
448452
float avgV = sumV / totalSamples;
449453

450454
const int chromaShiftX = 1;
451-
int uvI = outerI >> chromaShiftX;
452-
int uvJ = outerJ + bJ;
455+
uint32_t uvI = outerI >> chromaShiftX;
456+
uint32_t uvJ = outerJ + bJ;
453457
if (state.yuv.channelBytes > 1) {
454-
uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
455-
*pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
456-
uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
457-
*pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
458+
uint16_t * uRow = (uint16_t *)&uPlane[uvJ * uRowBytes];
459+
uint16_t * vRow = (uint16_t *)&vPlane[uvJ * vRowBytes];
460+
uRow[uvI] = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
461+
vRow[uvI] = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
458462
} else {
459-
yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
460-
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
461-
yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
462-
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
463+
uint8_t * uRow = &uPlane[uvJ * uRowBytes];
464+
uint8_t * vRow = &vPlane[uvJ * vRowBytes];
465+
uRow[uvI] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
466+
vRow[uvI] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
463467
}
464468
}
465469
}

0 commit comments

Comments
 (0)