author    Marco Nelissen <marcone@google.com>  2016-01-27 14:41:58 -0800
committer Steve Kondik <steve@cyngn.com>       2016-03-22 17:14:35 -0700
commit    4b17bd19e9eceade55dd27aea10b2cefa4e929c7
tree      41e4982162c2c3557a41228eeae42354a060df9f
parent    d7a4d6dcb9a82eb5daad7ab897a1be5357ce19c3
Overflow fixes for amrwbenc

Most of these were encountered while running a mixed sanitized/unsanitized AMR encoder, so they may not be reachable in real conditions.

Change-Id: I85af7d40214133234009323e7e64432fc1be39ca
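The patch replaces the plain "+=" accumulations in cor_h_x() with the saturating basic operator L_add, so a 32-bit overflow clamps instead of wrapping through signed-overflow undefined behaviour. As a rough sketch only (assuming the conventional ETSI/ITU-T basic-operator semantics rather than the exact definition in this tree), such a saturating add looks like this:

#include <stdint.h>

/* Sketch of a saturating 32-bit add in the spirit of L_add; the real
 * operator is defined in the codec's basic-op headers and may differ. */
static int32_t saturating_add32(int32_t a, int32_t b)
{
    int64_t sum = (int64_t)a + (int64_t)b;   /* widen so the sum cannot wrap */
    if (sum > INT32_MAX) return INT32_MAX;   /* clamp positive overflow */
    if (sum < INT32_MIN) return INT32_MIN;   /* clamp negative overflow */
    return (int32_t)sum;
}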
Diffstat (limited to 'media/libstagefright/codecs/amrwbenc/src/cor_h_x.c')
-rw-r--r--   media/libstagefright/codecs/amrwbenc/src/cor_h_x.c   24
1 file changed, 15 insertions, 9 deletions
diff --git a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
index b2aa759..e834396 100644
--- a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
+++ b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
@@ -55,10 +55,10 @@ void cor_h_x(
p1 = &x[i];
p2 = &h[0];
for (j = i; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
y32[i] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
if(L_tmp > L_max)
{
L_max = L_tmp;
@@ -68,10 +68,10 @@ void cor_h_x(
p1 = &x[i+1];
p2 = &h[0];
for (j = i+1; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
y32[i+1] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
if(L_tmp > L_max1)
{
L_max1 = L_tmp;
@@ -81,10 +81,10 @@ void cor_h_x(
p1 = &x[i+2];
p2 = &h[0];
for (j = i+2; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
y32[i+2] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
if(L_tmp > L_max2)
{
L_max2 = L_tmp;
@@ -94,17 +94,23 @@ void cor_h_x(
p1 = &x[i+3];
p2 = &h[0];
for (j = i+3; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
y32[i+3] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
if(L_tmp > L_max3)
{
L_max3 = L_tmp;
}
}
/* tot += 3*max / 8 */
- L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
+ if (L_max > INT_MAX - L_max1 ||
+ L_max + L_max1 > INT_MAX - L_max2 ||
+ L_max + L_max1 + L_max2 > INT_MAX - L_max3) {
+ L_max = INT_MAX >> 2;
+ } else {
+ L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
+ }
L_tot = vo_L_add(L_tot, L_max); /* +max/4 */
L_tot = vo_L_add(L_tot, (L_max >> 1)); /* +max/8 */
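Besides the saturating accumulation, the patch also guards the absolute-value step (negating INT_MIN overflows a 32-bit int) and the sum of the four per-track maxima before the shift. A minimal stand-alone sketch of those two ideas, using hypothetical helper names rather than code from this file:

#include <limits.h>

/* Absolute value that cannot overflow: -INT_MIN does not fit in an int,
 * so that single case is clamped to INT_MAX, mirroring the patch. */
static int abs_clamped(int v)
{
    if (v == INT_MIN)
        return INT_MAX;
    return (v < 0) ? -v : v;
}

/* Sum of four non-negative maxima, checked one partial sum at a time so
 * no intermediate result exceeds INT_MAX; on overflow the sum is clamped. */
static int sum4_clamped(int a, int b, int c, int d)
{
    if (a > INT_MAX - b ||
        a + b > INT_MAX - c ||
        a + b + c > INT_MAX - d)
        return INT_MAX;
    return a + b + c + d;
}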