author     Jean-Baptiste Queru <jbq@google.com>  2012-08-21 09:09:44 -0700
committer  Android Git Automerger <android-git-automerger@android.com>  2012-08-21 09:09:44 -0700
commit     136e83a2987acdb7a6956934098bfc0d347c4e7d
tree       35d485bd485e9301b71bdaf010e5fc6e5546fe38
parent     164c75d47081e714b1a0b537b2b509d5ba9a4dd2
parent     894a7e450704b0c7858a2187a5432b479ac69c1d
am 894a7e45: am 9e13e927: Merge "avcenc: Properly indent assembly blocks"
* commit '894a7e450704b0c7858a2187a5432b479ac69c1d': avcenc: Properly indent assembly blocks
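
For context, this patch converts one-line __asm__ statements into the multi-line layout visible in the hunks below: one instruction per string literal, with the output, input, and clobber operand lists each on their own line. The following sketch illustrates that convention only; abs_diff() is a hypothetical helper, not code from this commit, and the conditional rsbmi requires ARM (not Thumb) mode, as in the files touched here:

    #include <stdint.h>

    /* Illustrative only. "=&r" marks the output early-clobber, the
     * conservative choice when an output register is written before the
     * asm block finishes reading its inputs; "cc" records that the
     * condition flags are modified by subs. */
    static inline int32_t abs_diff(int32_t a, int32_t b)
    {
        int32_t d;
        __asm__ volatile(
            "subs %0, %1, %2\n\t"   /* d = a - b, setting flags  */
            "rsbmi %0, %0, #0"      /* if negative, d = 0 - d    */
            : "=&r"(d)
            : "r"(a), "r"(b)
            : "cc"
        );
        return d;
    }

The formatting change in the patch itself is behavior-neutral: the instruction strings and operand lists are unchanged, only rewrapped.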
Diffstat (limited to 'media')
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h  16
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/sad_inline.h          50
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h       60
3 files changed, 108 insertions(+), 18 deletions(-)
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
index bb4a510..4eb9b00 100644
--- a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
+++ b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
@@ -77,14 +77,26 @@ extern "C"
__inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
-__asm__ volatile("rsbs %1, %1, %2, asr #1\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
+ __asm__ volatile(
+ "rsbs %1, %1, %2, asr #1\n\t"
+ "rsbmi %1, %1, #0\n\t"
+ "add %0, %0, %1"
+ : "=r"(sad), "=r"(tmp)
+ : "r"(tmp2)
+ );
return sad;
}
__inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
-__asm__ volatile("rsbs %1, %2, %1, asr #2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
+ __asm__ volatile(
+ "rsbs %1, %2, %1, asr #2\n\t"
+ "rsbmi %1, %1, #0\n\t"
+ "add %0, %0, %1"
+ : "=r"(sad), "=r"(tmp)
+ : "r"(tmp2)
+ );
return sad;
}
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
index f6c3554..6695d63 100644
--- a/media/libstagefright/codecs/avc/enc/src/sad_inline.h
+++ b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
@@ -343,7 +343,13 @@ SadMBOffset1:
__inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
-__asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad): "r"(tmp), "r"(tmp2));
+ __asm__ volatile(
+ "rsbs %1, %1, %2\n\t"
+ "rsbmi %1, %1, #0\n\t"
+ "add %0, %0, %1"
+ : "=r"(sad)
+ : "r"(tmp), "r"(tmp2)
+ );
return sad;
}
@@ -351,7 +357,18 @@ __asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(s
{
int32 x7;
-__asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %1, %3, %1, lsr #1\n\tORRCC %1, %1, #0x80000000\n\tRSB %1, %1, %1, lsl #8\n\tADD %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask));
+ __asm__ volatile(
+ "EOR %1, %2, %0\n\t"
+ "SUBS %0, %2, %0\n\t"
+ "EOR %1, %1, %0\n\t"
+ "AND %1, %3, %1, lsr #1\n\t"
+ "ORRCC %1, %1, #0x80000000\n\t"
+ "RSB %1, %1, %1, lsl #8\n\t"
+ "ADD %0, %0, %1, asr #7\n\t"
+ "EOR %0, %0, %1, asr #7"
+ : "=r"(src1), "=&r"(x7)
+ : "r"(src2), "r"(mask)
+ );
return src1;
}
@@ -360,12 +377,31 @@ __asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %
{
int32 x7;
-__asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS %1, %3, %1, rrx\n\tRSB %1, %1, %1, lsl #8\n\tSUB %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask));
+ __asm__ volatile(
+ "EOR %1, %2, %0\n\t"
+ "ADDS %0, %2, %0\n\t"
+ "EOR %1, %1, %0\n\t"
+ "ANDS %1, %3, %1, rrx\n\t"
+ "RSB %1, %1, %1, lsl #8\n\t"
+ "SUB %0, %0, %1, asr #7\n\t"
+ "EOR %0, %0, %1, asr #7"
+ : "=r"(src1), "=&r"(x7)
+ : "r"(src2), "r"(mask)
+ );
return src1;
}
-#define sum_accumulate __asm__ volatile("SBC %0, %0, %1\n\tBIC %1, %4, %1\n\tADD %2, %2, %1, lsr #8\n\tSBC %0, %0, %3\n\tBIC %3, %4, %3\n\tADD %2, %2, %3, lsr #8": "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11): "r" (x6));
+#define sum_accumulate __asm__ volatile( \
+ "SBC %0, %0, %1\n\t" \
+ "BIC %1, %4, %1\n\t" \
+ "ADD %2, %2, %1, lsr #8\n\t" \
+ "SBC %0, %0, %3\n\t" \
+ "BIC %3, %4, %3\n\t" \
+ "ADD %2, %2, %3, lsr #8" \
+ : "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11) \
+ : "r" (x6) \
+ );
#define NUMBER 3
#define SHIFT 24
@@ -407,7 +443,7 @@ __asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS
x8 = 16;
///
-__asm__ volatile("MVN %0, #0xFF00": "=r"(x6));
+ __asm__ volatile("MVN %0, #0xFF00": "=r"(x6));
LOOP_SAD0:
/****** process 8 pixels ******/
@@ -431,10 +467,10 @@ LOOP_SAD0:
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 4));
-__asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx));
+ __asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx));
//x10 = *((int32*)ref); ref+=lx;
x14 = *((int32*)(blk + 4));
-__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
+ __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
index 8a7fe22..0165360 100644
--- a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
+++ b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
@@ -230,7 +230,7 @@ __inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
x4 = x5 = 0;
x8 = 16; //<<===========*******
-__asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));
+ __asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));
#if (NUMBER==3)
LOOP_SAD3:
@@ -239,7 +239,7 @@ LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
-__asm__ volatile("BIC %0, %0, #3": "=r"(ref));
+ __asm__ volatile("BIC %0, %0, #3": "=r"(ref));
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 12));
x12 = *((int32*)(ref + 16));
@@ -247,11 +247,32 @@ __asm__ volatile("BIC %0, %0, #3": "=r"(ref));
x14 = *((int32*)(blk + 12));
#if (SHIFT==8)
-__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #8\n\t"
+ "BIC %0, %0, %1, lsl #24\n\t"
+ "MVN %1, %1, lsr #8\n\t"
+ "BIC %1, %1, %2, lsl #24"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==16)
-__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #16\n\t"
+ "BIC %0, %0, %1, lsl #16\n\t"
+ "MVN %1, %1, lsr #16\n\t"
+ "BIC %1, %1, %2, lsl #16"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==24)
-__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #24\n\t"
+ "BIC %0, %0, %1, lsl #8\n\t"
+ "MVN %1, %1, lsr #24\n\t"
+ "BIC %1, %1, %2, lsl #8"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#endif
x12 = *((int32*)(blk + 8));
@@ -271,13 +292,34 @@ __asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1,
x14 = *((int32*)(blk + 4));
#if (SHIFT==8)
-__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #8\n\t"
+ "BIC %0, %0, %1, lsl #24\n\t"
+ "MVN %1, %1, lsr #8\n\t"
+ "BIC %1, %1, %2, lsl #24"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==16)
-__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #16\n\t"
+ "BIC %0, %0, %1, lsl #16\n\t"
+ "MVN %1, %1, lsr #16\n\t"
+ "BIC %1, %1, %2, lsl #16"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==24)
-__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #24\n\t"
+ "BIC %0, %0, %1, lsl #8\n\t"
+ "MVN %1, %1, lsr #24\n\t"
+ "BIC %1, %1, %2, lsl #8"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#endif
-__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
+ __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
/* process x11 & x14 */
x11 = sad_4pixelN(x11, x14, x9);